repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
CiscoSystems/jujucharm-n1k | charms/precise/quantum-gateway/hooks/quantum_contexts.py | Python | apache-2.0 | 6,598 | 0.000303 | # vim: set ts=4:et
import os
import uuid
import socket
from charmhelpers.core.hookenv import (
config,
relation_ids,
related_units,
relation_get,
unit_get,
cached,
)
from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.contrib.openstack.context import (
OSContextGenerator,
context_complete
)
from charmhelpers.contrib.openstack.utils import (
get_os_codename_install_source
)
from charmhelpers.contrib.hahelpers.cluster import(
eligible_leader
)
DB_USER = "quantum"
QUANTUM_DB = "quantum"
NOVA_DB_USER = "nova"
NOVA_DB = "nova"
QUANTUM_OVS_PLUGIN = \
"quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
QUANTUM_NVP_PLUGIN = \
"quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2"
QUANTUM_N1KV_PLUGIN = \
"quantum.plugins.cisco.n1kv.n1kv_quantum_plugin.N1kvQuantumPluginV2"
NEUTRON_OVS_PLUGIN = \
"neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2"
NEUTRON_NVP_PLUGIN = \
"neutron.plugins.nicira.nicira_nvp_plugin.NeutronPlugin.NvpPluginV2"
NEUTRON_N1KV_PLUGIN = \
"neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2"
NEUTRON = 'neutron'
QUANTUM = 'quantum'
def networking_name():
''' Determine whether neutron or quantum should be used for name '''
if get_os_codename_install_source(config('openstack-origin')) >= 'havana':
return NEUTRON
else:
return QUANTUM
OVS = 'ovs'
NVP = 'nvp'
N1KV = 'n1kv'
CORE_PLUGIN = {
QUANTUM: {
OVS: QUANTUM_OVS_PLUGIN,
NVP: QUANTUM_NVP_PLUGIN,
N1KV: QUANTUM_N1KV_PLUGIN
},
NEUTRON: {
OVS: NEUTRON_OVS_PLUGIN,
NVP: NEUTRON_NVP_PLUGIN,
N1KV: NEUTRON_N1KV_PLUGIN
},
}
def core_plugin():
return CORE_PLUGIN[networking_name()][config('plugin')]
class NetworkServiceContext(OSContextGenerator):
interfaces = ['quantum-network-service']
def __call__(self):
for rid in relation_ids('quantum-network-service'):
for unit in related_units(rid):
ctxt = {
'keystone_host': relation_get('keystone_host',
rid=rid, unit=unit),
'service_port': relation_get('service_port', rid=rid,
unit=unit),
'auth_port': relation_get('auth_port', rid=rid, unit=unit),
'service_tenant': relation_get('service_tenant',
rid=rid, unit=unit),
'service_username': relation_get('service_username',
rid=rid, unit=unit),
'service_password': relation_get('service_password',
rid=rid, unit=unit),
'quantum_host': relation_get('quantum_host',
rid=rid, unit=unit),
'quantum_port': relation_get('quantum_port',
rid=rid, unit=unit),
'quantum_url': relation_get('quantum_url',
rid=rid, unit=unit),
'region': relation_get('region',
rid=rid, unit=unit),
# XXX: Hard-coded http.
'service_protocol': 'http',
'auth_protocol': 'http',
}
if context_complete(ctxt):
return ctxt
return {}
class L3AgentContext(OSContextGenerator):
def __call__(self):
ctxt = {}
if config('run-internal-router') == 'leader':
ctxt['handle_internal_only_router'] = eligible_leader(None)
if config('run-internal-router') == 'all':
ctxt['handle_internal_only_router'] = True
| if config('run-internal-router') == 'none':
ctxt['handle_internal_only_router'] = False
if config('external-network-id'):
ctxt['ext_net_id'] = config('external-network-id')
if config('plugin'):
ctxt['plugin'] = config('plugin')
return ctxt
class ExternalPortContext(OSContextGenerator):
def __call__(self):
if config('ext-port'):
return {"ext_port": config('ext-port')}
else:
return N | one
class QuantumGatewayContext(OSContextGenerator):
def __call__(self):
ctxt = {
'shared_secret': get_shared_secret(),
'local_ip': get_host_ip(), # XXX: data network impact
'core_plugin': core_plugin(),
'plugin': config('plugin')
}
return ctxt
class QuantumSharedDBContext(OSContextGenerator):
interfaces = ['shared-db']
def __call__(self):
for rid in relation_ids('shared-db'):
for unit in related_units(rid):
ctxt = {
'database_host': relation_get('db_host', rid=rid,
unit=unit),
'quantum_db': QUANTUM_DB,
'quantum_user': DB_USER,
'quantum_password': relation_get('quantum_password',
rid=rid, unit=unit),
'nova_db': NOVA_DB,
'nova_user': NOVA_DB_USER,
'nova_password': relation_get('nova_password', rid=rid,
unit=unit)
}
if context_complete(ctxt):
return ctxt
return {}
@cached
def get_host_ip(hostname=None):
try:
import dns.resolver
except ImportError:
apt_install('python-dnspython', fatal=True)
import dns.resolver
hostname = hostname or unit_get('private-address')
try:
# Test to see if already an IPv4 address
socket.inet_aton(hostname)
return hostname
except socket.error:
answers = dns.resolver.query(hostname, 'A')
if answers:
return answers[0].address
SHARED_SECRET = "/etc/{}/secret.txt"
def get_shared_secret():
secret = None
_path = SHARED_SECRET.format(networking_name())
if not os.path.exists(_path):
secret = str(uuid.uuid4())
with open(_path, 'w') as secret_file:
secret_file.write(secret)
else:
with open(_path, 'r') as secret_file:
secret = secret_file.read().strip()
return secret
|
quarckster/cfme_tests | cfme/tests/infrastructure/test_individual_host_creds.py | Python | gpl-2.0 | 2,699 | 0.002964 | # -*- coding: utf-8 -*-
import pytest
import random
from cfme.utils import conf
from cfme.utils import error
from cfme.infrastructure import host
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.blockers import BZ
from cfme.utils.update import update
pytestmark = [
pytest.mark.tier(3),
pytest.mark.provider([VMwareProvider, RHEVMProvider], scope='module')
]
msgs = {
'virtualcenter': 'Cannot complete login due to an incorrect user name or password.',
'rhevm': 'Login failed due to a bad username or password.'
}
def get_host_data_by_name(provider_key, ho | st_name):
for host_obj in conf.cfme_data.get('management_systems', {})[provider_key].get('hosts', []):
if host_name == host_obj['name']:
return host_obj
return None
# Tests to automate BZ 1278904
@pytest.mark.meta(blockers=[BZ(1516849,
forced_streams=['5.8', '5.9', 'upstream'],
unblock=lambda provider: not provider.one_of(RHEVMProvider))])
def test_host_good_creds(appliance, request, setup_provid | er, provider):
"""
Tests host credentialing with good credentials
"""
test_host = random.choice(provider.data["hosts"])
host_data = get_host_data_by_name(provider.key, test_host.name)
host_collection = appliance.collections.hosts
host_obj = host_collection.instantiate(name=test_host.name, provider=provider)
# Remove creds after test
@request.addfinalizer
def _host_remove_creds():
with update(host_obj):
host_obj.credentials = host.Host.Credential(
principal="", secret="", verify_secret="")
with update(host_obj, validate_credentials=True):
host_obj.credentials = host.get_credentials_from_config(host_data['credentials'])
@pytest.mark.meta(
blockers=[BZ(1310910, unblock=lambda provider: provider.type != 'rhevm')]
)
@pytest.mark.meta(blockers=[BZ(1516849,
forced_streams=['5.8', '5.9', 'upstream'],
unblock=lambda provider: not provider.one_of(RHEVMProvider))])
def test_host_bad_creds(appliance, request, setup_provider, provider):
"""
Tests host credentialing with bad credentials
"""
test_host = random.choice(provider.data["hosts"])
host_collection = appliance.collections.hosts
host_obj = host_collection.instantiate(name=test_host.name, provider=provider)
with error.expected(msgs[provider.type]):
with update(host_obj, validate_credentials=True):
host_obj.credentials = host.get_credentials_from_config('bad_credentials')
|
alissonperez/django-onmydesk | onmydesk/utils.py | Python | mit | 1,501 | 0.000666 | """Module with common utilities to this package"""
import re
from datetime import timedelta
import importlib
def my_import(class_name):
"""
Usage example::
Report = my_import('myclass.models.Report')
model_instance = Report()
model_instance.name = 'Test'
model_instance.save()
:param str class_name: Class name
:returns: Class object
"""
*packs, class_name = class_name.split('.')
try:
module = importlib.import_module('.'.join(packs))
klass = getattr(module, class_name)
return klass
except (ImportError, AttributeError) as e:
msg = 'Could not import "{}" from {}: {}.'.format(
class_name, e.__class__.__name__, e)
raise ImportError(msg)
def str_to_date(value, | reference_date):
'''
Convert a string like 'D-1' to a "reference_date - timedelta(days=1)"
:param str value: String like 'D-1', 'D+1', 'D'...
:param date reference_date: Date to be used as 'D'
:returns: Result date
:rtype: date
'''
n_value = value.strip(' ').replace(' ', '').upper()
if not | re.match('^D[\-+][0-9]+$|^D$', n_value):
raise ValueError('Wrong value "{}"'.format(value))
if n_value == 'D':
return reference_date
elif n_value[:2] == 'D-':
days = int(n_value[2:])
return reference_date - timedelta(days=days)
elif n_value[:2] == 'D+':
days = int(n_value[2:])
return reference_date + timedelta(days=days)
|
tectronics/snapfly | src/launcher.py | Python | gpl-3.0 | 1,632 | 0.007362 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2011
# Drakmail < drakmail@gmail.com >
# NomerUNO < uno.kms@gmail.com >
# Platon Peacel☮ve <platonny@ngs.ru>
# Elec.Lomy.RU <Elec.Lomy.RU@gmail.com>
# ADcomp <david.madbox@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from Queue import Queue
from subprocess import Popen
from debug import logINFO
devnull = open(os.path.devnull, 'w')
q = None
def start():
global q
q = Queue()
def stop():
while not q.empty():
q.get()
q.task_done()
q.join()
def check_programs():
programs = []
while no | t q.empty():
program = q.get()
if program.poll() == None:
programs.append(program)
q.task_done()
for program in programs:
q.put(program)
return True
def launch_command(cmd):
try:
p = Popen(cmd, stdout = devnull, stderr = devnull )
q.put(p)
except OSError, e:
logINFO("unable to e | xecute a command: %s : %s" % (repr(cmd), repr(e) ))
|
Distrotech/yum-utils | plugins/tsflags/tsflags.py | Python | gpl-2.0 | 1,294 | 0.002318 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, | write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# by Panu Matilainen <pmatilai@laiskiainen.org>
from yum.plugins import TYPE_INTERACTIVE
requires_api_version = '2.1'
plugin_type = (TYPE_INTERACTIVE,)
def init_hook(conduit):
parser = conduit.getOptParser()
parser.add_option('--tsflags', dest='tsflags')
def postreposetup_hook(conduit): |
opts, args = conduit.getCmdLine()
conf = conduit.getConf()
if opts.tsflags:
flags = opts.tsflags.split(',')
for flag in flags:
if flag not in conf.tsflags:
conf.tsflags.append(flag)
|
ddboline/pytest | src/_pytest/unittest.py | Python | mit | 8,619 | 0.000348 | """ discovery and running of std-library "unittest" style tests. """
from __future__ import absolute_import, division, print_function
import sys
import traceback
# for transferring markers
import _pytest._code
from _pytest.config import hookimpl
from _pytest.outcomes import fail, skip, xfail
from _pytest.python import transfer_markers, Class, Module, Function
from _pytest.compat import getimfunc
def pytest_pycollect_makeitem(collector, name, obj):
# has unittest been imported and is obj a subclass of its TestCase?
try:
if not issubclass(obj, sys.modules["unittest"].TestCase):
return
except Exception:
return
# yes, so let's collect it
return UnitTestCase(name, parent=collector)
class UnitTestCase(Class):
# marker for fixturemanger.getfixtureinfo()
# to declare that our children do not support funcargs
nofuncargs = True
def setup(self):
cls = self.obj
if getattr(cls, "__unittest_skip__", False):
return # skipp | ed
setup = getattr(cls, "setUpClass", None)
if setup is not None:
setup()
teardown = getattr(cls, "tearDownClass", None)
if teardown i | s not None:
self.addfinalizer(teardown)
super(UnitTestCase, self).setup()
def collect(self):
from unittest import TestLoader
cls = self.obj
if not getattr(cls, "__test__", True):
return
self.session._fixturemanager.parsefactories(self, unittest=True)
loader = TestLoader()
module = self.getparent(Module).obj
foundsomething = False
for name in loader.getTestCaseNames(self.obj):
x = getattr(self.obj, name)
if not getattr(x, "__test__", True):
continue
funcobj = getimfunc(x)
transfer_markers(funcobj, cls, module)
yield TestCaseFunction(name, parent=self, callobj=funcobj)
foundsomething = True
if not foundsomething:
runtest = getattr(self.obj, "runTest", None)
if runtest is not None:
ut = sys.modules.get("twisted.trial.unittest", None)
if ut is None or runtest != ut.TestCase.runTest:
yield TestCaseFunction("runTest", parent=self)
class TestCaseFunction(Function):
nofuncargs = True
_excinfo = None
_testcase = None
def setup(self):
self._testcase = self.parent.obj(self.name)
self._fix_unittest_skip_decorator()
self._obj = getattr(self._testcase, self.name)
if hasattr(self._testcase, "setup_method"):
self._testcase.setup_method(self._obj)
if hasattr(self, "_request"):
self._request._fillfixtures()
def _fix_unittest_skip_decorator(self):
"""
The @unittest.skip decorator calls functools.wraps(self._testcase)
The call to functools.wraps() fails unless self._testcase
has a __name__ attribute. This is usually automatically supplied
if the test is a function or method, but we need to add manually
here.
See issue #1169
"""
if sys.version_info[0] == 2:
setattr(self._testcase, "__name__", self.name)
def teardown(self):
if hasattr(self._testcase, "teardown_method"):
self._testcase.teardown_method(self._obj)
# Allow garbage collection on TestCase instance attributes.
self._testcase = None
self._obj = None
def startTest(self, testcase):
pass
def _addexcinfo(self, rawexcinfo):
# unwrap potential exception info (see twisted trial support below)
rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
try:
excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
except TypeError:
try:
try:
values = traceback.format_exception(*rawexcinfo)
values.insert(
0,
"NOTE: Incompatible Exception Representation, "
"displaying natively:\n\n",
)
fail("".join(values), pytrace=False)
except (fail.Exception, KeyboardInterrupt):
raise
except: # noqa
fail(
"ERROR: Unknown Incompatible Exception "
"representation:\n%r" % (rawexcinfo,),
pytrace=False,
)
except KeyboardInterrupt:
raise
except fail.Exception:
excinfo = _pytest._code.ExceptionInfo()
self.__dict__.setdefault("_excinfo", []).append(excinfo)
def addError(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addFailure(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addSkip(self, testcase, reason):
try:
skip(reason)
except skip.Exception:
self._skipped_by_mark = True
self._addexcinfo(sys.exc_info())
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
try:
xfail(str(reason))
except xfail.Exception:
self._addexcinfo(sys.exc_info())
def addUnexpectedSuccess(self, testcase, reason=""):
self._unexpectedsuccess = reason
def addSuccess(self, testcase):
pass
def stopTest(self, testcase):
pass
def _handle_skip(self):
# implements the skipping machinery (see #2137)
# analog to pythons Lib/unittest/case.py:run
testMethod = getattr(self._testcase, self._testcase._testMethodName)
if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
):
# If the class or method was skipped.
skip_why = getattr(
self._testcase.__class__, "__unittest_skip_why__", ""
) or getattr(testMethod, "__unittest_skip_why__", "")
try: # PY3, unittest2 on PY2
self._testcase._addSkip(self, self._testcase, skip_why)
except TypeError: # PY2
if sys.version_info[0] != 2:
raise
self._testcase._addSkip(self, skip_why)
return True
return False
def runtest(self):
if self.config.pluginmanager.get_plugin("pdbinvoke") is None:
self._testcase(result=self)
else:
# disables tearDown and cleanups for post mortem debugging (see #1890)
if self._handle_skip():
return
self._testcase.debug()
def _prunetraceback(self, excinfo):
Function._prunetraceback(self, excinfo)
traceback = excinfo.traceback.filter(
lambda x: not x.frame.f_globals.get("__unittest")
)
if traceback:
excinfo.traceback = traceback
@hookimpl(tryfirst=True)
def pytest_runtest_makereport(item, call):
if isinstance(item, TestCaseFunction):
if item._excinfo:
call.excinfo = item._excinfo.pop(0)
try:
del call.result
except AttributeError:
pass
# twisted trial support
@hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item):
if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
ut = sys.modules["twisted.python.failure"]
Failure__init__ = ut.Failure.__init__
check_testcase_implements_trial_reporter()
def excstore(
self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
):
if exc_value is None:
self._rawexcinfo = sys.exc_info()
else:
if exc_type is None:
exc_type = type(exc_value)
self._rawexcinfo = (exc_type, exc_value, exc_tb)
try:
Failure__init__(
self, exc_value, exc_type, exc_tb, captureVars=captureVars
)
except TypeError:
|
DataBiosphere/data-explorer | api/data_explorer/models/export_url_request.py | Python | bsd-3-clause | 3,966 | 0.000504 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from data_explorer.models.base_model_ import Model
from data_explorer import util
class ExportUrlRequest(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self,
cohort_name=None,
filter=None,
data_explorer_url=None,
sql_query=None): # noqa: E501
"""ExportUrlRequest - a model defined in Swagger
:param cohort_name: The cohort_name of this ExportUrlRequest. # noqa: E501
:type cohort_name: str
:param filter: The filter of this ExportUrlRequest. # noqa: E501
:type filter: List[str]
:param data_explorer_url: The data_explorer_url of this ExportUrlRequest. # noqa: E501
:type data_explorer_url: str
:param sql_query: The sql_query of this ExportUrlRequest. # noqa: E501
:type sql_query: str
"""
self.swagger_types = {
'cohort_name': str,
'filter': List[str],
'data_explorer_url': str,
'sql_query': str
}
self.attribute_map = {
'cohort_name': 'cohortName',
'filter': 'filter',
'data_explorer_url': 'dataExplorerUrl',
'sql_query': 'sqlQuery'
}
self._cohort_name = cohort_name
self._filter = filter
self._data_explorer_url = data_explorer_url
self._sql_query = sql_query
@classmethod
def from_dict(cls, dikt):
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The exportUrlRequest of this ExportUrlRequest. # noqa: E501
:rtype: | ExportUrlRequest
"""
return util.deserialize_model(dikt, cls)
@property
def cohort_name(self):
"""Gets the cohort_name of this ExportUrlRequest.
:return: The cohort_name of this ExportUrlRequest.
:rtype: str
"""
return self._cohort_name
@cohort_name.setter
def coho | rt_name(self, cohort_name):
"""Sets the cohort_name of this ExportUrlRequest.
:param cohort_name: The cohort_name of this ExportUrlRequest.
:type cohort_name: str
"""
self._cohort_name = cohort_name
@property
def filter(self):
"""Gets the filter of this ExportUrlRequest.
:return: The filter of this ExportUrlRequest.
:rtype: List[str]
"""
return self._filter
@filter.setter
def filter(self, filter):
"""Sets the filter of this ExportUrlRequest.
:param filter: The filter of this ExportUrlRequest.
:type filter: List[str]
"""
self._filter = filter
@property
def data_explorer_url(self):
"""Gets the data_explorer_url of this ExportUrlRequest.
:return: The data_explorer_url of this ExportUrlRequest.
:rtype: str
"""
return self._data_explorer_url
@data_explorer_url.setter
def data_explorer_url(self, data_explorer_url):
"""Sets the data_explorer_url of this ExportUrlRequest.
:param data_explorer_url: The data_explorer_url of this ExportUrlRequest.
:type data_explorer_url: str
"""
self._data_explorer_url = data_explorer_url
@property
def sql_query(self):
"""Gets the sql_query of this ExportUrlRequest.
:return: The sql_query of this ExportUrlRequest.
:rtype: str
"""
return self._sql_query
@sql_query.setter
def sql_query(self, sql_query):
"""Sets the sql_query of this ExportUrlRequest.
:param sql_query: The sql_query of this ExportUrlRequest.
:type sql_query: str
"""
self._sql_query = sql_query
|
blmousee/pyctp | example/pyctp/dac2.py | Python | mit | 22,134 | 0.0188 | # -*-coding:utf-8 -*-
'''
dac的@indicator版本
提供更简单的使用方式和实现方式
'''
import operator
from collections import (
deque,
)
from .base import (
BaseObject,
fcustom,
indicator,
icache,
t2order_if,
)
from .dac import (
XBASE, #100
CBASE, #XBASE*XBASE,用于XATR
FBASE, #10
)
XBASE = 100 #整数运算的放大倍数
CBASE = XBASE * XBASE #XATR倍数
FBASE = 10 #整数运算的放大倍数2
##########
#编写指标时,请务必确保这个判断中的标识字符串和下面赋值的名称的一致性,否则会每次都赋值和计算,内存立马挂掉
# if not hasattr(_ts,'ss'):
# _ts.ss = []
#
##########
###############
# 基本序列运算
#
###############
@indicator
def OPER1(source,oper,_ts=None):
'''
单参数序列运算
'''
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source)):
#print 'new data:',source[i]
_ts.ss.append(oper(source[i]))
return _ts.ss
'''
不同的operator.xxx, 使OPER1下缓存的key不同,不会导致混淆
'''
NEG = fcustom(OPER1,oper=operator.neg)
ABS = fcustom(OPER1,oper=operator.abs)
NOT = fcustom(OPER1,oper=operator.not_)
@indicator
def OPER2(source1,source2,oper,_ts=None):
'''
双参数序列运算
'''
assert len(source1) == len(source2),'len(source1) != len(source2)'
if not _ts.initialized:
_ts.initialized = True
#print 'new oper2 ss'
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i],source2[i]
_ts.ss.append(oper(source1[i],source2[i]))
return _ts.ss
ADD = fcustom(OPER2,oper=operator.add)
SUB = fcustom(OPER2,oper=operator.sub)
MUL = fcustom(OPER2,oper=operator.mul)
#AND = fcustom(OPER2,oper=operator.and_) #这个是位操作
#OR = fcustom(OPER2,oper=operator.or_) #这个是位操作
#XOR = fcustom(OPER2,oper=operator.xor) #这个是位操作
LT = fcustom(OPER2,oper=operator.lt)
LE = fcustom(OPER2,oper=operator.le)
EQ = fcustom(OPER2,oper=operator.eq)
GT = fcustom(OPER2,oper=operator.gt)
GE = fcustom(OPER2,oper=operator.ge)
@indicator
def OPER21(source1,vs,oper,_ts=None):
'''
双参数运算,第一个为序列,第二个为数值
'''
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i]
_ts.ss.append(oper(source1[i],vs))
return _ts.ss
ADD1 = fcustom(OPER21,oper=operator.add)
SUB1 = fcustom(OPER21,oper=operator.sub)
MUL1 = fcustom(OPER21,oper=operator.mul)
#AND1 = fcustom(OPER21,oper=operator.and_) #这个是位操作
#OR1 = fcustom(OPER21,oper=operator.or_) #这个是位操作
#XOR1 = fcustom(OPER21,oper=operator.xor) #这个是位操作
LT1 = fcustom(OPER21,oper=operator.lt)
LE1 = fcustom(OPER21,oper=operator.le)
EQ1 = fcustom(OPER21,oper=operator.eq)
GT1 = fcustom(OPER21,oper=operator.gt)
GE1 = fcustom(OPER21,oper=operator.ge)
@indicator
def AND(source1,source2,_ts=None):
'''
双序列参数AND运算
'''
assert len(source1) == len(source2),'len(source1) != len(source2)'
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i],source2[i]
_ts.ss.append((source1[i] and source2[i])!=0)
return _ts.ss
@indicator
def GAND(_ts=None,*args):
assert len(args)>0,'GAND params number less than 1'
if not _ts.initialized:
_ts.initialized = True
_ts.ga = []
for i in range(len(_ts.ga),len(args[0])):
rv = all([vs[i] for vs in args])
_ts.ga.append(rv!=0)
return _ts.ga
@indicator
def GOR(_ts=None,*args):
assert len(args)>0,'GOR params number less than 1'
#print 'ts=%s,args=%s' % (_ts,args)
if not _ts.initialized:
_ts.initialized = True
_ts.gor = []
for i in range(len(_ts.gor),len(args[0])):
rv = any([vs[i] for vs in args])
_ts.gor.append(rv!=0)
return _ts.gor
#GAND = fcustom(GOPER,oper=all) #有可变参数时,就不能再有_ts之外的参数用fcustom指定默认值
#GOR = fcustom(GOPER,oper=any) #有可变参数时,就不能再有_ts之外的参数用fcustom指定默认值
@indicator
def DIV(source1,source2,_ts=None):
'''
序列除法
'''
assert len(source1) == len(source2),'len(source1) != len(source2)'
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i],source2[i]
r = (source1[i]+source2[i]/2)/source2[i] if source2[i] != 0 else source1[i]*1000
_ts.ss.append(r)
return _ts.ss
@indicator
def DIV1(source1,vs,_ts=None):
'''
序列除常数
'''
assert vs!= | 0,'divisor vs == 0'
if not _ts.initialized:
_ts.initialized = True
_ts.ss = []
for i in range(len(_ts.ss),len(source1)):
#print 'new data:',source1[i]
_ts.ss.append((source1[i]+vs/2)/vs)
return _ts.ss
############
# 常用指标
#
############
@indicator
def ACCUMULATE(source,_ts=None):
'''
累加
'''
if not _ts.initialized: |
_ts.initialized = True
_ts.sa = []
ss = _ts.sa[-1] if _ts.sa else 0
for i in range(len(_ts.sa),len(source)):
ss += source[i]
_ts.sa.append(ss)
#print id(_ts),id(source),source,_ts.sa
return _ts.sa
NSUM = ACCUMULATE
@indicator
def MSUM(source,mlen,_ts=None):
'''
移动求和
'''
if not _ts.initialized:
_ts.initialized = True
_ts.ms = []
ss = ACCUMULATE(source)
for i in range(len(_ts.ms),len(source)):
v = ss[i] - ss[i-mlen] if i>=mlen else ss[i]
_ts.ms.append(v)
return _ts.ms
@indicator
def MA(source,mlen,_ts=None):
'''
移动平均. 使用MSUM
使用方式:
rev = MA(source,13) #返回source的13期移动平均
当序列中元素个数<mlen时,结果序列为到该元素为止的所有元素值的平均
'''
assert mlen>0,u'mlen should > 0'
if not _ts.initialized:
_ts.initialized = True
_ts.ma = []
ms = MSUM(source,mlen)
for i in range(len(_ts.ma),len(source)):
#当累计个数<nlen时,求其平均值,而不是累计值/mlen
rlen = mlen if i>=mlen else i+1
_ts.ma.append((ms[i]+rlen/2)/rlen)
return _ts.ma
@indicator
def MA_2(source,mlen,_ts=None):
'''
移动平均. 直接计
使用方式:
rev = MA(source,13) #返回source的13期移动平均
当序列中元素个数<mlen时,结果序列为到该元素为止的所有元素值的平均
'''
assert mlen>0,u'mlen should > 0'
if not _ts.initialized:
_ts.initialized = True
_ts.sa = [0]*mlen #哨兵
_ts.ma = []
slen = len(_ts.ma)
ss = _ts.sa[-1]
for i in range(slen,len(source)):
ss += source[i]
_ts.sa.append(ss)
#print ss,_ts.sa[i-mlen]
#当累计个数<nlen时,求其平均值,而不是累计值/mlen
rlen = mlen if mlen < i+1 else i+1
_ts.ma.append((ss-_ts.sa[-rlen-1]+rlen/2)/rlen)
#print _ts.sa
return _ts.ma
@indicator
def NMA(source,_ts=None):
'''
总平均
使用方式:
rev = MA(source) #返回source的当期及之前的平均值
'''
if not _ts.initialized:
_ts.initialized = True
_ts.sa = [0] #哨兵
_ts.nma = []
#print 'initial NMA'
slen = len(_ts.nma)
ss = _ts.sa[-1]
for i in range(slen,len(source)):
ss += source[i]
_ts.sa.append(ss)
#print ss,_ts.sa[-1]
_ts.nma.append((ss+(i+1)/2)/(i+1))
#print _ts.sa
return _ts.nma
@indicator
def CEXPMA(source,mlen,_ts=None):
assert mlen>0,u'mlen should > 0'
if len(source) == 0:#不计算空序列,直接返回
return []
if not _ts.initialized:
_ts.initialized = True
#print 'new cexpma ema'
_ts.ema = [source[0]] #哨兵元素是source[0],确保计算得到的值在<mlen元素的情况下也正确
cur = _ts.ema[-1]
for i in range(len(_ts.ema),len(source)):
cur = (source[i]*2 + cur*(mlen-1) + (mlen+1)/2)/(mlen+1)
_ts.ema.append(cur)
return _ts.ema
EMA = CEXPMA
@indicator
def MACD(source,ifast=12,islow=26,idiff=9,_ts=None):
if not _ts.initialized:
_ts.initialized = True
_ts.diff = []
|
silverbp/master-builder | mb/lib/ioc.py | Python | apache-2.0 | 4,431 | 0.004288 | from __future__ import absolute_import
from __future__ import unicode_literals
import glob
import imp
import inspect
import os
import sys
from mb.config.config import get_default_config_file
from mb.lib import logger
from mb.lib import process
_log = logger.get_logger('[Ioc]')
# plugin types
from mb import build_context # BuildContext # NOQA
from mb import command # Command # NOQA
from mb import template_engine # TemplateEngine # NOQA
from mb import version_scheme #VersionScheme # NOQA
from mb.config.config import PluginConfig # NOQA
def rchop(thestring, ending):
if thestring.endswith(ending):
return thestring[:-len(ending)]
return thestring
def _is_plugin_type(object_attr, plugin_type):
try:
if object_attr == plugin_type:
return False
return issubclass(object_attr, plugin_type)
except:
return False
_plugin_modules = [build_context, command, template_engine, version_scheme]
_plugin_types = [build_context.BuildContext, command.Command, template_engine.TemplateEngine, version_scheme.VersionScheme]
_loaded_plugin_definitions = {}
_plugin_instances = {}
_config = get_default_config_file()
if os.path.isdir(_config.plugin_dir):
os.chdir(_config.plugin_dir)
for file in glob.glob("*.py"):
plugin_module_name_template = "silverbp_mb_plugin_" + os.path.splitext(file)[0] + "_%d"
for plugin_name_suffix in range(len(sys.modules)):
plugin_module_name = plugin_module_name_template % plugin_name_suffix
if plugin_module_name not in sys.modules:
break
with open(file, "r") as plugin_file:
_plugin_modules.append(imp.load_module(plugin_module_name, plugin_file, file, ("py", "r", imp.PY_SOURCE)))
for module in _plugin_modules:
for module_attr in (getattr(module, name) for name in dir(module)):
for plugin_type in _p | lugin_types:
if not _is_plugin_type(module_attr, plugin_type):
continue
_loaded_plugin_definitions[module_attr.__name__] = module_attr
_defined_commands = _config.commands
_defined_commands['_prerun'] = PluginConfig('MBPreRunCommand', {}, _config)
command_plugins = [k for (k, v) in _loaded_plugin_definitions.items() if _is_plugin_type(v, command.Command)]
for (k, v) in _config. | commands.items():
if v.name not in command_plugins:
_log.warn('The following Command: {0} was not found and will not be available'.format(k))
del _defined_commands[k]
_log.debug('The following commands will be available: {0}'.format([k for (k, v) in _defined_commands.items() if not k.startswith('_')]))
def _load_plugin(plugin):
if plugin.name in _plugin_instances.keys():
return _plugin_instances[plugin.name]
plugin_definition = _loaded_plugin_definitions[plugin.name]
arguments = []
# if the plugin doesn't have a constructor, there's nothing to inject
if '__init__' in getattr(plugin_definition, '__dict__', None).keys():
for arg in inspect.getargspec(plugin_definition.__init__)[0][1:]:
arguments.append(load_dependency(arg))
instance = plugin_definition(*arguments)
available_properties = [x for x, y in inspect.getmembers(instance.__class__, lambda x: isinstance(x, property))]
for (key, value) in plugin.config.items():
if key in available_properties:
try:
setattr(instance, key, value)
except Exception as err:
_log.warn('There was a problem setting the plugin config: \'{0}\' on \'{1}\' with \'{2}\'.'.format(plugin.name, key, value))
_log.debug('Exception occured while trying to set a plugin config value: {0}'.format(err))
else:
_log.warn('The following plugin config: {0}, is not an option to set on {1}'.format(key, plugin.name))
_plugin_instances[plugin.name] = instance
return instance
def load_dependency(name):
if name == 'config':
return _config
if name == 'process':
return process
return _load_plugin(getattr(_config, name))
def get_commands():
return [k for (k, v) in _defined_commands.items() if not k.startswith('_')]
def load_command(name):
if name in _defined_commands.keys():
plugin = _defined_commands[name]
else:
raise StandardError('The following command: {0} is not available'.format(name))
return _load_plugin(plugin)
|
davehorton/drachtio-server | deps/boost_1_77_0/libs/mpi/test/python/scatter_test.py | Python | mit | 1,318 | 0.008346 | # Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test scatter() collective.
from __future__ import print_function
import mpi
from generators import *
def scatter_test(comm, generator, kind, root):
if comm.rank == root:
print ("Scattering %s from root %d..." % (kind, root)),
if comm.rank == root:
values = list()
for p in range(0, comm.size):
values.append(generator(p))
result = mpi.scatter(comm, values, root = root)
else:
result = mpi.scatter(comm, root = root);
assert result == generator(comm.rank)
if comm.rank == root: print ("OK.")
return
scatter_test(mpi.world, int_generator, "integers", 0)
scatter_test(mpi.world, int_generator, "integers", 1)
scatter_test(mpi.world, gps_generator, "GPS positions", 0)
scat | ter_test(mpi.world, gps_generator, "GPS positions", 1)
scatter_test(mpi.world, string_generator, "strings", 0)
scatter_test(mpi.world, string_generator, "strings", 1)
scatter_test(mpi.world, string_list_generator, "li | st of strings", 0)
scatter_test(mpi.world, string_list_generator, "list of strings", 1)
|
VJalili/OIDCProvider | config.py | Python | lgpl-3.0 | 2,954 | 0.003047 | PORT = 8040
ISSUER = 'https://localhost' # do not include the port, it will be added in the code.
SERVICEURL = "{issuer}verify" # do not manually add issuer or port number, these will be added in the code.
SERVER_CERT = "certification/server.crt"
SERVER_KEY = "certification/server.key"
CERT_CHAIN = None
AUTHENTICATION = {
"UserPassword":
{
"ACR": "PASSWORD",
"WEIGHT": 1,
"URL": SERVICEURL,
"EndPoints": ["verify"],
}
}
CLIENTDB = 'ClientDB'
SYM_KEY = "SoLittleTime,Got" # used for Symmetric key authentication only.
COOKIENAME = 'pyoic'
COOKIETTL = 4 * 60 # 4 hours
USERINFO = "SIMPLE"
USERDB = {
"user1": {
"sub": "sub1",
"name": "name1",
"given_name": "givenName | 1",
"family_name": "familyName1",
"nickname": "nickname1",
"email": "email1@example.org",
"email_verified": False,
"phone_number": "+984400000000",
"address": {
"street_address": "address1",
"locality": "locality1",
"postal_code": "5719800000",
"country": "Iran"
},
},
"user2": {
"sub": "sub2",
"name": "name2",
"given_name": "givenName2", |
"family_name": "familyName2",
"nickname": "nickname2",
"email": "email2@example.com",
"email_verified": True,
"address": {
"street_address": "address2",
"locality": "locality2",
"region": "region2",
"postal_code": "5719899999",
"country": "Iran",
},
}
}
# This is a JSON Web Key (JWK) object, and its members represent
# properties of the key and its values.
keys = [
{"type": "RSA", "key": "cryptography_keys/key.pem", "use": ["enc", "sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["enc"]}
# "type" or "kty" identifies the cryptographic algorithm family used with the key.
# The kty values are case sensitive. The kty values should either be registered
# in the IANA "JSON Web Key Types" registery or be a value that contains a
# Collision-Resistant Name. For more info on kty values refer to:
# https://tools.ietf.org/html/rfc7518
#
# Cryptography keys are: private and public keys.
# Keys are encrypted with RSA algorithm, and are stored in separate files in RSA.
#
# use (Public Key Use) parameter identifies the intended use of the public key.
# This parameter is employed to indicate whether a public key is used for encryption
# data or verifying the signature on data. Values defined by this specification are:
# enc (encryption), sig (signature)
#
#
# "RSA" (a public key cryptography), see:
# http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
#
# "EC": Elliptic Curve, see:
# http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
]
|
timlinux/fabgis | docs/source/conf.py | Python | lgpl-2.1 | 10,614 | 0.005653 | # -*- coding: utf-8 -*-
#
# FabGIS documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 2 22:29:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..')
)
)
# So we can get the version from setup.py
from setup import setup
print sys.path
# ...
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FABGIS'
copyright = u'2013, Tim Sutton, Werner Macho'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
setup_version = setup[version]
tokens = setup_version.split['.']
version = '%s.%s' % (tokens[0], tokens[1])
# The full version, including alpha/beta/rc tags.
release = setup_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# Activate the theme.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
html_logo = "logo_small.png"
# Theme options are theme-specific and customize the look and feel of a
# theme further.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "FABGIS",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Go",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("GitHub Project", "https://github.com/timlinux/fabgis"),
("Linfiniti", "http://linfiniti.com", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false" |
'navbar_fixed_top': "true",
# Location of link to s | ource.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "slate",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will |
srishtyagrawal/database-iblt | avg.py | Python | mit | 113 | 0.035398 | import sys
#!/usr/bin/python
f = open(str(sys.argv[1]),'r' | )
sum = 0
for i in f:
sum += int(i)
| print sum
|
avanzosc/avanzosc6.1 | avanzosc_product_default_location/wizard/__init__.py | Python | agpl-3.0 | 1,106 | 0.003617 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2013 AvanzOSC (Daniel). All Rights Reserved
# Date: 25/09/2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, e | ither version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; witho | ut even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import wiz_product_default_location
import wiz_product_default_location_line
import wizard_pdl_assign |
remmihsorp/minicps | scripts/pox/l2_learning.py | Python | mit | 5,918 | 0.00338 | """
Flow based learing switch
l2_learning listens to openflow events and uses a dedicated
class called LearningSwitch to manage the Switch learning logic
eg: flood, flow_mod, drop
Learn:
how to create a dedicate controller class
how to register a pox component to the core object
how to drop a packet
how to flood multicast packets (eg: ARP request uses 00:00:00:00:00:00)
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
# dpid helper functions
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_bool
import time
log = core.getLogger()
_flood_delay = 0 # sec
class LearningSwitch(object):
"""
"""
def __init__(self, connection, transparent):
"""
:transparent: TODO
:hold_down_expired: static flag that signal when
_flood_delay equals 0
"""
self.connection = connection
self.transparent = transparent
# controller MACs table
self.macToPort = {}
# Every _handle_EventName (subsribers) will be mapped to
# EventName (raised event)
connection.addListeners(self)
# Bool to track a timer
self.hold_down_expired = (_flood_delay == 0)
log.debug("Initializing LearningSwitch, transparent=%s",
str(self.transparent))
def _handle_PacketIn(self, event):
"""
Manage PacketIn events sent by event,connection datapaths.
"""
packet = event.parsed
def flood(message=None):
"""
create a packet_out with flood rule
waiting _flood_delay sec before sending the instruction to the switch
:message: optional log.debug message
"""
msg = of.ofp_packet_out() # create of_packet_out
# flood
if time.time() - self.connection.connect_time >= _flood_delay:
if self.hold_down_expired is False:
self.hold_down_expired = True
log.info("%s: Flood hold_down expired -- flooding",
dpid_to_str(event.dpid))
if message is not None: log.debug(message)
log.debug("%i: flood %s -> %s" % (event.dpid, packet.src, packet.dst))
action = of.ofp_action_output(port=of.OFPP_FLOOD)
msg.actions.append(action)
# wait
else:
log.info("Holding down flood for %s" % (dpid_to_str(event.dpid)))
pass
msg.data = event.ofp
msg.in_port = event.port
self.connection.send(msg)
def drop(duration=None):
"""TODO: Docstring for drop.
"""
if duration is not None:
if not isinstance(duration, tuple): # idle_timeout, hard_timeout
duration = (duration, duration)
msg = of.ofp_flow_mod()
msg.match = of.ofp_match.from_packet(packet)
msg.idle_timeout = duration[0]
msg.hard_timeout = duration[1]
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
elif event.ofp.buffer_id is not None:
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
self.macToPort[packet.src] = event.port
if not self.transparent:
if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered():
drop()
return
if packet.dst.is_multicast:
flood()
else:
if packet.dst not in self.macToPort:
flood("Port from %s unknown -- flooding" % (packet.dst))
else:
port = self.macToPort[packet.dst]
if port == event.port:
log.warning("Same port for packet from %s -> %s on %s.%s. Drop."
% (packet.src, packet.dst, dpid_to_str(event.dpid), port))
drop(10)
return
log.debug("installing flow for %s.%i -> %s.%i"
% (packet.src, event.port, packet.dst, port))
msg = of.ofp_flow_mod()
msg.match = of.ofp_match.from_packet(packet, event.port)
msg.idle_timeout = 10
msg.hard_timeout = 30
action = of.ofp_action_output(port=port)
msg.actions.append(action)
msg.data = event.ofp
self.connection.send(msg)
class l2_learning(object):
"""
useless l2_learning pox component
"""
def __init__(self, transparent):
"""TODO: to be defined1.
:transparent: passed through command line
"""
# l2_learning methods subscribes to all events r | aised by nexus
core.openflow.addListeners(self)
self.transparent = transparent
def _handle_ConnectionUp(self, event):
"""
Event fired once the controller is connected
"""
log.debug("Connection %s" % (event.connection))
LearningSwitch(event.connection, self.transparent)
def launch(transparent=False, hold_down=_flood_delay):
| """
launch argument are parsed from command line
"""
try:
global _flood_delay
_flood_delay = int(str(hold_down), 10) # base 10 conversion
assert _flood_delay >= 0
except:
raise RuntimeError("Expected hold_down to be a number.")
# create an instance of a l2_learning class
# passing transparent to its constructor
# assigning the classname as component name
# eg: core.l2_learning
core.registerNew(l2_learning, str_to_bool(transparent))
|
datamade/python-legistar-scraper | scripts/guessdomains.py | Python | bsd-3-clause | 4,242 | 0.001886 | import re
import os
import csv
import time
import logging
import logging.config
from os.path import join
import scrapelib
path = '/home/thom/sunlight/python-opencivicdata/opencivicdata/division-ids/identifiers/country-us'
class Checker(scrapelib.Scraper):
OUTFILE = 'domains.csv'
SCRAPELIB_RPM = 10
SCRAPELIB_TIMEOUT = 60
SCRAPELIB_RETRY_ATTEMPTS = 0
SCRAPELIB_RETRY_WAIT_SECONDS = 20
FASTMODE = True
# PROXIES = dict(http="http://localhost", https='https://localhost')
BOGUS_DOMAIN_MESSAGE = 'Invalid parameters!!'
def __init__(self):
super().__init__()
self.checked_places = set()
logging.config.dictConfig(self.LOGGING_CONFIG)
self.logger = logging.getLogger('legistar')
# scrapelib setup
| self.timeout = self.SCRAPELIB_TIMEOUT
self.requests_per_minute = self.SCRAPELIB_RPM
self.retry_attempts = self.SCRAPELIB_RETRY_ATTEMPTS
self.retry_wait_seconds = self.SCRAPELIB_RETRY_WAIT_ | SECONDS
self.follow_robots = False
# if self.PROXIES:
# self.proxies = self.PROXIES
if self.FASTMODE:
self.cache_write_only = False
cache_dir = '.cache'
self.cache_storage = scrapelib.FileCache(cache_dir)
def __enter__(self):
self.outfile = open(self.OUTFILE, 'w')
self.writer = csv.writer(self.outfile)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.outfile.close()
def check_all(self):
for dr, subdrs, filenames in os.walk(path):
for filename in filenames:
if 'school' in filename:
continue
if not filename.endswith('.csv'):
continue
self.current_file = filename
self.logger.warning('Starting file: %r' % filename)
with open(join(dr, filename), 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
self.row = row
self.check_row()
def check_row(self):
if not self.row:
return
self.ocdid = self.row[0]
for piece in self.ocdid.split('/'):
if ':' not in piece:
continue
_, place = piece.split(':')
place = re.sub('[a-z]+[\d\-]+', '', place)
place = re.sub('[\d\-]+', '', place)
self.place = self.sluggify(place)
self.check_place()
def check_place(self):
if self.place in self.checked_places:
return
if not self.place:
return
if '.' in self.place:
return
if len(self.place) < 2:
return
self.url = 'http://%s.legistar.com' % self.place
self.logger.debug('Checking %r ...' % self.url)
resp = self.get(self.url)
self.checked_places.add(self.place)
if resp.text.strip() != self.BOGUS_DOMAIN_MESSAGE:
self.process_hit()
return True
def process_hit(self):
self.logger.warning('HIT: %r' % self.url)
self.logger.warning('HIT: %r' % self.ocdid)
data = [self.url]
self.writer.writerow(data)
self.outfile.flush()
def sluggify(self, text):
return text.replace('_', '').replace('~', '').lower()
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': "%(asctime)s %(levelname)s %(name)s: %(message)s",
'datefmt': '%H:%M:%S'
}
},
'handlers': {
'default': {'level': 'DEBUG',
'class': 'legistar.utils.ansistrm.ColorizingStreamHandler',
'formatter': 'standard'},
},
'loggers': {
'legistar': {
'handlers': ['default'], 'level': 'DEBUG', 'propagate': False
},
'requests': {
'handlers': ['default'], 'level': 'DEBUG', 'propagate': False
},
},
}
if __name__ == '__main__':
with Checker() as checker:
checker.check_all()
|
froyobin/horizon | openstack_dashboard/test/test_data/sahara_data.py | Python | apache-2.0 | 17,441 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language | governing permissions and limitations
# under the License.
from openstack_dashboard.test.test_data import utils
from saharaclient.api import cluster_templates
from saharaclient.api import clusters
from saharaclient.api import data_sources
from saharaclient.api import job_binaries
from saharaclient.api import job_executions
from saharaclient.api import jobs
from saharaclient.api import node_group_templates
from saharaclient.api import plugins
def data(TEST):
TEST.plugins = utils.TestDataContainer()
TEST.plugins_configs = utils.TestDataContainer()
TEST.nodegroup_templates = utils.TestDataContainer()
TEST.cluster_templates = utils.TestDataContainer()
TEST.clusters = utils.TestDataContainer()
TEST.data_sources = utils.TestDataContainer()
TEST.job_binaries = utils.TestDataContainer()
TEST.jobs = utils.TestDataContainer()
TEST.job_executions = utils.TestDataContainer()
plugin1_dict = {
"description": "vanilla plugin",
"name": "vanilla",
"title": "Vanilla Apache Hadoop",
"versions": ["2.3.0", "1.2.1"]
}
plugin1 = plugins.Plugin(plugins.PluginManager(None), plugin1_dict)
TEST.plugins.add(plugin1)
plugin_config1_dict = {
"node_processes": {
"HDFS": [
"namenode",
"datanode",
"secondarynamenode"
],
"MapReduce": [
"tasktracker",
"jobtracker"
]
},
"description": "This plugin provides an ability to launch vanilla "
"Apache Hadoop cluster without any management "
"consoles.",
"versions": [
"1.2.1"
],
"required_image_tags": [
"vanilla",
"1.2.1"
],
"configs": [
{
"default_value": "/tmp/hadoop-${user.name}",
"name": "hadoop.tmp.dir",
"priority": 2,
"config_type": "string",
"applicable_target": "HDFS",
"is_optional": True,
"scope": "node",
"description": "A base for other temporary directories."
},
{
"default_value": True,
"name": "hadoop.native.lib",
"priority": 2,
"config_type": "bool",
"applicable_target": "HDFS",
"is_optional": True,
"scope": "node",
"description": "Should native hadoop libraries, if present, "
"be used."
},
],
"title": "Vanilla Apache Hadoop",
"name": "vanilla"
}
TEST.plugins_configs.add(plugins.Plugin(plugins.PluginManager(None),
plugin_config1_dict))
# Nodegroup_Templates.
ngt1_dict = {
"created_at": "2014-06-04 14:01:03.701243",
"description": None,
"flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"floating_ip_pool": None,
"hadoop_version": "1.2.1",
"id": "c166dfcc-9cc7-4b48-adc9-f0946169bb36",
"image_id": None,
"name": "sample-template",
"node_configs": {},
"node_processes": [
"namenode",
"jobtracker",
"secondarynamenode",
"hiveserver",
"oozie"
],
"plugin_name": "vanilla",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": None,
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
}
ngt1 = node_group_templates.NodeGroupTemplate(
node_group_templates.NodeGroupTemplateManager(None), ngt1_dict)
TEST.nodegroup_templates.add(ngt1)
# Cluster_templates.
ct1_dict = {
"anti_affinity": [],
"cluster_configs": {},
"created_at": "2014-06-04 14:01:06.460711",
"default_image_id": None,
"description": None,
"hadoop_version": "1.2.1",
"id": "a2c3743f-31a2-4919-8d02-792138a87a98",
"name": "sample-cluster-template",
"neutron_management_network": None,
"node_groups": [
{
"count": 1,
"created_at": "2014-06-04 14:01:06.462512",
"flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"floating_ip_pool": None,
"image_id": None,
"name": "master",
"node_configs": {},
"node_group_template_id": "c166dfcc-9cc7-4b48-adc9",
"node_processes": [
"namenode",
"jobtracker",
"secondarynamenode",
"hiveserver",
"oozie"
],
"updated_at": None,
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
},
{
"count": 2,
"created_at": "2014-06-04 14:01:06.463214",
"flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"floating_ip_pool": None,
"image_id": None,
"name": "workers",
"node_configs": {},
"node_group_template_id": "4eb5504c-94c9-4049-a440",
"node_processes": [
"datanode",
"tasktracker"
],
"updated_at": None,
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
}
],
"plugin_name": "vanilla",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": None
}
ct1 = cluster_templates.ClusterTemplate(
cluster_templates.ClusterTemplateManager(None), ct1_dict)
TEST.cluster_templates.add(ct1)
# Clusters.
cluster1_dict = {
"anti_affinity": [],
"cluster_configs": {},
"cluster_template_id": "a2c3743f-31a2-4919-8d02-792138a87a98",
"created_at": "2014-06-04 20:02:14.051328",
"default_image_id": "9eb4643c-dca8-4ea7-92d2-b773f88a8dc6",
"description": "",
"hadoop_version": "1.2.1",
"id": "ec9a0d28-5cfb-4028-a0b5-40afe23f1533",
"info": {},
"is_transient": False,
"management_public_key": "fakekey",
"name": "cercluster",
"neutron_management_network": None,
"node_groups": [
{
"count": 1,
"created_at": "2014-06-04 20:02:14.053153",
"flavor_id": "0",
"floating_ip_pool": None,
"image_id": None,
"instances": [
{
"created_at": "2014-06-04 20:02:14.834529",
"id": "c3b8004b-7063-4b99-a082-820cdc6e961c",
"instance_id": "a45f5495-4a10-4f17-8fae",
"instance_name": "cercluster-master-001",
"internal_ip": None,
"management_ip": None,
"updated_at": None,
"volumes": []
}
],
"name": "master",
"node_configs": {},
"node_group_template_id": "c166dfcc-9cc7-4b48-adc9",
"node_processes": [
"namenode",
"jobtracker",
"secondarynamenode",
"hiveserver",
|
elbadawyy/Hash-POS | API/Helper/Database.py | Python | lgpl-3.0 | 4,331 | 0.051951 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import ConfigParser
import sqlite3
from sqlite3 import Error
import os
from abc import ABCMeta, abstractmethod
curr_dir = os.path.dirname(os.path.realpath(__file__))
api_dir = os.path.abspath(os.path.join(curr_dir, '..'))
root_dir = os.path.abspath(os.path.join(api_dir, '..'))
config_dir = os.path.abspath(os.path.join(root_dir, 'Config'))
config_file = os.path.abspath(os.path.join(config_dir, 'DB.conf'))
class API:
@staticmethod
def importDBVars():
global db_dir
global db_name
config = ConfigParser.ConfigParser()
#Some Validations Goes Here To Check The Configration File
config.read(config_file)
db_dir = config.get('DataBaseConfig', 'DataBaseDirPath')
db_name = config.get('DataBaseConfig', 'DataBaseName')
@staticmethod
def addEntry(table, field_tuple, value_tuple):
API.importDBVars()
os.chdir(db_dir)
try:
conn = sqlite3.connect(db_name)
except Error as e:
print(e)
field_tuple=str(field_tuple)
value_tuple=str(value_tuple)
print 'Connection Succeed With {}'.format(db_name)
query="INSERT INTO "+table+" "+field_tuple+" VALUES "+value_tuple+""
conn.execute(query)
conn.commit()
conn.close()
@staticmethod
def modEntry(table ,id , field, value):
API.importDBVars()
os.chdir(db_dir)
try:
conn = sqlite3.connect(db_name)
except Error as e:
print(e)
id=str(id)
print 'Connection Succeed With {}'.format(db_name)
query="UPDATE "+table+" SET "+field+" = '"+value+"' WHERE id ='"+id+"'"
conn.execute(query)
conn.commit()
conn.close()
@staticmethod
def delEntry(table, id):
API.importDBVars()
os.chdir(db_dir)
try:
conn = sqlite3.connect(db_name)
except Error as e:
print(e)
print 'Connection Succeed With {}'.format(db_name)
id=str(id)
query="DELETE FROM "+table+" WHERE (id = '"+id+"')"
conn.execute(query)
conn.commit()
conn.close()
@staticmethod
def resolvNameToID(table, name):
API.importDBVars()
os.chdir(db_dir)
try:
conn = sqlite3.connect(db_name)
except Error as e:
print(e)
print 'Connection Succeed With {}'.format(db_name)
query = "SELECT id FROM "+table+" WHERE name = '"+name+"'"
cur = conn.cursor()
cur.execute(query)
conn.commit()
id = cur.fetchone()
for i in id:
id = i
return id
conn.close()
@staticmethod
def listFieldVals(table, field):
API.importDBVars()
os.chdir(db_dir)
try:
conn = sqlite3.connect(db_name)
except Error as e:
print(e)
print 'Connection Succeed With {}'.format(db_name)
query = "SELECT "+field+" FROM "+table+""
cur = conn.cursor()
cur.execute(query)
conn.commit()
id = cur.fetchall()
res=[]
for i in id:
res.append(i[0])
return res
conn.close()
@staticmethod
def chkIdExist(table, id):
API.importDBVars()
os.chdir(db_dir)
try:
conn = sqlite3.connect(db_name)
except Error as e:
print(e)
print 'Connection Succeed With {}'.format(db_name)
query="SELECT * FROM "+table+" WHERE id ='"+str(id)+"'"
cur = conn.c | ursor()
cur.execute(query)
conn.commit()
res = cur.fetchall()
return res
class Table:
__metaclass__ = ABCMeta
table=""
field_tuple=()
def add(self, value_tuple):
errcode=""
if not( | value_tuple) or not (value_tuple[0]) or(type(value_tuple) is not tuple) or (len(self.field_tuple) != len(value_tuple)):
errcode="255"
return errcode
API.addEntry(self.table, self.field_tuple,value_tuple)
errcode="0"
return errcode
def modify(self, id, field, value):
errcode=""
id=int(id)
if not (id) or(type(id) is not int) or (id < 0) or not (field) or(type(field) is not str) or not (value):
errcode="255"
return errcode
API.modEntry(self.table ,id, field, value)
errcode="0"
return errcode
def delete(self, id):
errcode=""
id=int(id)
if not(id) or (id < 0) or(type(id) is not int):
errcode="255"
return errcode
API.delEntry(self.table, id)
errcode="0"
return errcode
def chkIdExistInTable(self, id,table_name):
errcode=""
id=int(id)
if not(id) or (id < 0) or(type(id) is not int):
errcode="255"
return errcode
if(API.chkIdExist(table_name, id) > 0):
errcode="0"
return errcode
else:
errcode="255" #NotExist
return errcode
@abstractmethod
def retTableName(self):
pass
|
fe11x/django-oscar-alipay | alipay/warrant/__init__.py | Python | bsd-3-clause | 198 | 0.021739 | ##### | ################################################### | ###############################
#支付宝担保交易
####################################################################################### |
mathstuf/bodhi | bodhi/services/releases.py | Python | gpl-2.0 | 6,860 | 0.002041 | # This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import math
from cornice import Service
from pyramid.exceptions import HTTPNotFound
from sqlalchemy import func, distinct
from sqlalchemy.sql import or_
from bodhi import log
from bodhi.models import Update, Build, Package, Release
import bodhi.schemas
import bodhi.security
from bodhi.validators import (
validate_tags,
validate_enums,
validate_updates,
validate_packages,
validate_release,
)
release = Service(name='release', path='/releases/{name}',
description='Fedora Releases',
cors_origins=bodhi.security.cors_origins_ro)
releases = Service(name='releases', path='/releases/',
description='Fedora Releases',
# Note, this 'rw' is not a typo. the @comments service has
# a ``post`` section at the bottom.
cors_origins=bodhi.security.cors_origins_rw)
@release.get(accept="text/html", renderer="release.html")
def get_release_html(request):
id = request.matchdict.get('name')
release = Release.get(id, request.db)
if not release:
request.errors.add('body', 'name', 'No such release')
request.errors.status = HTTPNotFound.code
updates = request.db.query(Update).filter(
Update.release==release).order_by(
Update.date_submitted.desc())
updates_count = request.db.query(Update.date_submitted, Update.type).filter(
Update.release==release).order_by(
Update.date_submitted.desc())
date_commits = {}
dates = set()
for update in updates_count.all():
d = update.date_submitted
yearmonth = str(d.year) + '/' + str(d.month).zfill(2)
dates.add(yearmonth)
if not update.type.description in date_commits:
date_commits[update.type.description] = {}
if yearmonth in date_commits[update.type.description]:
date_commits[update.type.description][yearmonth] += 1
else:
date_commits[update.type.description][yearmonth] = 0
return dict(release=release,
latest_updates=updates.limit(25).all(),
count=updates.count(),
date_commits=date_commits,
dates = sorted(dates))
@release.get(accept=('application/json', 'text/json'), renderer='json')
@release.get(accept=('application/javascript'), renderer='jsonp')
def get_release_json(request):
id = request.matchdict.get('name')
release = Release.get(id, request.db)
if not release:
request.errors.add('body', 'name', 'No such release')
request.errors.status = HTTPNotFound.code
return release
@releases.get(accept="text/html", schema=bodhi.schemas.ListReleaseSchema,
renderer='releases.html',
validators=(validate_release, validate_updates,
validate_packages))
def query_releases_html(request):
def collect_releases(releases):
x = {}
for r in releases:
if r['state'] in x:
x[r['state']].append(r)
else:
x[r['state']] = [r]
return x
db = request.db
releases = db.query(Release).order_by(Release.id.desc()).all()
return dict(releases=collect_releases(releases))
@releases.get(accept=('application/json', 'text/json'),
schema=bodhi.schemas.ListReleaseSchema, renderer='json',
validators=(validate_release, validate_updates,
validate_packages))
def query_releases_json(request):
db = request.db
data = request.validated
query = db.query(Release)
name = data.get('name')
if name is not None:
query = query.filter(Release.name.like(name))
updates = data.get('updates')
if updates is not None:
query = query.join(Release.builds).join(Build.update)
args = \
[Update.title == update.title for update in updates] +\
[Update.alias == update.alias for update in updates]
query = query.filter(or_(*args))
packages = data.get('packages')
if packages is not None:
query = query.join(Release.builds).join(Build.package)
query = query.filter(or_(*[Package.id == p.id for p in packages]))
# We can't use ``query.count()`` here because it is naive with respect to
# all the joins that we're doing above.
count_query = query.with_labels().statement\
.with_only_columns([func.count(distinct(Release.id))])\
.order_by(None)
total = db.execute(count_query).scalar()
page = data.get('page')
rows_per_page = data.get('rows_per_page')
pages = int(math.ceil(total / float(rows_per_page)))
query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)
return dict(
releases=query.all(),
page=page,
pages=pages,
rows_per_page=rows_per_page,
total=total,
)
@releases.post(schema=bodhi.schemas.SaveReleaseSchema,
acl=bodhi.security.admin_only_acl, renderer='json',
validators=(validate_tags, validate_enums)
)
def save_release(request):
"""Save a release
This entails either creating a new release, or editing an existing one. To
edit an existing | release, the release's original name must be specified in
the ``edited`` parameter.
"""
data = request.validated
edited = data.pop("edited", None)
# This has already been validated at this point, but we need to ditch
# it since the models don't care about a csrf argument.
data.pop('csrf_token')
try: |
if edited is None:
log.info("Creating a new release: %s" % data['name'])
r = Release(**data)
else:
log.info("Editing release: %s" % edited)
r = request.db.query(Release).filter(Release.name==edited).one()
for k, v in data.items():
setattr(r, k, v)
except Exception as e:
log.exception(e)
request.errors.add('body', 'release',
'Unable to create update: %s' % e)
return
request.db.add(r)
request.db.flush()
return r
|
Jeff-Tian/mybnb | Python27/Lib/idlelib/run.py | Python | apache-2.0 | 12,890 | 0.00225 | import sys
import linecache
import time
import socket
import traceback
import thread
import threading
import Queue
from idlelib import CallTips
from idlelib import AutoComplete
from idlelib import RemoteDebugger
from idlelib import RemoteObjectBrowser
from idlelib import StackViewer
from idlelib import rpc
from idlelib import PyShell
from idlelib import IOBinding
import __main__
LOCALHOST = '127.0.0.1'
import warnings
def idle_showwarning_subproc(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning after replacing warnings.showwarning.
The only difference is the formatter called.
"""
if file is None:
file = sys.stderr
try:
file.write(PyShell.idle_formatwarning(
message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning_subproc, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning_subproc
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
# Thread shared globals: Establish a queue between a subthread (which handles
# the socket) and the main thread (which runs user code), plus global
# completion, exit and interruptable (the main thread) flags:
exit_now = False
quitting = False
interruptable = False
def main(del_exitfunc=False):
"""Start the Python execution server in a subprocess
In the Python subprocess, RPCServer is instantiated with handlerclass
MyHandler, which inherits register/unregister methods from RPCHandler via
the mix-in class SocketIO.
When the RPCServer 'server' is instantiated, the TCPServer initialization
creates an instance of run.MyHandler and calls its handle() method.
handle() instantiates a run.Executive object, passing it a reference to the
MyHandler object. That reference is saved as attribute rpchandler of the
Executive instance. The Executive methods have access to the reference and
can pass it on to entities that they command
(e.g. RemoteDebugger.Debugger.start_debugger()). The latter, in turn, can
call MyHandler(SocketIO) register/unregister methods via the reference to
register and unregister themselves.
"""
global exit_now
global quitting
global no_exitfunc
no_exitfunc = del_exitfunc
#time.sleep(15) # test subprocess not responding
try:
assert(len(sys.argv) > 1)
port = int(sys.argv[-1])
except:
print>>sys.stderr, "IDLE Subprocess: no IP port passed in sys.argv."
return
capture_warnings(True)
sys.argv[:] = [""]
sockthread = threading.Thread(target=manage_socket,
name='SockThread',
args=((LOCALHOST, port),))
sockthread.setDaemon(True)
sockthread.start()
while 1:
try:
if exit_now:
try:
exit()
except KeyboardInterrupt:
# exiting but got an extra KBI? Try again!
continue
try:
seq, request = rpc.request_queue.get(block=True, timeout=0.05)
except Queue.Empty:
continue
method, args, kwargs = request
ret = method(*args, **kwargs)
rpc.response_queue.put((seq, ret))
except KeyboardInterrupt:
if quitting:
exit_now = True
continue
except SystemExit:
capture_warnings(False)
raise
except:
type, value, tb = sys.exc_info()
try:
print_exception()
rpc.response_queue.put((seq, None))
except:
# Link didn't work, print same exception to __stderr__
traceback.print_exception(type, value, tb, file=sys.__stderr__)
exit()
else:
continue
def manage_socket(address):
for i in range(3):
time.sleep(i)
try:
server = MyRPCServer(address, MyHandler)
break
except socket.error as err:
print>>sys.__stderr__,"IDLE Subprocess: socket error: "\
+ err.args[1] + ", retrying...."
else:
print>>sys.__stderr__, "IDLE Subprocess: Connection to "\
"IDLE GUI failed, exiting."
show_socket_error(err, address)
global exit_now
exit_now = True
return
server.handle_request() # A single request only
def show_ | socket_error(err, address):
import Tkinter
import tkMessageBox
root = Tkinter.Tk()
root.withdraw()
if err.args[0] == 61: # connection refused
msg = "IDLE's subprocess can't connect to %s:%d. This may be due "\
"to your personal | firewall configuration. It is safe to "\
"allow this internal connection because no data is visible on "\
"external ports." % address
tkMessageBox.showerror("IDLE Subprocess Error", msg, parent=root)
else:
tkMessageBox.showerror("IDLE Subprocess Error",
"Socket Error: %s" % err.args[1], parent=root)
root.destroy()
def print_exception():
import linecache
linecache.checkcache()
flush_stdout()
efile = sys.stderr
typ, val, tb = excinfo = sys.exc_info()
sys.last_type, sys.last_value, sys.last_traceback = excinfo
tbe = traceback.extract_tb(tb)
print>>efile, '\nTraceback (most recent call last):'
exclude = ("run.py", "rpc.py", "threading.py", "Queue.py",
"RemoteDebugger.py", "bdb.py")
cleanup_traceback(tbe, exclude)
traceback.print_list(tbe, file=efile)
lines = traceback.format_exception_only(typ, val)
for line in lines:
print>>efile, line,
def cleanup_traceback(tb, exclude):
"Remove excluded traces from beginning/end of tb; get cached lines"
orig_tb = tb[:]
while tb:
for rpcfile in exclude:
if tb[0][0].count(rpcfile):
break # found an exclude, break for: and delete tb[0]
else:
break # no excludes, have left RPC code, break while:
del tb[0]
while tb:
for rpcfile in exclude:
if tb[-1][0].count(rpcfile):
break
else:
break
del tb[-1]
if len(tb) == 0:
# exception was in IDLE internals, don't prune!
tb[:] = orig_tb[:]
print>>sys.stderr, "** IDLE Internal Exception: "
rpchandler = rpc.objecttable['exec'].rpchandler
for i in range(len(tb)):
fn, ln, nm, line = tb[i]
if nm == '?':
nm = "-toplevel-"
if fn.startswith("<pyshell#") and IOBinding.encoding != 'utf-8':
ln -= 1 # correction for coding cookie
if not line and fn.startswith("<pyshell#"):
line = rpchandler.remotecall('linecache', 'getline',
(fn, ln), {})
tb[i] = fn, ln, nm, line
def flush_stdout():
try:
if sys.stdout.softspace:
sys.stdout.softspace = 0
sys.stdout.write("\n")
except (AttributeError, EOFError):
pass
def exit():
"""Exit subprocess, possibly after first deleting sys.exitfunc
If config-main.cfg/.def 'General' 'delete-exitfunc' is True, then any
sys.exitfunc will be removed before exiting. (VPython support)
"""
if no_exitfunc:
try:
del sys.exi |
harinisuresh/yelp-district-clustering | MapClusteringWithLDA.py | Python | mit | 895 | 0.011173 | """Cluster restaurants on map"""
import Clustering
import Map
import DataImporter
def create_topic_clusters_and_map(restaurants, restaurant_ids_to_topics, my_map, lda, use_human_labels=True):
data = Clustering.create | _data_array(restaurants, restaurant_ids_to_topics, my_map)
Clustering.plot_clusters(my_map, restaurants, rest | aurant_ids_to_topics, data, lda)
def run(my_map, reviews, restaurants):
restaurants = Clustering.filter_restaurants(restaurants)
normalized_restaurant_ids_to_topics, lda = Clustering.get_predictions(my_map, reviews, restaurants)
create_topic_clusters_and_map(restaurants, normalized_restaurant_ids_to_topics, my_map, lda)
def main():
my_map = Map.Map.vegas()
reviews = DataImporter.get_vegas_reviews()
restaurants = DataImporter.get_vegas_restaurants()
run(my_map, reviews, restaurants)
if __name__ == '__main__':
main()
|
citrix-openstack-build/sahara | sahara/plugins/mapr/versions/v4_0_1_mrv2/cluster_configurer.py | Python | apache-2.0 | 875 | 0 | # Copyright (c) 2014, | MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writ | ing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sahara.plugins.mapr.versions.base_cluster_configurer as bcc
class ClusterConfigurer(bcc.BaseClusterConfigurer):
def get_hadoop_conf_dir(self):
return '/opt/mapr/hadoop/hadoop-2.4.1/etc/hadoop'
def is_node_awareness_enabled(self):
return False
|
GarrettArm/TheDjangoBook | mysite_project/contact/urls.py | Python | gpl-3.0 | 212 | 0 | from django.urls import path
from . import views
app_name = "contact"
urlpatterns = [
path("thanks", view | s.ThanksView.as_view(), name="thanks"),
path("", views.ContactView.as_view(), | name="contact"),
]
|
HIPS/autograd | tests/test_scipy.py | Python | mit | 13,976 | 0.017602 | from __future__ import absolute_import
from builtins import range
from functools import partial
import numpy as npo
try:
import scipy
except:
from warnings import warn
warn('Skipping scipy tests.')
else:
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.signal
import autograd.scipy.stats as stats
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.special as special
import autograd.scipy.linalg as spla
import autograd.scipy.integrate as integrate
from autograd import grad
from scipy.signal import convolve as sp_convolve
from autograd.test_util import combo_check, check_grads
from numpy_utils import unary_ufunc_check
npr.seed(1)
R = npr.randn
U = npr.uniform
# Fwd mode not yet implemented for scipy functions
combo_check = partial(combo_check, modes=['rev'])
unary_ufunc_check = partial(unary_ufunc_check, modes=['rev'])
check_grads = partial(check_grads, modes=['rev'])
def symmetrize_matrix_arg(fun, argnum):
def T(X): return np.swapaxes(X, -1, -2) if np.ndim(X) > 1 else X
def symmetrize(X): return 0.5 * (X + T(X))
def symmetrized_fun(*args, **kwargs):
args = list(args)
args[argnum] = symmetrize(args[argnum])
return fun(*args, **kwargs)
return symmetrized_fun
### Stats ###
def test_chi2_pdf(): combo_check(stats.chi2.pdf, [0])([R(4)**2 + 1.1], [1, 2, 3])
def test_chi2_cdf(): combo_check(stats.chi2.cdf, [0])([R(4)**2 + 1.1], [1, 2, 3])
def test_chi2_logpdf(): combo_check(stats.chi2.logpdf, [0])([R(4)**2 + 1.1], [1, 2, 3])
def test_beta_cdf(): combo_check(stats.beta.cdf, [0]) ([U(0., 1., 4)], [R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_beta_pdf(): combo_check(stats.beta.pdf, [0,1,2])([U(0., 1., 4)], [R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_beta_logpdf(): combo_check(stats.beta.logpdf, [0,1,2])([U(0., 1., 4)], [R(4)**2 + 1.1], [R(4)**2 + 1.1])
|
def test_gamma_cdf(): combo_check(stats.gamma.cdf, [0]) ([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_gamma_pdf(): combo_check(stats.gamma.pdf, [0,1])([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_gamma_logpdf(): combo_check(stats.gamma.logpdf, [0,1])([R(4)**2 + 1.1], [R(4)**2 + 1.1])
def test_norm_pdf(): combo_check(stats.norm.pdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm | _cdf(): combo_check(stats.norm.cdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_sf(): combo_check(stats.norm.sf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logpdf(): combo_check(stats.norm.logpdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logcdf(): combo_check(stats.norm.logcdf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logsf(): combo_check(stats.norm.logsf, [0,1,2])([R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_pdf_broadcast(): combo_check(stats.norm.pdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_cdf_broadcast(): combo_check(stats.norm.cdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_sf_broadcast(): combo_check(stats.norm.cdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logpdf_broadcast(): combo_check(stats.norm.logpdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logcdf_broadcast(): combo_check(stats.norm.logcdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logsf_broadcast(): combo_check(stats.norm.logcdf, [0,1,2])([R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_poisson_cdf(): combo_check(stats.poisson.cdf, [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
def test_poisson_logpmf(): combo_check(stats.poisson.logpmf, [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
def test_poisson_pmf(): combo_check(stats.poisson.pmf, [1])([np.round(R(4)**2)], [R(4)**2 + 1.1])
def test_poisson_cdf_broadcast(): combo_check(stats.poisson.cdf, [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
def test_poisson_logpmf_broadcast(): combo_check(stats.poisson.logpmf, [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
def test_poisson_pmf_broadcast(): combo_check(stats.poisson.pmf, [1])([np.round(R(4, 3)**2)], [R(4, 1)**2 + 1.1])
def test_t_pdf(): combo_check(stats.t.pdf, [0,1,2,3])([R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_cdf(): combo_check(stats.t.cdf, [0,2])( [R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_logpdf(): combo_check(stats.t.logpdf, [0,1,2,3])([R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_logcdf(): combo_check(stats.t.logcdf, [0,2])( [R(4)], [R(4)**2 + 2.1], [R(4)], [R(4)**2 + 2.1])
def test_t_pdf_broadcast(): combo_check(stats.t.pdf, [0,1,2,3])([R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def test_t_cdf_broadcast(): combo_check(stats.t.cdf, [0,2])( [R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def test_t_logpdf_broadcast(): combo_check(stats.t.logpdf, [0,1,2,3])([R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def test_t_logcdf_broadcast(): combo_check(stats.t.logcdf, [0,2])( [R(4,3)], [R(1,3)**2 + 2.1], [R(4,3)], [R(4,1)**2 + 2.1])
def make_psd(mat): return np.dot(mat.T, mat) + np.eye(mat.shape[0])
def test_mvn_pdf(): combo_check(symmetrize_matrix_arg(mvn.pdf, 2), [0, 1, 2])([R(4)], [R(4)], [make_psd(R(4, 4))], allow_singular=[False])
def test_mvn_logpdf(): combo_check(symmetrize_matrix_arg(mvn.logpdf, 2), [0, 1, 2])([R(4)], [R(4)], [make_psd(R(4, 4))], allow_singular=[False])
def test_mvn_entropy():combo_check(mvn.entropy,[0, 1])([R(4)], [make_psd(R(4, 4))])
C = np.zeros((4, 4))
C[0, 0] = C[1, 1] = 1
# C += 1e-3 * np.eye(4)
def test_mvn_pdf_sing_cov(): combo_check(mvn.pdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True])
def test_mvn_logpdf_sing_cov(): combo_check(mvn.logpdf, [0, 1])([np.concatenate((R(2), np.zeros(2)))], [np.concatenate((R(2), np.zeros(2)))], [C], [True])
def test_mvn_pdf_broadcast(): combo_check(symmetrize_matrix_arg(mvn.pdf, 2), [0, 1, 2])([R(5, 4)], [R(4)], [make_psd(R(4, 4))])
def test_mvn_logpdf_broadcast(): combo_check(symmetrize_matrix_arg(mvn.logpdf, 2), [0, 1, 2])([R(5, 4)], [R(4)], [make_psd(R(4, 4))])
alpha = npr.random(4)**2 + 1.2
x = stats.dirichlet.rvs(alpha, size=1)[0,:]
# Need to normalize input so that x's sum to one even when we perturb them to compute numeric gradient.
def normalize(x): return x / sum(x)
def normalized_dirichlet_pdf( x, alpha): return stats.dirichlet.pdf( normalize(x), alpha)
def normalized_dirichlet_logpdf(x, alpha): return stats.dirichlet.logpdf(normalize(x), alpha)
def test_dirichlet_pdf_x(): combo_check(normalized_dirichlet_pdf, [0])([x], [alpha])
def test_dirichlet_pdf_alpha(): combo_check(stats.dirichlet.pdf, [1])([x], [alpha])
def test_dirichlet_logpdf_x(): combo_check(normalized_dirichlet_logpdf, [0])([x], [alpha])
def test_dirichlet_logpdf_alpha(): combo_check(stats.dirichlet.logpdf, [1])([x], [alpha])
### Misc ###
def test_logsumexp1(): combo_check(special.logsumexp, [0], modes=['fwd', 'rev'])([1.1, R(4), R(3,4)], axis=[None, 0], keepdims=[True, False])
def test_logsumexp2(): combo_check(special.logsumexp, [0], modes=['fwd', 'rev'])([R(3,4), R(4,5,6), R(1,5)], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp3(): combo_check(special.logsumexp, [0], modes=['fwd', 'rev'])([R(4)], b = [np.exp(R(4))], axis=[None, 0], keepdims=[True, False])
def test_logsumexp4(): combo_check(special.logsumexp, [0], modes=['fwd', 'rev'])([R(3,4),], b = [np.exp(R(3,4))], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp5(): combo_check(special.logsumexp, [0], modes=['fwd', 'rev'])([R(2,3,4)], b = [np.exp(R(2,3,4))], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp6():
x = npr.ra |
maestromusic/maestro | maestro/gui/treeview.py | Python | gpl-3.0 | 9,172 | 0.005778 | # -*- coding: utf-8 -*-
# Maestro Music Manager - https://github.com/maestromusic/maestro
# Copyright (C) 2009-2015 Martin Altmayer, Michael Helmling
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from maestro.gui import selection, actions
class TreeviewSelection(selection.Selection):
"""Objects of this class store a selection of nodes in a TreeView. Different than a QItemSelectionModel,
a Selection knows about Nodes, Elements etc and provides special methods to determine properties
of the selection. Actions can use this information to decide whether they are enabled or not.
*model* is a QItemSelectionModel.
"""
def __init__(self, level, model):
"""Initialize with the given *model* (instance of QItemSelectionModel). Computes and stores
all attributes."""
# Get the QAbstractItemModel from a QItemSelectionModel
super().__init__(level,[model.model().data(index) for index in model.selectedIndexes()])
self._model = model
def nodes(self, onlyToplevel=False):
"""Return all nodes that are currently selected. If *onlyToplevel* is True, nodes will be excluded
if an ancestor is also selected.
"""
if not onlyToplevel:
return self._nodes
else:
return [n for n in self._nodes
if not any(self._model.isSelected(self._model.model().getIndex(parent))
for parent in n.getParents())]
class TreeView(QtWidgets.QTreeView):
"""Base class for tree views that contain mostly wrappers. This class handles mainly the
ContextMenuProvider system, that allows plugins to insert entries into the context menus of playlist and
browser.
*level* is the level that contains all elements in the tree (never mix wrappers from different levels!)
*affectGlobalSelection* determines whether the treeview will change the global selection whenever nodes
in it are selected. This should be set to False for treeviews in dialogs.
"""
actionConf = actions.TreeActionConfiguration()
def __init__(self, level, parent=None, affectGlobalSelection=True):
super().__init__(parent)
self.level = level
self.affectGlobalSelection = affectGlobalSelection
self.setHeaderHidden(True)
self.setExpandsOnDoubleClick(False)
self.setAlternatingRowColors(True)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setDragEnabled(True)
self.setDefaultDropAction(Qt.CopyAction)
self.viewport().setMouseTracking(True)
self.treeActions = self.actionConf.createActions(self)
self.actionConf.actionDefinitionAdded.connect(self._handleActionDefAdded)
self.actionConf.actionDefinitionRemoved.connect(self._handleActionDefRemoved)
@classmethod
def addActionDefinition(cls, *args, **kwargs):
if 'actionConf' not in cls.__dict__:
cls.actionConf = actions.TreeActionConfiguration()
cls.actionConf.root.addActionDefinition(*args, **kwargs)
def _handleActionDefAdded(self, actionDef):
self.treeActions[actionDef.identifier] = actionDef.createAction(self)
self.addAction(self.treeActions[actionDef.identifier])
def _handleActionDefRemoved(self, name):
action = self.treeActions[name]
self.removeAction(action)
del self.treeActions[name]
def setModel(self, model):
super().setModel(model)
from . import delegates
if isinstance(self.itemDelegate(), delegates.abstractdelegate.AbstractDelegate):
self.itemDelegate().model = model
self.updateSelection()
def updateSelection(self):
selectionModel = self.selectionModel()
if selectionModel is not None: # happens if the view is empty
self.selection = TreeviewSelection(self.level, selectionModel)
for action in self.treeActions.values():
if isinstance(action, actions.TreeAction):
action.initialize(self.selection)
def localActions(self):
return [action for action in self.actions() if action not in self.treeActions.values()]
def contextMenuEvent(self, event):
menu = self.actionConf.createMenu(self)
for action in self.localActions():
menu.addAction(action)
if menu.isEmpty():
event.ignore()
else:
menu.popup(event.globalPos())
event.accept()
def selectionChanged(self, selected, deselected):
super().selectionChanged(selected, deselected)
self.updateSelection()
if self.affectGlobalSelection:
selection.setGlobalSelection(self.selection)
# def focusInEvent(self, event):
# super().focusInEvent(event)
# self.updateSelection() #TODO: raises a strange segfault bug without any exceptions
# if self.affectGlobalSelection:
# selection.setGlobalSelection(self.selection)
def currentNode(self):
current = self.currentIndex()
if current.isValid():
return current.internalPointer()
def selectedRanges(self):
"""Return the ranges of selected nodes. Each range is a 3-tuple of parent (which doesn't need to be
selected), first index of parent.contents that is selected and the last index that is selected.
"""
selection = self.selectionModel().selection()
return [(self.model().data(itemRange.parent()),itemRange.top(),itemRange.bottom())
for itemRange in selection]
class DraggingTreeView(TreeView):
"""This is the baseclass of tree views that allow to drag and drop wrappers, e.g. playlist and editor.
It handles the following issues:
- Drag&drop actions must be enclosed in one undo-macro.
- Drags between views of the same class default to a move, drags between different views to a copy.
Via the shift and control modifier this default can be overridden.
- Models might need to know when a drag&drop action is going on. For this DraggingTreeView will
call the methods startDrag and endDrag on models which provide them (both without arg | uments).
- Before dropMimeData is called a DraggingTreeView will set the attributes dndSource and dndTarget
| of the receiving model to the sending widget and itself. If the drag was started in an external
application, dndSource will be None.
"""
def __init__(self, level, parent=None, affectGlobalSelection=True):
super().__init__(level, parent, affectGlobalSelection)
self.setDefaultDropAction(Qt.MoveAction)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
@property
def stack(self):
"""Return the stack that is used for changes to this tree."""
from .. import stack
return stack.stack
def startDrag(self, supportedActions):
model = self.model()
self.stack.beginMacro("Drag and Drop")
if hasattr(model, 'startDrag'):
model.startDrag()
try:
super().startDrag(supportedActions)
finally:
if hasattr(model, 'endDrag'):
model.endDrag()
self.stack.endMacro(abortIfEmpty=True)
def _changeDropAction(self, event):
if event.keyboardModifiers() & Qt.ShiftModifier:
|
mick-d/nipype | nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py | Python | bsd-3-clause | 1,115 | 0.012556 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..featuredetection import DumpBinaryTrainingVectors
def test_DumpBinaryTrainingVectors_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputHeaderFilename=dict(argstr='--inputHeaderFilename %s',
),
inputVectorFilename=dict(argstr='--inputVectorFilename %s',
),
terminal_output=dict(deprecated='1.0.0',
no | hash=True,
),
)
inputs = DumpBinaryTrainingVectors.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_DumpBinaryTrainingVectors_outputs():
output_map = dict()
outputs = DumpBinaryTrainingVectors.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outpu | ts.traits()[key], metakey) == value
|
Chyroc/study-code | Language/Python/BuildInConstants.py | Python | mit | 170 | 0 | False |
True
None # 作为默认参数的初始值
NotImplemented
Ellipsis
# 以下不是用于项目的
'''
quit(code=None)
exit(code=None)
copyright
license
cr | edits
'''
|
shucommon/little-routine | python/python-crash-course/file_reader.py | Python | gpl-3.0 | 524 | 0.001908 | filename = 'pi_digits.txt'
with open(filename) as file_object:
| contents = file_object.read() # read all
#print(contents)
print(contents.rstrip())
with open(filename) as file_object:
for line in file_object: # read line by line
print(line.rstrip())
with open(filename) as file_object:
lines = file_object.readlines()
print(lines)
for line in lines:
print(line.rstrip())
filename = 'programming.txt'
with open(filename, 'w') | as file_object:
file_object.write("I love programming.")
|
swoopla/compose | tests/integration/service_test.py | Python | apache-2.0 | 48,237 | 0.00141 | from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
import tempfile
from distutils.spawn import find_executable
from os import path
import pytest
from docker.errors import APIError
from six import StringIO
from six import text_type
from .. import mock
from .testcases import DockerClientTestCase
from .testcases import get_links
from .testcases import pull_busybox
from compose import __version__
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_CONTAINER_NUMBER
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION
from compose.container import Container
from compose.errors import OperationFailedError
from compose.project import OneOffFilter
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy
from compose.service import NetworkMode
from compose.service import Service
from tests.integration.testcases import v2_1_only
from tests.integration.testcases import v2_only
from tests.integration.testcases import v3_only
def create_and_start_container(service, **override_options):
container = service.create_container(**override_options)
return service.start_container(container)
class ServiceTest(DockerClientTestCase):
def test_containers(self):
foo = self.create_service('foo')
bar = self.create_service('bar')
create_and_start_container(foo)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(foo.containers()[0].name, 'composetest_foo_1')
self.assertEqual(len(bar.containers()), 0)
create_and_start_container(bar)
create_and_start_container(bar)
self.assertEqual(len(foo.containers()), 1)
self.assertEqual(len(bar.containers()), 2)
names = [c.name for c in bar.containers()]
self.assertIn('composetest_bar_1', names)
self.assertIn('composetest_bar_2', names)
def test_containers_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(db.containers(stopped=True), [])
self.assertEqual(db.containers(one_off=OneOffFilter.only, stopped=True), [container])
def test_project_is_added_to_container_name(self):
service = self.create_service('web')
create_and_start_container(service)
self.assertEqual(service.containers()[0].name, 'composetest_web_1')
def test_create_container_with_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_one_off_when_existing_container_is_running(self):
db = self.create_service('db')
db.start()
container = db.create_container(one_off=True)
self.assertEqual(container.name, 'composetest_db_run_1')
def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
container = service.create_container()
service.start_container(container)
assert container.get_mount('/var/db')
def test_create_container_with_volume_driver(self):
service = self.create_service('db', volume_driver='foodriver')
container = service.create_container()
service.start_container(container)
self.assertEqual('foodriver', container.get('HostConfig.VolumeDriver'))
def test_create_container_with_cpu_shares(self):
service = self.create_service('db', cpu_shares=73)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpuShares'), 73)
def test_create_container_with_cpu_quota(self):
service = self.create_service('db', cpu_quota=40000)
container = service.create_container()
container.start()
self.assertEqual(container.get('HostConfig.CpuQuota'), 40000)
def test_create_container_with_shm_size(self):
self.require_api_version('1.22')
service = self.create_service('db', shm_size=67108864)
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
def test_create_container_with_init_bool(self):
self.require_api_version('1.25')
service = self.create_service('db', init=True)
container = service.create_container()
service.start_container(container)
assert container.get('HostConfig.Init') is True
def test_create_container_with_init_path(self):
self.require_api_version('1.25')
docker_init_path = find_executable('docker-init')
service = self.create_service('db', init=docker_init_path)
container = service.create_container()
service.start_container(container)
assert container.get('HostConfig.InitPath') == docker_init_path
@pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit')
def test_create_container_with_pids_limit(self):
self.require_api_version('1.23')
service = self.create_service('db', pids_limit=10)
container = service.create_container()
service.start_container(container)
assert container.get('HostConfig.PidsLimit') == 10
def test_create_container_with_extra_hosts_list(self):
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
def test_create_container_with_extra_hosts_dicts(self):
extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
def test_create_container_with_cpu_set(self):
service = self.create_service('db', cpuset='0')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
def test_create_container_with_read_only_root_fs(sel | f):
read_only = True
service = self.create_service('db', read_only=read_only)
container = service.create_container()
service.start_container(container)
assert container.get('Ho | stConfig.ReadonlyRootfs') == read_only
def test_create_container_with_security_opt(self):
security_opt = ['label:disable']
service = self.create_service('db', security_opt=security_opt)
container = service.create_container()
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
def test_create_container_with_mac_address(self):
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
container = service.create_container()
service.start_container(container)
self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
def test_create_container_with_specified_volume(self):
host_path = '/tmp/host-path'
container_path = '/container-path'
service = self.create_service(
'db',
volumes=[VolumeSpec(host_path, container_path, 'rw')])
container = service.create_container()
service.start_container(container)
assert container.get_mount(container_path)
# Match the last component ("host-path"), because boot2docker symlinks /tmp
|
shannpersand/cooper-type | _resources/FDK Adobe/Tools/FontLabMacros/MM Designs/SaveFilesForMakeInstances.py | Python | cc0-1.0 | 17,235 | 0.026748 | #FLM: Save Files for MakeInstances
###################################################
### THE VALUES BELOW CAN BE EDITED AS NEEDED ######
###################################################
kDefaultMMFontFileName = "mmfont.pfa"
kInstancesDataFileName = "instances"
kCompositeDataName = "temp.composite.dat"
###################################################
__copyright__ = """
Copyright 2014-2016 Adobe Systems Incorporated (http://www.adobe.com/). All Rights Reserved.
This software is licensed as OpenSource, under the Apache License, Version 2.0. This license is available at: http://opensource.org/licenses/Apache-2.0.
"""
__doc__ = """
Save Files for MakeInstances v2.0 - April 12 2016
This script will do part of the work to create a set of single-master fonts
("instances") from a Multiple Master (MM) FontLab font. It will save a
Type 1 MM font (needed by the makeInstances program) and, in some cases,
a text file named 'temp.composite.dat' that contains data related with
composite glyphs.
You must then run the makeInstances program to actually build the instance Type 1
fonts. makeInstances can remove working glyphs, and rename MM-exception glyphs.
It will also do overlap removal, and autohint the instance fonts. This last is
desirable, as autohinting which is specific to an instance font is usually
significantly better than the hinting from interpolating the MM font hints.
As always with overlap removal, you should check all affected glyphs - it
doesn't always do the right thing.
Note that the makeInstances program can be run alone, given an MM Type1 font
file. However, if you use the ExceptionSuffixes keyword, then you must run
this script first. The script will make a file that identifies composite glyphs,
and allows makeInstances to correctly substitute contours in the composite glyph
from the exception glyph. This is necessary because FontLab cannot write all the
composite g | lyphs as Type 1 composites (also known as SEAC glyphs). This script
must be run again to renew this data file whenever changes are made to composite
glyphs.
Both this sc | ript and the "makeInstances" program depend on info provided by an
external text file named "instances", which contains all the instance-specific
values. The "instances" file must be a simple text file, located in the same
folder as the MM FontLab file.
For information on how to format the "instances" file, please read the
documentation in the InstanceGenerator.py script.
==================================================
Versions:
v2.0 - Apr 12 2016 - Added step to fix the MM FontBBox values of the mmfont.pfa file,
when the VFB's UPM value is not 1000 (long-standing FontLab bug).
v1.0 - Feb 15 2010 - Initial release
"""
import copy
import re
import os
kFieldsKey = "#KEYS:"
kFamilyName = "FamilyName"
kFontName = "FontName"
kFullName = "FullName"
kWeight = "Weight"
kCoordsKey = "Coords"
kIsBoldKey = "IsBold" # This is changed to kForceBold in the instanceDict when reading in the instance file.
kForceBold = "ForceBold"
kIsItalicKey = "IsItalic"
kExceptionSuffixes = "ExceptionSuffixes"
kExtraGlyphs = "ExtraGlyphs"
kFixedFieldKeys = {
# field index: key name
0:kFamilyName,
1:kFontName,
2:kFullName,
3:kWeight,
4:kCoordsKey,
5:kIsBoldKey,
}
kNumFixedFields = len(kFixedFieldKeys)
kBlueScale = "BlueScale"
kBlueShift = "BlueShift"
kBlueFuzz = "BlueFuzz"
kBlueValues = "BlueValues"
kOtherBlues = "OtherBlues"
kFamilyBlues = "FamilyBlues"
kFamilyOtherBlues = "FamilyOtherBlues"
kStdHW = "StdHW"
kStdVW = "StdVW"
kStemSnapH = "StemSnapH"
kStemSnapV = "StemSnapV"
kAlignmentZonesKeys = [kBlueValues, kOtherBlues, kFamilyBlues, kFamilyOtherBlues]
kTopAlignZonesKeys = [kBlueValues, kFamilyBlues]
kMaxTopZonesSize = 14 # 7 zones
kBotAlignZonesKeys = [kOtherBlues, kFamilyOtherBlues]
kMaxBotZonesSize = 10 # 5 zones
kStdStemsKeys = [kStdHW, kStdVW]
kMaxStdStemsSize = 1
kStemSnapKeys = [kStemSnapH, kStemSnapV]
kMaxStemSnapSize = 12 # including StdStem
class ParseError(ValueError):
pass
def validateArrayValues(arrayList, valuesMustBePositive):
for i in range(len(arrayList)):
try:
arrayList[i] = eval(arrayList[i])
except (NameError, SyntaxError):
return
if valuesMustBePositive:
if arrayList[i] < 0:
return
return arrayList
def readInstanceFile(instancesFilePath):
f = open(instancesFilePath, "rt")
data = f.read()
f.close()
lines = data.splitlines()
i = 0
parseError = 0
keyDict = copy.copy(kFixedFieldKeys)
numKeys = kNumFixedFields
numLines = len(lines)
instancesList = []
for i in range(numLines):
line = lines[i]
# Skip over blank lines
line2 = line.strip()
if not line2:
continue
# Get rid of all comments. If we find a key definition comment line, parse it.
commentIndex = line.find('#')
if commentIndex >= 0:
if line.startswith(kFieldsKey):
if instancesList:
print "ERROR: Header line (%s) must preceed a data line." % kFieldsKey
raise ParseError
# parse the line with the field names.
line = line[len(kFieldsKey):]
line = line.strip()
keys = line.split('\t')
keys = map(lambda name: name.strip(), keys)
numKeys = len(keys)
k = kNumFixedFields
while k < numKeys:
keyDict[k] = keys[k]
k +=1
continue
else:
line = line[:commentIndex]
continue
# Must be a data line.
fields = line.split('\t')
fields = map(lambda datum: datum.strip(), fields)
numFields = len(fields)
if (numFields != numKeys):
print "ERROR: In line %s, the number of fields %s does not match the number of key names %s (FamilyName, FontName, FullName, Weight, Coords, IsBold)." % (i+1, numFields, numKeys)
parseError = 1
continue
instanceDict= {}
#Build a dict from key to value. Some kinds of values needs special processing.
for k in range(numFields):
key = keyDict[k]
field = fields[k]
if not field:
continue
if field in ["Default", "None", "FontBBox"]:
# FontBBox is no longer supported - I calculate the real
# instance fontBBox from the glyph metrics instead,
continue
if key == kFontName:
value = field
elif key in [kExtraGlyphs, kExceptionSuffixes]:
value = eval(field)
elif key in [kIsBoldKey, kIsItalicKey, kCoordsKey]:
try:
value = eval(field) # this works for all three fields.
if key == kIsBoldKey: # need to convert to Type 1 field key.
instanceDict[key] = value
# add kForceBold key.
key = kForceBold
if value == 1:
value = "true"
else:
value = "false"
elif key == kIsItalicKey:
if value == 1:
value = "true"
else:
value = "false"
elif key == kCoordsKey:
if type(value) == type(0):
value = (value,)
except (NameError, SyntaxError):
print "ERROR: In line %s, the %s field has an invalid value." % (i+1, key)
parseError = 1
continue
elif field[0] in ["[","{"]: # it is a Type 1 array value. Turn it into a list and verify that there's an even number of values for the alignment zones
value = field[1:-1].split() # Remove the begin and end brackets/braces, and make a list
if key in kAlignmentZonesKeys:
if len(value) % 2 != 0:
print "ERROR: In line %s, the %s field does not have an even number of values." % (i+1, key)
parseError = 1
continue
if key in kTopAlignZonesKeys: # The Type 1 spec only allows 7 top zones (7 pairs of values)
if len(value) > kMaxTopZonesSize:
print "ERROR: In line %s, the %s field has more than %d values." % (i+1, key, kMaxTopZonesSize)
parseError = 1
continue
else:
newArray = validateArrayValues(value, False) # False = values do NOT have to be all positive
if newArray:
value = newArray
else:
print "ERROR: In line %s, the %s field contains invalid values." % (i+1, key)
parseError = 1
continue
currentArray = value[:] # make copy, not reference
value.sort()
if currentArray != value:
print "WARNING: In line %s, the values in the %s field were sorted in ascending order." % (i+1, key)
if key in kBotAlignZonesKeys: # The Type 1 spec only allows |
tadhg-ohiggins/regulations-parser | regparser/notice/amendments/utils.py | Python | cc0-1.0 | 237 | 0 | def label_amdpar_from(instruction_xml):
label_parts = instruction_xml.get('label', '').split('-')
# <AMDPAR><EREGS_INSTRUCTIONS><INSTR | UCTION>...
amdpar = instruction_xml.getparent().getparent()
return label_par | ts, amdpar
|
pombredanne/PyGithub | github/Notification.py | Python | gpl-3.0 | 5,674 | 0.005111 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Peter Golm <golm.peter@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Softw | are Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Le | sser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.Repository
import github.NotificationSubject
class Notification(github.GithubObject.CompletableGithubObject):
"""
This class represents Notifications. The reference can be found here http://developer.github.com/v3/activity/notifications/
"""
@property
def id(self):
"""
:type: string
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def last_read_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._last_read_at)
return self._last_read_at.value
@property
def repository(self):
"""
:type: :class:`github.Repository.Repository`
"""
self._completeIfNotSet(self._repository)
return self._repository.value
@property
def subject(self):
"""
:type: :class:`github.NotificationSubject.NotificationSubject`
"""
self._completeIfNotSet(self._subject)
return self._subject.value
@property
def reason(self):
"""
:type: string
"""
self._completeIfNotSet(self._reason)
return self._reason.value
@property
def subscription_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscription_url)
return self._subscription_url.value
@property
def unread(self):
"""
:type: bool
"""
self._completeIfNotSet(self._unread)
return self._unread.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def _initAttributes(self):
self._id = github.GithubObject.NotSet
self._last_read_at = github.GithubObject.NotSet
self._repository = github.GithubObject.NotSet
self._reason = github.GithubObject.NotSet
self._subscription_url = github.GithubObject.NotSet
self._unread = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "last_read_at" in attributes: # pragma no branch
self._last_read_at = self._makeDatetimeAttribute(attributes["last_read_at"])
if "repository" in attributes: # pragma no branch
self._repository = self._makeClassAttribute(github.Repository.Repository, attributes["repository"])
if "subject" in attributes: # pragma no branch
self._subject = self._makeClassAttribute(github.NotificationSubject.NotificationSubject, attributes["subject"])
if "reason" in attributes: # pragma no branch
self._reason = self._makeStringAttribute(attributes["reason"])
if "subscription_url" in attributes: # pragma no branch
self._subscription_url = self._makeStringAttribute(attributes["subscription_url"])
if "unread" in attributes: # pragma no branch
self._unread = self._makeBoolAttribute(attributes["unread"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
|
cgwalters/mock | py/mockbuild/plugins/yum_cache.py | Python | gpl-2.0 | 4,641 | 0.00237 | # -*- coding: utf-8 -*-
# vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:textwidth=0:
# License: GPL2 or later see COPYING
# Written by Michael Brown
# Copyright (C) 2007 Michael E Brown <mebrown@michaels-house.net>
# python library imports
import fcntl
import glob
import os
import time
# our imports
from mockbuild.mounts import BindMountPoint
from mockbuild.trace_decorator import getLog, traceLog
import mockbuild.util
# set up logging, module options
requires_api_version = "1.1"
# plugin entry point
@traceLog()
def init(plugins, conf, buildroot):
YumCache(plugins, conf, buildroot)
class YumCache(object):
""" mount /var/cache/yum or /var/cache/dnf of your machine to chroot """
METADATA_EXTS = (".sqlite", ".xml", ".bz2", ".gz", ".xz", ".solv", ".solvx")
@traceLog()
def __init__(self, plugins, conf, buildroot):
self.buildroot = buildroot
self.config = buildroot.config
self.state = buildroot.state
self.yum_cache_opts = conf
self.yum_cache_opts['package_manager'] = self.config['package_manager']
self.yumSharedCachePath = self.yum_cache_opts['dir'] % self.yum_cache_opts
self.target_path = self.yum_cache_opts['target_dir'] % self.yum_cache_opts
self.online = self.config['online']
plugins.add_hook("preyum", self._yumCachePreYumHook)
plugins.add_hook("postyum", self._yumCachePostYumHook)
plugins.add_hook("preinit", self._yumCachePreInitHook)
buildroot.mounts.add(BindMountPoint(srcpath=self.yumSharedCachePath,
bindpath=buildroot.make_chroot_path(self.target_path)))
mockbuild.util.mkdirIfAbsent(self.yumSharedCachePath)
self.yumCacheLock = open(os.path.join(self.yumSharedCachePath, "yumcache.lock"), "a+")
# =============
# 'Private' API
# =============
# lock the shared yum cache (when enabled) before any access
# by yum, and prior to cleaning it. This prevents simultaneous access from
# screwing things up. This can possibly happen, eg. when running multiple
# mock instances with --uniqueext=
@traceLog()
def _yumCachePreYumHook(self):
try:
fcntl.lockf(self.yumCacheLock.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
self.state.start("Waiting for yumcache lock")
fcntl.lockf(self.yumCacheLock.fileno(), fcntl.LOCK_EX)
self.state.finish("Waiting for yumcache lock")
@traceLog()
def _yumCachePostYumHook(self):
fcntl.lockf(self.yumCacheLock.fileno(), fcntl.LOCK_UN)
def _format_pm(self, s):
return s.format(pm=self.config['package_manager'])
@traceLog()
def _yumCachePreInitHook(self):
getLog().info(self._format_pm("enabled {pm} cache"))
mockbuild.util.mkdirIfAbsent(self.buildroot.make_chroot_path(self.target_path))
# lock so others dont accidentally use yum cache while we operate on it.
self._yumCachePreYumHook()
if self.online:
state = self._format_pm("cleaning {pm} metadata")
self.state.start( | state)
for (dirpath, _, filenames) in os.walk(self.yumSharedCachePath):
for filename in filenames:
fullPath = os.path.join(dirpath, filename)
statinfo = os.stat(fullPath)
file_age_days = (time.time() - statinfo.st_ctime) / (60 * 60 * 24)
# prune repodata so yum redownloads.
# prevents certain errors where yum gets stuck due to bad metadata
| for ext in self.METADATA_EXTS:
if filename.endswith(ext) and file_age_days > self.yum_cache_opts['max_metadata_age_days']:
os.unlink(fullPath)
fullPath = None
break
if fullPath is None:
continue
if file_age_days > self.yum_cache_opts['max_age_days']:
os.unlink(fullPath)
continue
self.state.finish(state)
# yum made an rpmdb cache dir in $cachedir/installed for a while;
# things can go wrong in a specific mock case if this happened.
# So - just nuke the dir and all that's in it.
if os.path.exists(self.yumSharedCachePath + '/installed'):
for fn in glob.glob(self.yumSharedCachePath + '/installed/*'):
os.unlink(fn)
os.rmdir(self.yumSharedCachePath + '/installed')
self._yumCachePostYumHook()
|
MatthewWilkes/mw4068-packaging | src/melange/src/melange/profiler.py | Python | apache-2.0 | 119 | 0.033613 | import gae_django
from soc.profiling import viewer
def main():
| viewer.ma | in()
if __name__ == '__main__':
main()
|
maartenq/ansible | lib/ansible/modules/cloud/vmware/vmware_host_firewall_manager.py | Python | gpl-3.0 | 8,047 | 0.002734 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations about an ESXi host
description:
- This module can be used to manage firewall configurations about an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Firewall settings are applied to every ESXi host system in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
- A list of Rule set which needs to be managed.
- Each member of list is rule set name and state to be set the rule.
- Both rule name and rule state are required parameters.
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
delegate_to: localhost
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": true,
"desired_state": true,
"previous_state": true
},
"vvold": {
"current_state": true,
"desired_state": true,
"previous_state": true
}
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareFirewallManager(PyVmomi):
def __init__(self, module):
super(VmwareFirewallManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.options = self.params.get('options', dict())
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.firewall_facts = dict()
self.rule_options = self.module.params.get("rules")
self.gather_rule_set()
def gather_rule_set(self):
for host in self.hosts:
self.firewall_facts[host.name] | = {}
firewall_system = host.configManager.firewallSystem
if firewall_system:
for rule_set_obj in firewall_system.firewallInfo.ruleset:
temp_rule_dict = dict()
| temp_rule_dict['enabled'] = rule_set_obj.enabled
self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict
def ensure(self):
"""
Function to ensure rule set configuration
"""
fw_change_list = []
results = dict(changed=False, rule_set_state=dict())
for host in self.hosts:
firewall_system = host.configManager.firewallSystem
if firewall_system is None:
continue
results['rule_set_state'][host.name] = dict()
for rule_option in self.rule_options:
rule_name = rule_option.get('name', None)
if rule_name is None:
self.module.fail_json(msg="Please specify rule.name for rule set"
" as it is required parameter.")
if rule_name not in self.firewall_facts[host.name]:
self.module.fail_json(msg="rule named '%s' wasn't found." % rule_name)
rule_enabled = rule_option.get('enabled', None)
if rule_enabled is None:
self.module.fail_json(msg="Please specify rules.enabled for rule set"
" %s as it is required parameter." % rule_name)
current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
if current_rule_state != rule_enabled:
try:
if rule_enabled:
firewall_system.EnableRuleset(id=rule_name)
else:
firewall_system.DisableRuleset(id=rule_name)
fw_change_list.append(True)
except vim.fault.NotFound as not_found:
self.module.fail_json(msg="Failed to enable rule set %s as"
" rule set id is unknown : %s" % (rule_name,
to_native(not_found.msg)))
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(msg="Failed to enabled rule set %s as an internal"
" error happened while reconfiguring"
" rule set : %s" % (rule_name,
to_native(host_config_fault.msg)))
results['rule_set_state'][host.name][rule_name] = dict(current_state=rule_enabled,
previous_state=current_rule_state,
desired_state=rule_enabled,
)
if any(fw_change_list):
results['changed'] = True
self.module.exit_json(**results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
rules=dict(type='list', default=list(), required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
]
)
vmware_firewall_manager = VmwareFirewallManager(module)
vmware_firewall_manager.ensure()
if __name__ == "__main__":
main()
|
xuelians/djmoney | balance/migrations/0003_auto_20170809_0736.py | Python | gpl-3.0 | 457 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-08-09 07:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration): |
dependencies = [
('balance', '0002_auto_20170809_0734'),
]
operations = [
migrations.AlterField(
model_name='transaction',
n | ame='date',
field=models.DateField(verbose_name='Date'),
),
]
|
xod442/sample_scripts | flo2_pid.py | Python | gpl-2.0 | 7,035 | 0.017342 | #!/usr/bin/env python
#--------------------------------------------------------------------------
# flo2_pid.py
# Rick Kauffman a.k.a. Chewie
#
# Hewlett Packard Company Revision: 1.0
# ~~~~~~~~~ WookieWare ~~~~~~~~~~~~~
# Change history....09/03/2014
#
#
##--------------------------------------------------------------------------
# Initial release - Pulls VARS from webform.
# build a database of all dpids not in glarn
# Calls glarn chooser deletes dpids
#
#
#------Might not need this but please they are handy------------------------
#
# Do the imports!!!!
#----------------------If you dont have it use "apt-get install (name)"
import sys
import subprocess
import cgi
import cgitb; cgitb.enable()
import hpsdnclient as hp
import sqlite3
import requests
from requests.auth import HTTPDigestAuth
import xml.etree.ElementTree as xml
# import pdb; pdb.set_trace()
#-------------------------------------------------------------------------
# Get the field VARS from the calling HTML form
#-------------------------------------------------------------------------
form = cgi.FieldStorage()
server = form.getvalue('server')
user = form.getvalue('user')
passw = form.getvalue('passw')
imc_server = form.getvalue('imc_server')
imc_user = form.getvalue('imc_user')
imc_passw = form.getvalue('imc_passw')
pid_list = form.getvalue('list_o_pids')
imc = form.getvalue('imc')
if pid_list == None:
print "Content-type:text/html\r\n\r\n"
print "<!DOCTYPE html>"
print "<html>"
print "<head>"
print "<title> Wookieware.com</title>"
print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
print "</head>"
print "<body>"
print "<h1> <img src=\"../../images/glarn.png\" width=\"50\" height=\"50\">glarn: The dpid database</h1>"
print "<HR> "
print "<h1> No items selected</h1>"
print "<FORM method='post' ACTION=\"./pid_main.py\">"
print "<h3> List is empty </h3>"
print "<p> Click button below to go back to the system chooser</p>"
print "<hr>"
print "<input type=\"submit\" style=\"font-face: 'Arial'; font-size: larger; color: black; background-color: #0066FF; border: 3pt ridge lightgrey\" value=\" Main Menu\">"
print "<input type=\"hidden\" name=\"server\" value=%s>" % (server)
print "<input type=\"hidden\" name=\"user\" value=%s>" % (user)
print "<input type=\"hidden\" name=\"passw\" value=%s>" % (passw)
print "<input type=\"hidden\" name=\"imc_server\" value=%s>" % (imc_server)
print "<input type=\"hidden\" name=\"imc_user\" value=%s>" % (imc_user)
print "<input type=\"hidden\" name=\"imc_passw\" value=%s>" % (imc_passw)
print "<input type=\"hidden\" name=\"imc\" value=%s>" % (imc)
print "<p>For more information on how to use this application <a href=\"/faq.html\">User Guide</a></p>"
print "<center><font face=\"Arial\" size=\"1\">SDN Solutions From WookieWare 2014</font></center>"
print "</body>"
print "</html>"
#glarn.close()
sys.exit()
x = len(pid_list) # Keep track of how many items we need to process
# Check to see if anything was chozen. If x is zero goto Nothing Selected page and exit
j = 0
#Create authorization Token for the SDN controller
auth = hp.XAuthToken(user=user,password=passw,server=server)
api=hp.Api(controller=server,auth=auth)
#--------------------------------------------------------------------------
# dpid factory: Break up dpis and match to vendor to determin MAC address
#---------------------------------------------------------------------------
print "Content-type:text/html\r\n\r\n"
print "<!DOCTYPE html>"
print "<html>"
print "<head>"
print "<title> Wookieware.com</title>"
print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
print "</head>"
print "<body>"
print "<h1> <img src=\"../../images/glarn.png\" width=\"50\" height=\"50\">glarn: The dpid database</h1>"
print "<HR> "
print "<h3> Dpid flows display</h3>"
print "<p>List of current flows by dpid"
print "<FORM method='post' ACTION=\"./pid_main.py\">"
# Delete records in database
if x == 23:#only one entry (dpids are 23 chars long)
try:
flows = api.get_flows(pid_list)
print "<h1>Flows for dpid %s:</h1>" % (pid_list)
print "<table border=\"1\" cellpadding=\"3\" class=\"TFtable\">"
print "<tr> <td> Host MAC address </td> <td> Destination MAC address </td> <td> Output Port </td> </tr>"
for f in flows:
eth_src = f.match.eth_src
eth_dst = f.match.eth_dst
action = f.actions.output
print "<tr> <td> %s </td> <td> %s </td> <td> %s </td> </tr>" % (eth_src, eth_dst, action)
print "</table>"
except:
print "<h1>Error getting dpid information %s</h1>" % (pid_list)
elif j == 0:
for i in pid_list:
flows = api.get_flows(i)
print "<h1>Flows for dpid %s:</h1>" % (i)
print "<table border=\"1\" cellpadding=\"3\" class=\"TFtable\">"
print "<tr> <td> Host MAC address </td> <td> Destination MAC address </td> <td> Output Port </td> </tr>"
for f in flows:
eth_src = f.match.eth_src
eth_dst = f.match.eth_dst
action = f.actions.output
print "<tr> <td> %s </td> <td> %s </td> <td> %s </td> </tr>" % (eth_src, eth_dst, action)
print "</table>"
#--------------------------------------------------------------------------
# Finish manual or go home
#---------------------------------------------------------------------------
#print "Content-type:text/html\r\n\r\n"
#print "<!DOCTYPE html>"
#print "<html>"
#print "<head>"
#print "<title> Wookieware.com</title>"
#print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
#print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
#print "</head>"
print "<HR>"
print "<input type=\"submit\" style=\"font-face: 'Arial'; font-size: larger; color: black; background-color: #0066FF; border: 3pt ridge lightgrey\" value=\" Main Menu\">"
print "<input type=\"hidden\" name=\"server\" value=%s>" % (server)
print "<input type=\"hidden\" name=\"user\" value=%s>" % (user)
print "<input type=\"hidden\" name=\"passw\" value=%s>" % (passw)
print | "<input type=\"hidden\" name=\"imc_server\" value=%s>" % (imc_server)
print "<input type=\"hidden\" name=\ | "imc_user\" value=%s>" % (imc_user)
print "<input type=\"hidden\" name=\"imc_passw\" value=%s>" % (imc_passw)
print "<input type=\"hidden\" name=\"imc\" value=%s>" % (imc)
print "</form>"
print "<footer>"
print "<p>For more information on how to use this application <a href=\"/faq.html\">User Guide</a></p>"
print "<a href=\"/index.html\">BACK</a>"
print "<center><font face=\"Arial\" size=\"1\">SDN Solutions From WookieWare 2014</font></center>"
print "</footer>"
print "</body>"
print "</html>"
sys.exit()
|
adrianhindes/cavity-sml | createOnlyNoise.py | Python | mit | 2,479 | 0.002824 | # Running Finesse
# Running Finesse (kept for parity with the full pipeline; unused here)
import subprocess
# Editing kat file (unused in this script)
import fileinput
# Copying files (unused in this script)
import shutil
# Adding Gaussian noise
from skimage import io, util
# Navigating directories
import os
# Cropping raw images (unused in this script)
from PIL import Image
# Keras preprocessing package for augmentation
from keras.preprocessing.image import ImageDataGenerator, array_to_img, \
    img_to_array, load_img

'''
Just generate noisy images, if raw ones have already
been generated by Finesse
'''

# Highest Hermite-Gauss mode index considered (modes 0..maxMode-1 per axis).
maxMode = 6
# Define data folders
dataFolder = 'rawData'
newDataFolder = 'newData'
# Image extension
ext = '.png'
# No. images to generate per raw image
imageNum = 100

# Preprocessing
# Data generators, using Keras
trainDatagen = ImageDataGenerator(
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

# NOTE(review): testDatagen is defined but never used below -- confirm
# whether a validation batch was also meant to be generated.
testDatagen = ImageDataGenerator(
    rotation_range=25,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.3,
    zoom_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest')

files = os.listdir(dataFolder)
imageList = [x for x in files if '.png' in x]
# Two-digit mode labels "00".."55" used both to select input files and to
# prefix the generated images.
possibleModes = [str(m) + str(n) for m in range(maxMode) for n in range(maxMode)]

# Create file structure if it does not exist
if not os.path.exists(newDataFolder):
    os.mkdir(newDataFolder)

# For every raw cavity image, emit imageNum augmented variants into
# newDataFolder, prefixed with the mode label.
print('Preprocessing raw data set')
for mode in possibleModes:
    for image in [x for x in files if 'cavity' + mode in x]:
        print('Generating batch ' + image)
        # Consistency fix: use dataFolder instead of the hard-coded 'rawData/'.
        loaded = load_img(dataFolder + '/' + image)
        array = img_to_array(loaded)
        array = array.reshape((1,) + array.shape)
        i = 0
        for batch in trainDatagen.flow(array, batch_size=1, save_to_dir=newDataFolder,
                                       save_prefix=mode, save_format='png'):
            i += 1
            # Stop after exactly imageNum images; the original `if i > imageNum`
            # produced imageNum + 1 images per raw input.
            if i >= imageNum:
                break

newFiles = os.listdir(newDataFolder)
# Adding noise
def noisy(img):
    """Return *img* with additive Gaussian noise, clipped to the valid range."""
    # Bug fix: operate on the `img` argument; the original referenced the
    # module-level `loaded` variable and silently ignored its parameter.
    return util.random_noise(img, mode='gaussian', clip=True)

for image in newFiles:
    loaded = io.imread(newDataFolder + '/' + image)
    # Pass the decoded pixel array; the original passed the file name, which
    # only worked because noisy() ignored its argument.
    io.imsave(newDataFolder + '/' + image, noisy(loaded))
|
hlzz/dotfiles | graphics/VTK-7.0.0/IO/Geometry/Testing/Python/Plot3DVectors.py | Python | bsd-3-clause | 3,828 | 0.000522 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# All Plot3D vector functions
#
# Create the RenderWindow, Renderer and both Actors
#
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
ren1 = vtk.vtkRenderer()
ren1.SetBackground(.8, .8, .2)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
vectorLabels = ["Velocity", "Vorticity", "Momentum", "Pressure_Gradient"]
vectorFunctions = ["200", "201", "202", "210"]
camera = vtk.vtkCamera()
light = vtk.vtkLight()
# All text actors will share the same text prop
textProp = vtk.vtkTextProperty()
textProp.SetFontSize(10)
textProp.SetFontFamilyToArial()
textProp.SetColor(.3, 1, 1)
i = 0
for vectorFunction in vectorFunctions:
exec("pl3d" + vectorFunction + " = vtk.vtkMultiBlockPLOT3DReader()")
eval("pl3d" + vectorFunction).SetXYZFileName(
VTK_DATA_ROOT + "/Data/bluntfinxyz.bin")
eval("pl3d" + vectorFunction).SetQFileName(
VTK_DATA_ROOT + "/Data/bluntfinq.bin")
eval("pl3d" + vectorFunction).SetVectorFunctionNumber(int(vectorFunction))
eval("pl3d" + vectorFunction).Update()
output = eval("pl3d" + vectorFunction).GetOutput().GetBlock(0)
exec("plane" + vectorFunction + " = vtk.vtkStructuredGridGeometryFilter()")
eval("plane" + vectorFunction).SetInputData(output)
eval("plane" + vectorFunction).SetExtent(25, 25, 0, 100, 0, 100)
exec("hog" + vectorFunction + " = vtk.vtkHedgeHog()")
eval("hog" + vectorFunction).SetInputConnection(
eval("plane" + vectorFunction).GetOutputPort())
maxnorm = output.GetPointData().GetVectors().GetMaxNorm()
|
eval("hog" + vectorFunction).SetScaleFactor(1.0 / maxnorm)
exec("mapper" + vectorFunction + " = vtk.vtkPolyDataMapper()")
eval("mapper" + vectorFunction).SetInputConnection(
eval("hog" + vectorFunction).GetOutputPort())
exec("actor" + vectorFunction + " = vtk.vtkActor()")
eval | ("actor" + vectorFunction).SetMapper(eval("mapper" + vectorFunction))
exec("ren" + vectorFunction + " = vtk.vtkRenderer()")
eval("ren" + vectorFunction).SetBackground(0.5, .5, .5)
eval("ren" + vectorFunction).SetActiveCamera(camera)
eval("ren" + vectorFunction).AddLight(light)
renWin.AddRenderer(eval("ren" + vectorFunction))
eval("ren" + vectorFunction).AddActor(eval("actor" + vectorFunction))
exec("textMapper" + vectorFunction + " = vtk.vtkTextMapper()")
eval("textMapper" + vectorFunction).SetInput(vectorLabels[i])
eval("textMapper" + vectorFunction).SetTextProperty(textProp)
exec("text" + vectorFunction + " = vtk.vtkActor2D()")
eval("text" + vectorFunction).SetMapper(eval("textMapper" + vectorFunction))
eval("text" + vectorFunction).SetPosition(2, 5)
eval("ren" + vectorFunction).AddActor2D(eval("text" + vectorFunction))
i += 1
#
# now layout renderers
column = 1
row = 1
deltaX = 1.0 / 2.0
deltaY = 1.0 / 2.0
for vectorFunction in vectorFunctions:
eval("ren" + vectorFunction).SetViewport(
(column - 1) * deltaX + (deltaX * .05),
(row - 1) * deltaY + (deltaY * .05),
column * deltaX - (deltaX * .05),
row * deltaY - (deltaY * .05))
column += 1
if (column > 2):
column = 1
row += 1
camera.SetViewUp(1, 0, 0)
camera.SetFocalPoint(0, 0, 0)
camera.SetPosition(.4, -.5, -.75)
ren200.ResetCamera()
camera.Dolly(1.25)
for vectorFunction in vectorFunctions:
eval("ren" + vectorFunction).ResetCameraClippingRange()
light.SetPosition(camera.GetPosition())
light.SetFocalPoint(camera.GetFocalPoint())
renWin.SetSize(350, 350)
renWin.Render()
iren.Initialize()
#iren.Start()
|
kashifpk/pyckapps.visit_counter | lib/dates.py | Python | apache-2.0 | 519 | 0.001927 | "Dates related functions"
from datetime import datetime, timedelta
def process_date_range(date_from, date_to):
    """Parse a string date range into ``datetime`` objects.

    Both arguments are strings in ``yyyy-mm-dd`` format; ``date_to`` may be
    falsy (``None`` or ``''``).

    - ``date_to`` is inclusive: one day is added so the returned upper bound
      can be used as an exclusive limit in range queries.
    - If only ``date_from`` is given, the range covers that single day
      (``date_to`` defaults to ``date_from``).

    :param date_from: start date string, ``yyyy-mm-dd``
    :param date_to: end date string, ``yyyy-mm-dd``, or falsy
    :returns: ``(date_from, date_to)`` tuple of ``datetime`` objects, where
        ``date_to`` is the start of the day *after* the requested end date
    :raises ValueError: if either string does not match ``yyyy-mm-dd``
    """
    if not date_to:
        date_to = date_from
    date_from = datetime.strptime(date_from, '%Y-%m-%d')
    # +1 day makes the inclusive end date usable as an exclusive bound.
    date_to = datetime.strptime(date_to, '%Y-%m-%d') + timedelta(days=1)
    return (date_from, date_to)
borringrafael/fichas-equipes-ab | setup/wsgi.py | Python | gpl-3.0 | 399 | 0 | """
WSGI config for setup project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/h | owto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_ | application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "setup.production_settings")
application = get_wsgi_application()
|
institution/keyplay | keycode.py | Python | agpl-3.0 | 2,490 | 0.059438 | # coding: utf-8
# keycode table --------------------------------------------------------
# Linux input-event keycode table: (numeric code, key name) pairs, following
# the KEY_* names from the kernel's input-event-codes.h (gaps in the numeric
# sequence mirror unassigned codes).
g_keycodes = [
    (1, 'ESC'),
    (2, '1'),
    (3, '2'),
    (4, '3'),
    (5, '4'),
    (6, '5'),
    (7, '6'),
    (8, '7'),
    (9, '8'),
    (10, '9'),
    (11, '0'),
    (12, 'MINUS'),
    (13, 'EQUAL'),
    (14, 'BACKSPACE'),
    (15, 'TAB'),
    (16, 'Q'),
    (17, 'W'),
    (18, 'E'),
    (19, 'R'),
    (20, 'T'),
    (21, 'Y'),
    (22, 'U'),
    (23, 'I'),
    (24, 'O'),
    (25, 'P'),
    (26, 'LEFTBRACE'),
    (27, 'RIGHTBRACE'),
    (28, 'ENTER'),
    (29, 'LEFTCTRL'),
    (30, 'A'),
    (31, 'S'),
    (32, 'D'),
    (33, 'F'),
    (34, 'G'),
    (35, 'H'),
    (36, 'J'),
    (37, 'K'),
    (38, 'L'),
    (39, 'SEMICOLON'),
    (40, 'APOSTROPHE'),
    (41, 'GRAVE'),
    (42, 'LEFTSHIFT'),
    (43, 'BACKSLASH'),
    (44, 'Z'),
    (45, 'X'),
    (46, 'C'),
    (47, 'V'),
    (48, 'B'),
    (49, 'N'),
    (50, 'M'),
    (51, 'COMMA'),
    (52, 'DOT'),
    (53, 'SLASH'),
    (54, 'RIGHTSHIFT'),
    (55, 'KPASTERISK'),
    (56, 'LEFTALT'),
    (57, 'SPACE'),
    (58, 'CAPSLOCK'),
    (59, 'F1'),
    (60, 'F2'),
    (61, 'F3'),
    (62, 'F4'),
    (63, 'F5'),
    (64, 'F6'),
    (65, 'F7'),
    (66, 'F8'),
    (67, 'F9'),
    (68, 'F10'),
    (69, 'NUMLOCK'),
    (70, 'SCROLLLOCK'),
    (71, 'KP7'),
    (72, 'KP8'),
    (73, 'KP9'),
    (74, 'KPMINUS'),
    (75, 'KP4'),
    (76, 'KP5'),
    (77, 'KP6'),
    (78, 'KPPLUS'),
    (79, 'KP1'),
    (80, 'KP2'),
    (81, 'KP3'),
    (82, 'KP0'),
    (83, 'KPDOT'),
    (85, 'ZENKAKUHANKAKU'),
    (86, '102ND'),
    (87, 'F11'),
    (88, 'F12'),
    (89, 'RO'),
    (90, 'KATAKANA'),
    (91, 'HIRAGANA'),
    (92, 'HENKAN'),
    (93, 'KATAKANAHIRAGANA'),
    (94, 'MUHENKAN'),
    (95, 'KPJPCOMMA'),
    (96, 'KPENTER'),
    (97, 'RIGHTCTRL'),
    (98, 'KPSLASH'),
    (99, 'SYSRQ'),
    (100, 'RIGHTALT'),
    (102, 'HOME'),
    (103, 'UP'),
    (104, 'PAGEUP'),
    (105, 'LEFT'),
    (106, 'RIGHT'),
    (107, 'END'),
    (108, 'DOWN'),
    (109, 'PAGEDOWN'),
    (110, 'INSERT'),
    (111, 'DELETE'),
    (112, 'MACRO'),
    (113, 'MUTE'),
    (114, 'VOLUMEDOWN'),
    (115, 'VOLUMEUP'),
    (116, 'POWER'),
    (117, 'KPEQUAL'),
    (118, 'KPPLUSMINUS'),
    (119, 'PAUSE'),
    (121, 'KPCOMMA'),
    (122, 'HANGUEL'),
    (123, 'HANJA'),
    (124, 'YEN'),
    (125, 'LEFTMETA'),
    (126, 'RIGHTMETA'),
    (127, 'COMPOSE'),
    (128, 'STOP'),
    (140, 'CALC'),
    (142, 'SLEEP'),
    (143, 'WAKEUP'),
    (155, 'MAIL'),
    (156, 'BOOKMARKS'),
    (157, 'COMPUTER'),
    (158, 'BACK'),
    (159, 'FORWARD'),
    (163, 'NEXTSONG'),
    (164, 'PLAYPAUSE'),
    (165, 'PREVIOUSSONG'),
    (166, 'STOPCD'),
    (172, 'HOMEPAGE'),
    (173, 'REFRESH'),
    (183, 'F13'),
    (184, 'F14'),
    (185, 'F15'),
    (217, 'SEARCH'),
    (226, 'MEDIA'),
]

# Reverse lookup: key name -> numeric keycode.
g_keycode = dict((v, k) for (k, v) in g_keycodes)


def get_keycode(ident):
    """Return the numeric keycode for the key name *ident*.

    :raises KeyError: if *ident* is not a known key name.
    """
    return g_keycode[ident]
|
alphagov/notifications-api | migrations/versions/0178_add_filename.py | Python | mit | 1,400 | 0.007143 | """
Revision ID: 0178_add_filename
Revises: 0177_add_virus_scan_statuses
Create Date: 2018-03-14 16:15:01.886998
"""
from alembic import op
import sqlalchemy as sa
revision = '0178_add_filename'
down_revision = '0177_add_virus_scan_statuses'
def upgrade():
    """Add ``file_name`` to ``daily_sorted_letter``.

    Makes ``(file_name, billing_day)`` the unique key and relaxes the old
    unique index on ``billing_day`` to a plain index.
    """
    # Deleting the data here is ok because a full migration from the files on s3 is coming.
    op.execute("DELETE FROM daily_sorted_letter")
    op.add_column('daily_sorted_letter', sa.Column('file_name', sa.String(), nullable=True))
    op.create_index(op.f('ix_daily_sorted_letter_file_name'), 'daily_sorted_letter', ['file_name'], unique=False)
    op.create_unique_constraint('uix_file_name_billing_day', 'daily_sorted_letter', ['file_name', 'billing_day'])
    # Recreate the billing_day index as non-unique: uniqueness is now
    # enforced by the (file_name, billing_day) constraint above.
    op.drop_index('ix_daily_sorted_letter_billing_day', table_name='daily_sorted_letter')
    op.create_index(op.f('ix_daily_sorted_letter_billing_day'), 'daily_sorted_letter', ['billing_day'], unique=False)
def downgrade():
    """Reverse the ``upgrade``: drop ``file_name`` and restore the unique
    ``billing_day`` index."""
    op.drop_index(op.f('ix_daily_sorted_letter_billing_day'), table_name='daily_sorted_letter')
    op.create_index('ix_daily_sorted_letter_billing_day', 'daily_sorted_letter', ['billing_day'], unique=True)
    op.drop_constraint('uix_file_name_billing_day', 'daily_sorted_letter', type_='unique')
    op.drop_index(op.f('ix_daily_sorted_letter_file_name'), table_name='daily_sorted_letter')
    op.drop_column('daily_sorted_letter', 'file_name')
|
SKA-ScienceDataProcessor/algorithm-reference-library | tests/processing_components/test_griddata_operations.py | Python | apache-2.0 | 1,227 | 0.005705 | """ Unit tests for image operations
"""
import logging
import unittest
import numpy
from processing_components.griddata.operations import create_griddata_from_image, convert_griddata_to_image
from processing_components.simulation.testing_support import crea | te_test_image
log = logging.getLogger(__name__)
class TestGridData(unittest.TestCase):
    """Tests for the image <-> griddata conversion helpers."""

    def setUp(self):
        from data_models.parameters import arl_path
        self.dir = arl_path('test_results')
        # Small synthetic M31 test image; cellsize is presumably in radians
        # (the degree conversion below suggests so) -- confirm.
        self.m31image = create_test_image(cellsize=0.0001)
        self.cellsize = 180.0 * 0.0001 / numpy.pi

    def test_create_griddata_from_image(self):
        # The griddata axes 0, 1, 3, 4 must mirror the image axes 0..3.
        m31model_by_image = create_griddata_from_image(self.m31image)
        assert m31model_by_image.shape[0] == self.m31image.shape[0]
        assert m31model_by_image.shape[1] == self.m31image.shape[1]
        assert m31model_by_image.shape[3] == self.m31image.shape[2]
        assert m31model_by_image.shape[4] == self.m31image.shape[3]

    def test_convert_griddata_to_image(self):
        # NOTE(review): this test has no assertions -- it only checks that the
        # round trip does not raise. Consider asserting on m31_converted.
        m31model_by_image = create_griddata_from_image(self.m31image)
        m31_converted = convert_griddata_to_image(m31model_by_image)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
Slightgen/SG-remote | setup.py | Python | gpl-3.0 | 1,378 | 0.001451 | #!/usr/bin/env python
# -*- coding: utf-8 -*-

from setuptools import setup
import os

# Install-time side effects: copy the LIRC configuration and logo into place.
# NOTE(review): os.system return codes are ignored and the /etc targets need
# root privileges -- confirm this belongs in setup.py rather than a
# post-install script.
os.system('cp config/.lircrc ~/.lircrc')
os.system('cp config/hardware.conf /etc/lirc/hardware.conf')
os.system('cp config/lircd.conf /etc/lirc/lircd.conf')
os.system('cp config/lircmd.conf /etc/lirc/lircmd.conf')
os.system('mkdir ~/.slightgen')
os.system('cp img/logo.png ~/.slightgen/logo.png')

setup(
    name='SG-remote',
    version='1.0.0',
    url='https://github.com/Slightgen/SG-remote',
    author='girish joshi',
    author_email='girish946@gmail.com',
    description=('interface for ir remote controllers provided with slight-gen minicomp'
                 'operate the desktop using ir remote'),
    license='GPLV3',
    packages=['remote'],
    test_suite='',
    install_requires=['pyautogui', 'python-lirc', 'clipboard'],
    keywords="operate mate-desktop ir-remote-controller",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ],
    scripts=['r-controller']
)
|
noironetworks/aci-integration-module | aim/db/migration/alembic_migrations/versions/baccabeffa81_remove_fks.py | Python | apache-2.0 | 4,936 | 0.000203 | # Copyright (c) 2017 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove AIM DB tables' dipendencies
Revision ID: baccabeffa81
Revises: de3ed29972f1
Create Date: 2016-07-07 15:29:38.013141
"""
# revision identifiers, used by Alembic.
revision = 'baccabeffa81'
down_revision = 'de3ed29972f1'
branch_labels = None
depends_on = None
from alembic import op


def upgrade():
    """Drop every foreign-key constraint from the AIM tables.

    Uses ``batch_alter_table`` so the drop also works on backends (e.g.
    SQLite) that cannot alter constraints in place.
    """
    FK = 'foreignkey'
    # (table, constraint) pairs, preserved in the original drop order.
    fk_constraints = [
        ('aim_bridge_domains', 'fk_bd_tn'),
        ('aim_subnets', 'fk_bd'),
        ('aim_vrfs', 'fk_vrf_tn'),
        ('aim_app_profiles', 'fk_ap_tn'),
        ('aim_endpoint_groups', 'fk_app_profile'),
        ('aim_filters', 'fk_flt_tn'),
        ('aim_filter_entries', 'fk_filter'),
        ('aim_contracts', 'fk_brc_tn'),
        ('aim_contract_subjects', 'fk_contract'),
        ('aim_endpoints', 'fk_epg'),
        ('aim_l3outsides', 'fk_l3o_tn'),
        ('aim_external_networks', 'fk_l3out'),
        ('aim_external_subnets', 'fk_ext_net'),
        ('aim_vmm_controllers', 'fk_vmm_controller_vmm_domain'),
        ('aim_vmm_inj_deployments', 'fk_inj_depl_inj_ns'),
        ('aim_vmm_inj_replica_sets', 'fk_inj_repl_set_inj_ns'),
        ('aim_vmm_inj_services', 'fk_inj_service_inj_ns'),
        ('aim_vmm_inj_cont_groups', 'fk_inj_group_inj_ns'),
        ('aim_device_clusters', 'fk_ldc_tn'),
        ('aim_device_cluster_ifs', 'fk_dci_dc'),
        ('aim_concrete_devices', 'fk_conc_dev_dc'),
        ('aim_concrete_device_ifs', 'fk_conc_dev_if_conc_dev'),
        ('aim_service_graph_connections', 'fk_sgc_sg'),
        ('aim_service_graph_nodes', 'fk_sgn_sg'),
        ('aim_service_graphs', 'fk_svcgr_tn'),
        ('aim_service_redirect_policies', 'fk_srp_tn'),
        ('aim_device_cluster_contexts', 'fk_dcctx_tn'),
        ('aim_device_cluster_if_contexts', 'fk_dc_if_ctx_dcctx'),
        ('aim_security_group_subjects', 'fk_sg_subject'),
        ('aim_security_group_rules', 'fk_sg_rule'),
        ('aim_security_groups', 'fk_sg_tn'),
    ]
    for table, constraint in fk_constraints:
        with op.batch_alter_table(table) as batch_op:
            batch_op.drop_constraint(constraint, type_=FK)
def downgrade():
    # Irreversible by design: the dropped FK constraints are not recreated.
    pass
|
umlfri/umlfri2 | umlfri2/application/commands/diagram/moveconnectionlabel.py | Python | gpl-3.0 | 1,001 | 0.007992 | from umlfri2.application.commands.base import Command
from umlfri2.application.events.diagram import ConnectionMovedEvent
class MoveConnectionLabelCommand(Command):
    """Undoable command that shifts a connection label by a fixed offset."""

    def __init__(self, connection_label, delta):
        """
        :param connection_label: label to move
        :param delta: offset to apply to the label position
        """
        self.__diagram_name = connection_label.connection.diagram.get_display_name()
        self.__connection_label = connection_label
        self.__delta = delta
        # Captured on first execution so the move can be undone.
        self.__label_position = None

    @property
    def description(self):
        """Human-readable summary of this command."""
        return "Moved label on connection in diagram {0}".format(self.__diagram_name)

    def _do(self, ruler):
        # Remember where the label started, then apply the offset.
        self.__label_position = self.__connection_label.get_position(ruler)
        self._redo(ruler)

    def _redo(self, ruler):
        self.__connection_label.move(ruler, self.__label_position + self.__delta)

    def _undo(self, ruler):
        self.__connection_label.move(ruler, self.__label_position)

    def get_updates(self):
        # Consumers listen for this event to refresh the diagram view.
        yield ConnectionMovedEvent(self.__connection_label.connection)
|
hcwiley/the-front | the_front/the_front/artist/migrations/0003_auto__add_field_artistmedia_is_default_image.py | Python | gpl-2.0 | 5,455 | 0.007883 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``ArtistMedia.is_default_image``."""

    def forwards(self, orm):
        # Adding field 'ArtistMedia.is_default_image'
        db.add_column(u'artist_artistmedia', 'is_default_image',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'ArtistMedia.is_default_image'
        db.delete_column(u'artist_artistmedia', 'is_default_image')

    # Frozen ORM snapshot used by South; generated, do not edit by hand.
    models = {
        u'artist.artist': {
            'Meta': {'object_name': 'Artist'},
            'artist_statement': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'artist.artistmedia': {
            'Meta': {'object_name': 'ArtistMedia'},
            'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['artist.Artist']"}),
            'full_res_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'is_default_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'video_link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['artist']
CarlosTenorio/vespapp-web | api/migrations/0019_auto_20160420_1833.py | Python | gpl-3.0 | 766 | 0.001305 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-20 16:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter ``Answer.question`` (FK options) and ``Answer.value`` (verbose name)."""

    dependencies = [
        ('api', '0018_auto_20160420_1231'),
    ]

    operations = [
        migrations.AlterField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_answer', to='api.Question', verbose_name='Pregunta'),
        ),
        migrations.AlterField(
            model_name='answer',
            name='value',
            field=models.CharField(max_length=128, verbose_name='Respuesta'),
        ),
    ]
|
fstagni/DIRAC | FrameworkSystem/scripts/dirac-admin-proxy-upload.py | Python | gpl-3.0 | 650 | 0.009231 | #!/usr/bin/env python
########################################################################
# File :    dirac-admin-proxy-upload.py
# Author : Adrian Casajus
########################################################################
from __future__ import print_function
import sys
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyUpload import CLIParams, uploadProxy
__RCSID__ = "$Id$"
if __name__ == "__main__":
cliParams = CLIParams()
cliParams.registerCLISwitches()
Script.parseCommandLine()
retVal = uploadProxy(cliParams) |
if not retVal['OK']:
print(retVal['Message'])
sys.exit(1)
sys.exit(0)
|
spulec/moto | tests/test_cloudformation/fixtures/ec2_classic_eip.py | Python | apache-2.0 | 64 | 0 | template = {"Res | ources": {"EC2EIP": {"Type": "AWS::EC2::EIP" | }}}
|
RedHatQE/rhui-testing-tools | rhuilib/rhuimanager_sync.py | Python | gpl-3.0 | 3,081 | 0.004869 | """ RHUIManager Sync functions """
import re
from stitches.expect import Expect
from rhuilib.rhuimanager import RHUIManager
from rhuilib.util import Util
class RHUIManagerSync(object):
    '''
    Represents -= Synchronization Status =- RHUI screen
    '''
    @staticmethod
    def sync_cds(connection, cdslist):
        '''
        sync an individual CDS immediately

        :param connection: stitches connection to the RHUI host
        :param cdslist: list of CDS hostnames to schedule
        '''
        RHUIManager.screen(connection, "sync")
        Expect.enter(connection, "sc")
        RHUIManager.select(connection, cdslist)
        RHUIManager.proceed_with_check(connection, "The following CDS instances will be scheduled for synchronization:", cdslist)
        RHUIManager.quit(connection)

    @staticmethod
    def sync_cluster(connection, clusterlist):
        '''
        sync a CDS cluster immediately

        :param clusterlist: list of CDS cluster names to schedule
        '''
        RHUIManager.screen(connection, "sync")
        Expect.enter(connection, "sl")
        RHUIManager.select(connection, clusterlist)
        RHUIManager.proceed_with_check(connection, "The following CDS clusters will be scheduled for synchronization:", clusterlist)
        RHUIManager.quit(connection)

    @staticmethod
    def get_cds_status(connection, cdsname):
        '''
        display CDS sync summary

        Returns the de-colorized status fields parsed from the
        "display CDS" screen for *cdsname*.
        '''
        RHUIManager.screen(connection, "sync")
        Expect.enter(connection, "dc")
        # NOTE(review): this regex was reassembled after data corruption in
        # the source -- confirm it matches the rhui-manager screen output.
        res_list = Expect.match(connection, re.compile(".*\n" + cdsname.replace(".", "\.") + "[\.\s]*\[([^\n]*)\].*" + cdsname.replace(".", "\.") + "\s*\r\n([^\n]*)\r\n", re.DOTALL), [1, 2], 60)
        # rhui-manager does not exit by itself; interrupt it explicitly.
        connection.cli.exec_command("killall -s SIGINT rhui-manager")
        ret_list = [Util.uncolorify(val.strip())
                    for val in [res_list[0]] + res_list[1].split(" ")]
        RHUIManager.quit(connection)
        return ret_list

    @staticmethod
    def sync_repo(connection, repolist):
        '''
        sync an individual repository immediately
        '''
        RHUIManager.screen(connection, "sync")
        Expect.enter(connection, "sr")
        Expect.expect(connection, "Select one or more repositories.*for more commands:", 60)
        Expect.enter(connection, "l")
        RHUIManager.select(connection, repolist)
        RHUIManager.proceed_with_check(connection, "The following repositories will be scheduled for synchronization:", repolist)
        RHUIManager.quit(connection)

    @staticmethod
    def get_repo_status(connection, reponame):
        '''
        display repo sync summary

        Returns the de-colorized, stripped status fields for *reponame*.
        '''
        RHUIManager.screen(connection, "sync")
        Expect.enter(connection, "dr")
        reponame_quoted = reponame.replace(".", "\.")
        res = Expect.match(connection, re.compile(".*" + reponame_quoted + "\s*\r\n([^\n]*)\r\n.*", re.DOTALL), [1], 60)[0]
        connection.cli.exec_command("killall -s SIGINT rhui-manager")
        res = Util.uncolorify(res)
        ret_list = [field.strip() for field in res.split(" ")]
        RHUIManager.quit(connection)
        return ret_list
|
kubevirt/vAdvisor | tests/store/test_event.py | Python | gpl-3.0 | 1,720 | 0 | from vadvisor.store.event import InMemoryStore
import pytest
from freezegun import freeze_time
from datetime import datetime, timedelta
@pytest.fixture
@freeze_time("2012-01-14 03:00:00")
def expired_store():
    """Store with a 60-second TTL holding three events that are already
    stale relative to the later frozen test times."""
    store = InMemoryStore(60)
    # Insert old data
    store.put('old')
    store.put('old')
    store.put('old')
    return store
@pytest.fixture
@freeze_time("2012-01-14 03:01:30")
def new_store(expired_store):
    """Extend the expired store with three events young enough to survive."""
    for _ in range(3):
        expired_store.put('new')
    return expired_store
@pytest.fixture
@freeze_time("2012-01-14 03:01:50")
def newest_store(new_store):
    """Extend the store with a third, even younger batch of events."""
    for _ in range(3):
        new_store.put('newest')
    return new_store
def test_empty_store():
    """A freshly created store yields no events."""
    event_store = InMemoryStore()
    assert event_store.get() == []
@freeze_time("2012-01-14 03:02:00")
def test_expire_on_get(expired_store):
    """Reading the store drops events older than the retention period."""
    expired_store.get()  # first read triggers the expiry sweep
    assert expired_store.get() == []
@freeze_time("2012-01-14 03:02:00")
def test_get_all_new(new_store):
    """Without filters, every non-expired event is returned."""
    expected = ['new'] * 3
    assert new_store.get() == expected
@freeze_time("2012-01-14 03:02:00")
def test_get_two_new(new_store):
    """The *elements* argument caps how many events are returned."""
    assert new_store.get(elements=2) == ['new', 'new']
@freeze_time("2012-01-14 03:02:00")
def test_get_not_older_than(newest_store):
    """*start_time* filters out events older than the given timestamp."""
    cutoff = datetime.utcnow() - timedelta(seconds=20)
    events = newest_store.get(elements=2, start_time=cutoff)
    assert events == ['newest', 'newest']
@freeze_time("2012-01-14 03:02:00")
def test_get_not_newer_than(newest_store):
    """*stop_time* filters out events newer than the given timestamp."""
    events = newest_store.get(
        elements=2,
        stop_time=datetime.utcnow() - timedelta(seconds=20)
    )
    assert events == ['new', 'new']
|
Nadeflore/dakara-player-vlc | tests/test_background_loader.py | Python | mit | 9,182 | 0.002069 | from unittest import TestCase
from unittest.mock import call, patch
from path import Path
from dakara_player_vlc.background_loader import (
BackgroundLoader,
BackgroundNotFoundError,
)
class BackgroundLoaderTestCase(TestCase):
"""Test the loader for backgrounds
"""
@patch(
    "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
)
def test_load_default_name_default_directory(self, mocked_exists):
    """Test to load one default background from default directory
    """
    # create the instance
    loader = BackgroundLoader(
        default_directory=Path("default"),
        default_background_filenames={"background": "background.png"},
    )

    # pre assert that there are no backgrounds
    self.assertDictEqual(loader.backgrounds, {})

    # load the backgrounds
    loader.load()

    # assert the backgrounds
    self.assertDictEqual(
        loader.backgrounds,
        {"background": Path("default/background.png").normpath()},
    )

    # assert the call of the mocked method
    mocked_exists.assert_called_with(Path("default/background.png").normpath())
@patch(
    "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
)
def test_load_default_name_custom_directory(self, mocked_exists):
    """Check loading a default-named background from a custom directory."""
    bg_loader = BackgroundLoader(
        directory=Path("custom"),
        default_directory=Path("default"),
        default_background_filenames={"background": "background.png"},
    )

    # no backgrounds before loading
    self.assertDictEqual(bg_loader.backgrounds, {})

    bg_loader.load()

    # the default file name is resolved inside the custom directory
    expected_path = Path("custom/background.png").normpath()
    self.assertDictEqual(bg_loader.backgrounds, {"background": expected_path})
    mocked_exists.assert_called_with(expected_path)
@patch(
    "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
)
def test_load_custom_name_custom_directory(self, mocked_exists):
    """Check loading a custom-named background from a custom directory."""
    bg_loader = BackgroundLoader(
        directory=Path("custom"),
        background_filenames={"background": "custom.png"},
        default_directory=Path("default"),
        default_background_filenames={"background": "background.png"},
    )

    # no backgrounds before loading
    self.assertDictEqual(bg_loader.backgrounds, {})

    bg_loader.load()

    # the custom file name wins over the default one
    expected_path = Path("custom/custom.png").normpath()
    self.assertDictEqual(bg_loader.backgrounds, {"background": expected_path})
    mocked_exists.assert_called_with(expected_path)
@patch(
    "dakara_player_vlc.background_loader.exists", return_value=True, autospec=True
)
def test_load_custom_name_default_directory(self, mocked_exists):
    """Check a custom name with only a default directory available.

    Should fall back to the default background of the default directory.
    """
    bg_loader = BackgroundLoader(
        default_directory=Path("default"),
        default_background_filenames={"background": "background.png"},
        background_filenames={"background": "other.png"},
    )

    # no backgrounds before loading
    self.assertDictEqual(bg_loader.backgrounds, {})

    bg_loader.load()

    # without a custom directory the default file is used
    expected_path = Path("default/background.png").normpath()
    self.assertDictEqual(bg_loader.backgrounds, {"background": expected_path})
    mocked_exists.assert_called_with(expected_path)
@patch("dakara_player_vlc.background_loader.exists", autospec=True)
def test_load_fallback_default_name_custom_directory(self, mocked_exists):
    """Check fallback to the default name within the custom directory.

    The custom-named file is missing, so the loader retries with the
    default file name in the same custom directory.
    """
    bg_loader = BackgroundLoader(
        directory=Path("custom"),
        background_filenames={"background": "custom.png"},
        default_directory=Path("default"),
        default_background_filenames={"background": "background.png"},
    )

    # first candidate is missing, second exists
    mocked_exists.side_effect = [False, True]

    # no backgrounds before loading
    self.assertDictEqual(bg_loader.backgrounds, {})

    bg_loader.load()

    fallback_path = Path("custom/background.png").normpath()
    self.assertDictEqual(bg_loader.backgrounds, {"background": fallback_path})
    mocked_exists.assert_has_calls(
        [
            call(Path("custom/custom.png").normpath()),
            call(fallback_path),
        ]
    )
@patch("dakara_player_vlc.background_loader.exists", autospec=True)
def test_load_fallback_default_name_default_directory(self, mocked_exists):
    """Check fallback to the default name in the default directory.

    Both candidates in the custom directory are missing, so the loader
    falls back to the default file in the default directory.
    """
    bg_loader = BackgroundLoader(
        directory=Path("custom"),
        background_filenames={"background": "custom.png"},
        default_directory=Path("default"),
        default_background_filenames={"background": "background.png"},
    )

    # both custom-directory candidates missing, the default one exists
    mocked_exists.side_effect = [False, False, True]

    # no backgrounds before loading
    self.assertDictEqual(bg_loader.backgrounds, {})

    bg_loader.load()

    fallback_path = Path("default/background.png").normpath()
    self.assertDictEqual(bg_loader.backgrounds, {"background": fallback_path})
    mocked_exists.assert_has_calls(
        [
            call(Path("custom/custom.png").normpath()),
            call(Path("custom/background.png").normpath()),
            call(fallback_path),
        ]
    )
@patch("dakara_player_vlc.background_loader.exists", autospec=True)
def test_load_error(self, mocked_exists):
"""Test to load one unexisting background
Was initially trying to load one custom background from custom directory.
"""
# create the instance
loader = BackgroundLoader(
directory=Path("custom"),
background_filenames={"background": "custom.png"},
default_directory=Path("default"),
default_background_filenames={"background": "background.png"},
)
# setup mock
mocked_exists.side_effect = [False, False, False]
# pre assert that there are no backgrounds
self.assertDictEqual(loader.backgrounds, {})
# load the backgrounds
with self.assertRaises(BackgroundNotFoundError) as error:
loader.load()
# assert the error
self.assertEqual(
str(error.exception), "Unable to find a background file for background"
)
# assert the backgrounds
self.assertDictEqual(loader.backgrounds, {})
# assert the call of the mocked method
mocked_exists.assert_has_calls(
[
call(Path("custom/custom.png").normpath()),
call(Path("custom/background.png").normpath()),
cal |
CygnusNetworks/nssct | nssct/controller.py | Python | gpl-2.0 | 2,163 | 0.02589 | # -*- encoding: utf-8 -*-
import logging
from . import report
logger = logging.getLogger(__name__)
class Controller(object):
    """The controller keeps the pieces (engine, collector, and plugins)
    together. The collector is just passed on to the plugins, the controller
    does not operate itself on a collector. A plugin is a function that takes
    references to the controller and the collector and returns a future.
    Plugins will access the engine attribute of the controller to query SNMP
    OIDs. They can also use the start_plugin method to start further plugins.

    The main reason to use a controller object instead of just starting
    plugins is to notice when a plugin fails to complete. Without the
    controller, a missing callback invocation could abort a plugin without
    anything noticing.
    """

    def __init__(self, engine):
        self.engine = engine
        # Futures of plugins that were started but have not completed yet.
        self.pending_plugins = []

    def start_plugin(self, collector, plugin):
        """Start the given plugin with the given collector.

        Any failure of the plugin (at start or on completion) is reported
        as a CRITICAL alert on the collector instead of propagating.

        @type collector: Collector
        """
        logger.debug("starting plugin %r", plugin)

        def completion(fut):
            # Invoked by the future when the plugin finishes; removes it
            # from the pending list and converts failures into alerts.
            self.pending_plugins.remove(fut)
            try:
                fut.result()
            except Exception as exc:
                logger.error("plugin %r failed to complete due to %r", plugin, exc, exc_info=True)
                collector.add_alert(report.Alert(report.CRITICAL, "plugin %r failed to complete with error %r" % (plugin, exc)))
            else:
                logger.debug("completed plugin %r", plugin)

        try:
            fut = plugin(self, collector)
        except Exception:
            # A plugin that raises during start-up must not abort the whole
            # run; log and continue with the remaining plugins.
            logger.exception("swallowing exception from plugin")
        else:
            self.pending_plugins.append(fut)
            fut.add_done_callback(completion)

    def step(self):
        """Run an engine step and return whether more steps are needed to
        finish the started plugins.

        @rtype: bool
        """
        workleft = self.engine.step()
        if self.pending_plugins and not workleft:
            # The engine is idle but plugins never resolved their futures.
            logger.error("some plugins failed to complete")
            return False
        return bool(self.pending_plugins)

    def run(self, collector, plugins):
        """Start the given plugins and iterate engine steps until all plugins
        finish."""
        for plugin in plugins:
            self.start_plugin(collector, plugin)
        while self.step():
            pass
|
dcos/shakedown | tests/acceptance/test_dcos_package_cli.py | Python | apache-2.0 | 414 | 0.004831 | from shakedown import *
def test_install_package_cli():
    """Installing dcos-enterprise-cli makes it show up as installed."""
    package = 'dcos-enterprise-cli'
    assert not package_installed(package)
    install_package_and_wait(package)
    assert package_installed(package)
def test_uninstall_package_cli():
    """Uninstalling dcos-enterprise-cli removes it again."""
    assert package_installed('dcos-enterprise-cli')
    uninstall_package_and_wait('dcos-enterprise-cli')
    assert not package_installed('dcos-enterprise-cli')
|
googleapis/artman | artman/tasks/python_grpc_tasks.py | Python | apache-2.0 | 10,137 | 0.000099 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks related to Python gRPC code generation"""
import io
import os
import re
import tempfile
import time
from ruamel import yaml
from artman.utils import protoc_utils
from artman.tasks import task_base
class PythonChangePackageTask(task_base.TaskBase):
    """Copies source protos to a package that meets Python convention"""

    default_provides = ('final_src_proto_path',
                        'final_import_proto_path')

    # Regex fragment matching a single proto identifier.
    _IDENTIFIER = '[A-Za-z_][A-Za-z_0-9]*'

    _BASE_PROTO_REGEX = (
        '(?P<prefix>{prefix})' +
        '(?P<package>' + _IDENTIFIER +
        '({separator}' + _IDENTIFIER + ')*{package_suffix})'
        '(?P<suffix>{suffix})')

    # E.g., `package google.foo.bar`
    _PACKAGE_REGEX = re.compile(_BASE_PROTO_REGEX.format(
        prefix='^package ',
        separator='\\.',
        package_suffix='',
        suffix=''))

    # E.g., `import "google/foo/bar";`
    _IMPORT_REGEX = re.compile(_BASE_PROTO_REGEX.format(
        prefix='^import (?:public )?"',
        separator='/',
        package_suffix='\\.proto',
        suffix='";'))

    # TODO (geigerj): add regex for documentation link updates?

    def execute(self, src_proto_path, import_proto_path,
                organization_name):
        """Copy protos into a Python-conventional package layout.

        Returns a (final_src_proto_path, final_import_proto_path) pair
        pointing at the transformed copies under a temp directory.
        """
        self._organization_name = organization_name

        # Treat google.protobuf, google.iam as a common proto package, even
        # though they are not included in the common-protos we generate.
        #
        # TODO (geigerj): remove 'google.iam' when it is included in the common
        # protos package.
        common_protos = [
            'google.protobuf',
            'google.iam',
            'google.api',
            'google.longrunning',
            'google.rpc',
            'google.type',
            'google.logging.type',
        ]
        tmpdir = os.path.join(
            tempfile.gettempdir(), 'artman-python', str(int(time.time())))
        new_proto_dir = os.path.join(tmpdir, 'proto')
        new_src_path = set()
        new_import_path = [new_proto_dir]

        self._copy_and_transform_directories(
            src_proto_path, new_proto_dir, common_protos, paths=new_src_path)
        self._copy_and_transform_directories(
            import_proto_path, new_proto_dir, common_protos)

        # Update src_proto_path, import_proto_path
        return list(new_src_path), new_import_path

    def _extract_base_dirs(self, proto_file):
        """Return the proto file path derived from the package name."""
        with io.open(proto_file, 'rt', encoding='UTF-8') as proto:
            for line in proto:
                pkg = self._PACKAGE_REGEX.match(line)
                if pkg:
                    pkg = pkg.group('package')
                    return os.path.sep.join(pkg.split('.'))
            # No package statement found.
            return ''

    def _transform(self, pkg, sep, common_protos):
        """Transform to the appropriate proto package layout.

        Works with arbitrary separator (e.g., '/' for import statements,
        '.' for proto package statements, os.path.sep for filenames)
        """
        if sep != '.' and pkg.endswith('.proto'):
            dotted = pkg[:-6].replace(sep, '.')
            suffix = '.proto'
        else:
            dotted = pkg.replace(sep, '.')
            suffix = ''

        # Sanity check: Do not transform common protos.
        for common_pkg in common_protos:
            if dotted.startswith(common_pkg):
                return pkg

        # Special case: If the organization name is "google-cloud", then we
        # have to ensure that "cloud" exists in the path. The protos
        # themselves may not follow this.
        if 'cloud' not in dotted and self._organization_name == 'google-cloud':
            dotted = dotted.replace('google.', 'google.cloud.', 1)

        # Transform into the ideal proto path.
        # What essentially should happen here is that "{api}.{vN}" should
        # change to "{api}_{vN}".
        dotted = re.sub(r'\.v([\da-z_]*)([\d]+)\b', r'_v\1\2.proto', dotted)

        # Edge case: Some internal customers use "vNalpha" and "vNbeta".
        # Rather than make the regular expression more complicated, catch
        # this as a one-off.
        if re.search(r'\.v[\d]+alpha\b', dotted):
            dotted = re.sub(r'\.v([\d]+)alpha\b', r'_v\1alpha.proto', dotted)
        if re.search(r'\.v[\d]+beta\b', dotted):
            dotted = re.sub(r'\.v([\d]+)beta\b', r'_v\1beta.proto', dotted)
        if re.search(r'\.v[\d]+eap\b', dotted):
            dotted = re.sub(r'\.v([\d]+)eap\b', r'_v\1eap.proto', dotted)

        # Done; return with the appropriate separator.
        return dotted.replace('.', sep) + suffix

    def _copy_proto(self, src, dest, common_protos):
        """Copies a proto while fixing its imports"""
        with io.open(src, 'r', encoding='UTF-8') as src_lines:
            with io.open(dest, 'w+', encoding='UTF-8') as dest_file:
                for line in src_lines:
                    import_ = self._IMPORT_REGEX.match(line)
                    if import_:
                        # Rewrite the import to the transformed package path.
                        dest_file.write('import "{}";\n'.format(
                            self._transform(
                                import_.group('package'), '/', common_protos)))
                    else:
                        dest_file.write(line)

    def _copy_and_transform_directories(
            self, src_directories, destination_directory, common_protos,
            paths=None):
        """Copy every proto under src_directories into the new layout.

        When *paths* is given, each created package directory is added to it.
        """
        for path in src_directories:
            protos = list(protoc_utils.find_protos([path], []))
            for proto in protos:
                src_base_dirs = self._extract_base_dirs(proto)
                sub_new_src = os.path.join(
                    destination_directory,
                    self._transform(
                        src_base_dirs, os.path.sep, common_protos))
                if paths is not None:
                    paths.add(sub_new_src)

                dest = os.path.join(sub_new_src, os.path.basename(proto))
                if not os.path.exists(dest):
                    self.exec_command(['mkdir', '-p', sub_new_src])
                # NOTE: joining with the already-absolute `dest` simply
                # yields `dest`; kept for parity with the original code.
                self._copy_proto(
                    proto, os.path.join(sub_new_src, dest), common_protos)
class PythonMoveProtosTask(task_base.TaskBase):
default_provides = {'grpc_code_dir'}
def execute(self, grpc_code_dir, gapic_code_dir):
"""Move the protos into the GAPIC structure.
This copies the ``x/y/z/proto/`` directory over to be a sibling
of ``x/y/z/gapic/`` in the GAPIC code directory. In the event of
an inconsistency on the prefix, the GAPIC wins.
Args:
grpc_code_dir (str): The location where the GRPC code was
generated.
gapic_code_dir (str): The location where the GAPIC code was
generated.
"""
# Determine the appropriate source and target directory.
# We can get this by drilling in to the GAPIC artifact until we get to
# a "gapic" directory that is outside "docs" and "tests".
src = self._get_proto_path(grpc_code_dir)
target = self._get_gapic_subdir_path(gapic_code_dir)
# Move the contents into the GAPIC directory.
self.exec_command(['mv', src, os.path.join(target, 'proto')])
# Create an __init__.py file in the proto directory.
# This is necessary for Python 2.7 compatibility.
self.exec_command([
'touch', os.path.join(target, 'proto', '__init__.py'),
])
# Remove the grpc directory.
self.exec_command(['rm', '-rf', grpc_ |
alexbruy/QGIS | python/plugins/processing/algs/grass7/ext/r_li_mps.py | Python | gpl-2.0 | 1,280 | 0 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_li_mps.py
-----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify * |
* it under the terms of the GNU General Public License as published by *
| * the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from r_li import checkMovingWindow, configFile
def checkParameterValuesBeforeExecuting(alg):
    # Validate the moving-window configuration before the r.li algorithm
    # runs; delegates entirely to the shared r_li helper.
    return checkMovingWindow(alg)
def processCommand(alg):
    # Prepare the r.li configuration file for this algorithm run via the
    # shared r_li helper (no further command processing is needed here).
    configFile(alg)
|
candlepin/virt-who | tests/suds/test_reader.py | Python | gpl-2.0 | 4,360 | 0.002753 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( jurko.gospodnetic@pke.hr )
"""
suds.reader module unit tests.
Implemented using the 'pytest' testing framework.
"""
import os
import pytest
import testutils
# Allow running this test module directly; delegates to pytest.
if __name__ == "__main__":
    testutils.run_using_pytest(globals())
import virtwho.virt.esx.suds
import virtwho.virt.esx.suds.options
import virtwho.virt.esx.suds.reader
class TestCacheItemNameMangling:
    """Tests virtwho.virt.esx.suds.reader.Reader classes' cache item name mangling."""

    def test_different(self):
        # Distinct item names must produce distinct mangled cache names.
        test_item_name1 = "oh my god"
        test_item_name2 = "ha ha ha"
        test_item_suffix = "that's some funky sh*t"
        reader = virtwho.virt.esx.suds.reader.Reader(virtwho.virt.esx.suds.options.Options())
        mangled1 = reader.mangle(test_item_name1, test_item_suffix)
        mangled2 = reader.mangle(test_item_name2, test_item_suffix)
        assert mangled1 != mangled2

    @pytest.mark.skipif(os.getuid()!=0, reason="This test will only run as root")
    def test_inter_processes_persistence(self, tmpdir):
        """
        Same cache item names must be mangled the same in different processes.

        This is a regression test against using a built-in Python hash()
        function internally since that function may be seeded by a process
        specific random seed. This Python interpreter behaviour has been
        enabled by default since Python 3.3 and may be explicitly enabled on
        earlier Python interpreter versions as well.
        """
        test_item_name = "test string"
        test_item_suffix = "test suffix"
        reader = virtwho.virt.esx.suds.reader.Reader(virtwho.virt.esx.suds.options.Options())
        expected = reader.mangle(test_item_name, test_item_suffix)
        # Run the same mangling in a fresh interpreter and compare results.
        test_file = tmpdir.join("test_mangle.py")
        test_file.write("""
import virtwho.virt.esx.suds.options
import virtwho.virt.esx.suds.reader

reader = virtwho.virt.esx.suds.reader.Reader(virtwho.virt.esx.suds.options.Options())
mangled = reader.mangle("%(test_item_name)s", "%(test_item_suffix)s")
assert mangled == '%(expected)s'
""" % {"expected": expected,
       "test_item_name": test_item_name,
       "test_item_suffix": test_item_suffix})
        testutils.run_test_process(test_file)

    def test_repeatable__different_readers(self):
        # Two separate Reader instances must agree on the mangled name.
        test_item_name = "R2D2"
        test_item_suffix = "C3P0"
        reader1 = virtwho.virt.esx.suds.reader.Reader(virtwho.virt.esx.suds.options.Options())
        reader2 = virtwho.virt.esx.suds.reader.Reader(virtwho.virt.esx.suds.options.Options())
        mangled1 = reader1.mangle(test_item_name, test_item_suffix)
        mangled2 = reader2.mangle(test_item_name, test_item_suffix)
        assert mangled1 == mangled2

    def test_repeatable__same_reader(self):
        # Repeated calls on one Reader must be deterministic.
        test_item_name = "han solo"
        test_item_suffix = "chewbacca"
        reader = virtwho.virt.esx.suds.reader.Reader(virtwho.virt.esx.suds.options.Options())
        mangled1 = reader.mangle(test_item_name, test_item_suffix)
        mangled2 = reader.mangle(test_item_name, test_item_suffix)
        assert mangled1 == mangled2

    def test_suffix(self):
        # The caller-supplied suffix must survive mangling verbatim.
        test_item_name = "and a one! and a two! and a one - two - three!"
        test_item_suffix = "pimpl"
        reader = virtwho.virt.esx.suds.reader.Reader(virtwho.virt.esx.suds.options.Options())
        mangled = reader.mangle(test_item_name, test_item_suffix)
        assert mangled.endswith(test_item_suffix)
|
wwj718/ANALYSE | common/lib/xmodule/xmodule/modulestore/mixed.py | Python | agpl-3.0 | 34,661 | 0.003347 | """
MixedModuleStore allows for aggregation between multiple modulestores.
In this way, courses can be served up both - say - XMLModuleStore or MongoModuleStore
"""
import logging
from contextlib import contextmanager
import itertools
import functools
from contracts import contract, new_contract
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.assetstore import AssetMetadata
from . import ModuleStoreWriteBase
from . import ModuleStoreEnum
from .exceptions import ItemNotFoundError, DuplicateCourseError
from .draft_and_published import ModuleStoreDraftAndPublished
from .split_migrator import SplitMigrator
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
log = logging.getLogger(__name__)
def strip_key(func):
    """
    A decorator for stripping version and branch information from return values
    that are, or contain, UsageKeys or CourseKeys.

    The wrapped function is called with an extra 'field_decorator' keyword
    argument: a callable that applies the same stripping to any
    location(-containing) fields not directly returned by the function.

    Callers control the behavior through two optional keyword arguments,
    'remove_version' and 'remove_branch' (both default to True).
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        """
        Call the wrapped function, then strip version/branch data from its
        return value (including values nested in lists and dicts).
        """
        # Both stripping behaviors are enabled unless explicitly disabled.
        rem_vers = kwargs.pop('remove_version', True)
        rem_branch = kwargs.pop('remove_branch', True)

        def strip_one(val):
            """Strip a single value, recursing into its 'location' field."""
            result = val
            if rem_vers and hasattr(result, 'version_agnostic'):
                result = result.version_agnostic()
            if rem_branch and hasattr(result, 'for_branch'):
                result = result.for_branch(None)
            if hasattr(result, 'location'):
                result.location = strip_one(result.location)
            return result

        def strip_many(field_value):
            """Apply strip_one across a list, a dict, or a single value."""
            if not (rem_vers or rem_branch):
                return field_value
            if isinstance(field_value, list):
                return [strip_one(item) for item in field_value]
            if isinstance(field_value, dict):
                for key, val in field_value.iteritems():
                    field_value[key] = strip_one(val)
                return field_value
            return strip_one(field_value)

        # Run the wrapped function, then strip whatever it returned.
        return strip_many(func(field_decorator=strip_many, *args, **kwargs))

    return inner
class MixedModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase):
"""
ModuleStore knows how to route requests to the right persistence ms
"""
def __init__(self, contentstore, mappings, stores, i18n_service=None, fs_service=None, create_modulestore_instance=None, **kwargs):
    """
    Initialize a MixedModuleStore. Here we look into our passed in kwargs which should be a
    collection of other modulestore configuration information
    """
    super(MixedModuleStore, self).__init__(contentstore, **kwargs)

    if create_modulestore_instance is None:
        raise ValueError('MixedModuleStore constructor must be passed a create_modulestore_instance function')

    self.modulestores = []
    self.mappings = {}

    # Parse the course_id -> store-name mapping. Keys may be either
    # new-style course keys or deprecated slash-separated ones; entries
    # that parse as neither are logged and skipped.
    for course_id, store_name in mappings.iteritems():
        try:
            self.mappings[CourseKey.from_string(course_id)] = store_name
        except InvalidKeyError:
            try:
                self.mappings[SlashSeparatedCourseKey.from_deprecated_string(course_id)] = store_name
            except InvalidKeyError:
                log.exception("Invalid MixedModuleStore configuration. Unable to parse course_id %r", course_id)
                continue

    for store_settings in stores:
        key = store_settings['NAME']
        is_xml = 'XMLModuleStore' in store_settings['ENGINE']
        if is_xml:
            # restrict xml to only load courses in mapping
            store_settings['OPTIONS']['course_ids'] = [
                course_key.to_deprecated_string()
                for course_key, store_key in self.mappings.iteritems()
                if store_key == key
            ]
        store = create_modulestore_instance(
            store_settings['ENGINE'],
            self.contentstore,
            store_settings.get('DOC_STORE_CONFIG', {}),
            store_settings.get('OPTIONS', {}),
            i18n_service=i18n_service,
            fs_service=fs_service,
        )
        # replace all named pointers to the store into actual pointers
        for course_key, store_name in self.mappings.iteritems():
            if store_name == key:
                self.mappings[course_key] = store
        self.modulestores.append(store)
def _clean_course_id_for_mapping(self, course_id):
    """
    Strip version and branch information from a course key so it can act
    as a mapping-table key: one course never lives in two stores, so the
    minimal (version- and branch-less) key is the canonical one.

    :param course_id: the CourseKey
    """
    cleaned = course_id
    if hasattr(cleaned, 'version_agnostic'):
        cleaned = cleaned.version_agnostic()
    if hasattr(cleaned, 'branch'):
        cleaned = cleaned.replace(branch=None)
    return cleaned
def _get_modulestore_for_courseid(self, course_id=None):
    """
    Return the modulestore serving the given course.

    The mapping table is consulted first; on a miss, each configured
    store is probed with has_course and any hit is cached. With
    course_id None (or no store claiming the course), the default
    store is returned.
    """
    if course_id is not None:
        course_id = self._clean_course_id_for_mapping(course_id)
        mapped = self.mappings.get(course_id)
        if mapped is not None:
            return mapped
        for candidate in self.modulestores:
            if candidate.has_course(course_id):
                # Cache the discovery for subsequent lookups.
                self.mappings[course_id] = candidate
                return candidate

    # return the default store
    return self.default_modulestore
def _get_modulestore_by_type(self, modulestore_type):
    """
    Return the first modulestore of the requested type, or None.

    This method should only really be used by tests and migration scripts
    when necessary. The type can be a value from ModuleStoreEnum.Type.
    """
    for store in self.modulestores:
        if store.get_modulestore_type() == modulestore_type:
            return store
    return None
def fill_in_run(self, course_key):
"""
Some course_keys are used without runs. This function calls the corresponding
fill_in_run fu | nction on the appropriate modulestore.
"""
store = self._get_modulestore_for_courseid(course_key)
if not hasattr(store, 'fill_in_run'):
return course_key
return store |
wh20160213/WuhuaLearnToPython | python_study_level1/package1/studya4_socket_client.py | Python | lgpl-3.0 | 445 | 0.011331 | #!/usr/bin/python3
# 文件名:client.py
# 导入 socket、sys 模块
import socket
import sys
# 创建 socket 对象
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 获取本地主机 | 名
host = socket.gethostname()
# 设置端口好
port = 9999
# 连接服务,指定主机和端口
s.connect((host, port))
# 接收小于 1024 字节的数据
msg = s.recv(1024)
s.close | ()
print (msg.decode('utf-8')) |
tensorflow/minigo | bigtable_input.py | Python | apache-2.0 | 29,247 | 0.000615 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read Minigo game examples from a Bigtable.
"""
import bisect
import collections
import datetime
import math
import multiprocessing
import operator
import re
import struct
import time
import numpy as np
from tqdm import tqdm
from absl import flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters as bigtable_row_filters
from google.cloud.bigtable import column_family as bigtable_column_family
import tensorflow as tf
from tensorflow.contrib import cloud as contrib_cloud
import utils
# cbt_project: the GCP project owning the Cloud Bigtable instance.
flags.DEFINE_string('cbt_project', None,
                    'The project used to connect to the cloud bigtable ')

# cbt_instance: identifier of Cloud Bigtable instance in cbt_project.
flags.DEFINE_string('cbt_instance', None,
                    'The identifier of the cloud bigtable instance in cbt_project')

# cbt_table: identifier of Cloud Bigtable table in cbt_instance.
# The cbt_table is expected to be accompanied by one with an "-nr"
# suffix, for "no-resign".
flags.DEFINE_string('cbt_table', None,
                    'The table within the cloud bigtable instance to use')

FLAGS = flags.FLAGS


# Constants

# Row-key prefixes: games ("g_<number>_") and counters ("ct_<number>_").
ROW_PREFIX = 'g_{:0>10}_'
ROWCOUNT_PREFIX = 'ct_{:0>10}_'

# Model tables (models, models_for_eval) row key
MODEL_PREFIX = "m_{run}_{num:0>10}"

# Name of model
MODEL_NAME = b'model'

# Maximum number of concurrent processes to use when issuing requests against
# Bigtable. Value taken from default in the load-testing tool described here:
#
# https://github.com/googleapis/google-cloud-go/blob/master/bigtable/cmd/loadtest/loadtest.go
MAX_BT_CONCURRENCY = 100

# Column family and qualifier constants.
# Column Families
METADATA = 'metadata'
TFEXAMPLE = 'tfexample'

# Column Qualifiers
# Note that in CBT, families are strings and qualifiers are bytes.
TABLE_STATE = b'table_state'
WAIT_CELL = b'wait_for_game_number'
GAME_COUNTER = b'game_counter'
MOVE_COUNT = b'move_count'

# Patterns
_game_row_key = re.compile(r'g_(\d+)_m_(\d+)')
_game_from_counter = re.compile(r'ct_(\d+)_')

# The string information needed to construct a client of a Bigtable table.
BigtableSpec = collections.namedtuple(
    'BigtableSpec',
    ['project', 'instance', 'table'])

# Information needed to create a mix of two Game queues.
# r = resign/regular; c = calibration (no-resign)
GameMix = collections.namedtuple(
    'GameMix',
    ['games_r', 'moves_r',
     'games_c', 'moves_c',
     'selection'])
def cbt_intvalue(value):
    """Decode a big-endian 64-bit integer from raw Bigtable cell bytes.

    Cloud Bigtable encodes integers as big-endian 64-bit values when they
    are written; reads hand back the raw bytes, so callers must decode
    them explicitly.

    Args:
      value: an 8-byte big-endian encoded integer.
    Returns:
      the decoded Python int.
    """
    (decoded,) = struct.unpack('>q', value)
    return int(decoded)
def make_single_array(ds, batch_size=8*1024):
    """Create a single numpy array from a dataset.
    The dataset must have only one dimension, that is,
    the length of its `output_shapes` and `output_types`
    is 1, and its output shape must be `[]`, that is,
    every tensor in the dataset must be a scalar.
    Args:
      ds: a TF Dataset.
      batch_size: how many elements to read per pass
    Returns:
      a single numpy array.
    """
    # A tuple output type/shape means the dataset yields multiple components
    # per element, which cannot be flattened into one homogeneous array.
    if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
        raise ValueError('Dataset must have a single type and shape')
    nshapes = len(ds.output_shapes)
    if nshapes > 0:
        raise ValueError('Dataset must be comprised of scalars (TensorShape=[])')
    batches = []
    with tf.Session() as sess:
        # Batch for throughput: each sess.run() fetches up to batch_size
        # scalars as one numpy array.
        ds = ds.batch(batch_size)
        iterator = ds.make_initializable_iterator()
        sess.run(iterator.initializer)
        get_next = iterator.get_next()
        with tqdm(desc='Elements', unit_scale=1) as pbar:
            try:
                while True:
                    batches.append(sess.run(get_next))
                    pbar.update(len(batches[-1]))
            except tf.errors.OutOfRangeError:
                # Normal termination: the iterator is exhausted.
                pass
    if batches:
        return np.concatenate(batches)
    # Empty dataset: return an empty array of the dataset's element dtype.
    return np.array([], dtype=ds.output_types.as_numpy_dtype)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
    """Given dataset of key names, return histogram of moves/game.
    Move counts are written by the game players, so
    this is mostly useful for repair or backfill.
    Args:
      sess: TF session
      ds: TF dataset containing game move keys.
      batch_size: performance tuning parameter
    Returns:
      a collections.Counter mapping truncated game keys to move counts.
    """
    ds = ds.batch(batch_size)
    # Turns 'g_0000001234_m_133' into 'g_0000001234'
    ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
    iterator = ds.make_initializable_iterator()
    sess.run(iterator.initializer)
    get_next = iterator.get_next()
    h = collections.Counter()
    try:
        while True:
            # Each batch of truncated keys bumps the per-game counts.
            h.update(sess.run(get_next))
    except tf.errors.OutOfRangeError:
        pass
    # NOTE: Cannot be truly sure the count is right till the end.
    return h
def _game_keys_as_array(ds):
    """Turn keys of a Bigtable dataset into an array.
    Take g_GGG_m_MMM and create GGG.MMM numbers.
    Valuable when visualizing the distribution of a given dataset in
    the game keyspace.
    """
    # Drop the cell contents; only the row key is needed.
    ds = ds.map(lambda row_key, cell: row_key)
    # want 'g_0000001234_m_133' is '0000001234.133' and so forth
    ds = ds.map(lambda x:
                tf.strings.to_number(tf.strings.substr(x, 2, 10) +
                                     '.' +
                                     tf.strings.substr(x, 15, 3),
                                     out_type=tf.float64))
    return make_single_array(ds)
def _delete_rows(args):
    """Delete the given row keys from the given Bigtable.

    `args` is a single (BigtableSpec, row_keys) tuple so this function
    can be used with multiprocessing.Pool.map, which passes exactly one
    argument per work item; that is also why this is a top-level
    function instead of a method.
    """
    btspec, row_keys = args
    client = bigtable.Client(btspec.project)
    bt_table = client.instance(btspec.instance).table(btspec.table)
    pending = []
    for key in row_keys:
        row = bt_table.row(key)
        row.delete()
        pending.append(row)
    bt_table.mutate_rows(pending)
    return row_keys
class GameQueue:
"""Queue of games stored in a Cloud Bigtable.
The state of the table is stored in the `table_state`
row, which includes the columns `metadata:game_counter`.
"""
def __init__(self, project_name, instance_name, table_name):
"""Constructor.
Args:
project_name: string name of GCP project having table.
instance_name: string name of CBT instance in project.
table_name: string name of CBT table in instance.
"""
self.btspec = BigtableSpec(project_name, instance_name, table_name)
self.bt_table = bigtable.Client(
self.btspec.project, admin=True).instance(
self.btspec.instance).table(self.btspec.table)
self.tf_table = contrib_cloud.BigtableClient(
self.btspec.project,
self.btspec.instance).table(self.btspec.table)
def create(self):
"""Create the table underlying the queue.
Create the 'metadata' and 'tfexample' column families
and their properties.
"""
if self.bt_table.exists():
utils.dbg('Table already exists')
| return
max_versions_rule = bigtable_column_family.MaxVersionsGCRule(1)
self.bt_table.create(column_families={
METADATA: max_versions_rule,
TFEXAMPLE: max_versions_rule})
@prop | erty
def latest_game_number(self):
"""Re |
krishna11888/ai | third_party/pattern/pattern/graph/commonsense.py | Python | gpl-2.0 | 11,504 | 0.007476 | #### PATTERN | COMMONSENSE #########################################################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
from codecs import BOM_UTF8
from urllib import urlopen
from itertools import chain
from __init__ import Graph, Node, Edge, bfs
from __init__ import WEIGHT, CENTRALITY, EIGENVECTOR, BETWEENNESS
import os
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
#### COMMONSENSE SEMANTIC NETWORK ##################################################################
#--- CONCEPT ---------------------------------------------------------------------------------------
class Concept(Node):
    def __init__(self, *args, **kwargs):
        """ A concept in the semantic network.
        """
        Node.__init__(self, *args, **kwargs)
        # Cached list of property ids; None means "not computed yet".
        self._properties = None
    @property
    def halo(self, depth=2):
        """ Returns the concept halo: a list with this concept + surrounding concepts.
            This is useful to reason more fluidly about the concept,
            since the halo will include latent properties linked to nearby concepts.
        """
        # NOTE(review): because this is a property, callers can never pass
        # depth, so it is always 2 here; the module-level halo() function
        # exists for custom depths.
        return self.flatten(depth=depth)
    @property
    def properties(self):
        """ Returns the top properties in the concept halo, sorted by betweenness centrality.
            The return value is a list of concept id's instead of Concepts (for performance).
        """
        if self._properties is None:
            # Restrict the graph to this concept's halo, keep only nodes
            # registered as properties, and rank them by centrality,
            # highest first.
            g = self.graph.copy(nodes=self.halo)
            p = (n for n in g.nodes if n.id in self.graph.properties)
            p = [n.id for n in reversed(sorted(p, key=lambda n: n.centrality))]
            self._properties = p
        return self._properties
def halo(concept, depth=2):
    """Return the concept plus its surrounding concepts up to the given depth."""
    neighborhood = concept.flatten(depth=depth)
    return neighborhood
def properties(concept, depth=2, centrality=BETWEENNESS):
    """Return property concept ids in the concept's halo, most central first."""
    subgraph = concept.graph.copy(nodes=halo(concept, depth))
    candidates = (n for n in subgraph.nodes if n.id in concept.graph.properties)
    # reversed(sorted(...)) rather than sort(reverse=True) to keep the exact
    # tie ordering of an ascending stable sort read back to front.
    ranked = sorted(candidates, key=lambda n: getattr(n, centrality))
    return [n.id for n in reversed(ranked)]
#--- RELATION --------------------------------------------------------------------------------------
class Relation(Edge):
    def __init__(self, *args, **kwargs):
        """ A relation between two concepts, with an optional context.
            For example, "Felix is-a cat" is in the "media" context, "tiger is-a cat" in "nature".
        """
        # Pop "context" before delegating, since Edge.__init__ does not
        # accept that keyword.
        self.context = kwargs.pop("context", None)
        Edge.__init__(self, *args, **kwargs)
#--- HEURISTICS ------------------------------------------------------------------------------------
# Similarity between concepts is measured using a featural approach:
# a comparison of the features/properties that are salient in each concept's halo.
# Commonsense.similarity() takes an optional "heuristic" parameter to tweak this behavior.
# It is a tuple of two functions:
# 1) function(concept) returns a list of salient properties (or other),
# 2) function(concept1, concept2) returns the cost to traverse this edge (0.0-1.0).
COMMONALITY = (
# Similarity heuristic that only traverses relations between properties.
lambda concept: concept.properties,
lambda edge: 1 - int(edge.context == "properties" and \
edge.type != "is-opposite-of"))
#--- COMMONSENSE -----------------------------------------------------------------------------------
class Commonsense(Graph):
def __init__(self, data=os.path.join(MODULE, "commonsense.csv"), **kwargs):
""" A semantic network of commonsense, using different relation types:
- is-a,
- is-part-of,
- is-opposite-of,
- is-property-of,
- is-related-to,
- is-same-as,
- is-effect-of.
"""
Graph.__init__(self, **kwargs)
self._properties = None
# Load data from the given path,
# a CSV-file of (concept1, relation, concept2, context, weight)-items.
if data is not None:
s = open(data).read()
s = s.strip(BOM_UTF8)
s = s.decode("utf-8")
s = ((v.strip("\"") for v in r.split(",")) for r in s.splitlines())
for concept1, relation, concept2, context, weight in s:
self.add_edge(concept1, concept2,
type = relation,
context = context,
weight = min(int(weight)*0.1, 1.0))
@property
def concepts(self):
return self.nodes
@property
def relations(self):
return self.edges
@property
def properties(self):
""" Yields all concepts that are properties (i.e., adjectives).
For example: "cold is-property-of winter" => "cold".
"""
if self._properties is None:
#self._properties = set(e.node1.id for e in self.edges if e.type == "is-property-of")
self._properties = (e for e in self.edges if e.context == "properties")
self._properties = set(chain(*((e.node1.id, e.node2.id) for e in self._properties)))
return self._properties
def add_node(self, id, *args, **kwargs):
""" Returns a Concept (Node subclass).
"""
self._properties = None
kwargs.setdefault("base", Concept)
return Graph.add_node(self, id, *args, **kwargs)
def add_edge(self, id1, id2, *args, **kwargs):
""" Returns a Relation between two concepts (Edge subclass).
"""
self._properties = None
kwargs.setdefault("base", Relation)
return Graph.add_edge(self, id1, id2, *args, **kwargs)
def remove(self, x):
self._properties = None
Graph.remove(self, x)
def similarity(self, concept1, concept2, k=3, heuristic=COMMONALITY):
""" Returns the similarity of the given concepts,
by cross-comparing shortest path distance between k concept properties.
A given concept can also be a flat list of properties, e.g. ["creepy"].
The given heuristic is a tuple of two functions:
1) function(concept) returns a list of salient properties,
2) function(edge) returns the cost for traversing this edge (0.0-1.0).
"""
if isinstance(concept1, basestring):
concept1 = self[concept1]
if isinstance(concept2, basestring):
concept2 = self[concept2]
if isinstance(concept1, Node):
concept1 = heuristic[0](concept1)
if isinstance(concept2, Node):
concept2 = heuristic[0](concept2)
if is | instance(concept1, list):
| concept1 = [isinstance(n, Node) and n or self[n] for n in concept1]
if isinstance(concept2, list):
concept2 = [isinstance(n, Node) and n or self[n] for n in concept2]
h = lambda id1, id2: heuristic[1](self.edge(id1, id2))
w = 0.0
for p1 in concept1[:k]:
for p2 in concept2[:k]:
p = self.shortest_path(p1, p2, heuristic=h)
w += 1.0 / (p is None and 1e10 or len(p))
return w / k
def nearest_neighbors(self, concept, concepts=[], k=3):
""" Returns the k most similar concepts from the given list.
"""
return sorted(concepts, key=lambda candidate: self.similarity(concept, candidate, k), reverse=True)
similar = neighbors = nn = nearest_neighbors
def taxonomy(self, concept, depth=3, fringe=2):
""" Returns a list of concepts that are descendants of the given concept, using "is-a" relations.
Creates a subgraph of "is-a" related concepts up to the given depth,
then takes the fringe (i.e., leaves) of the subgraph.
"""
def traversable(node, edge):
# Follow parent-child edges.
return edge.node2 == node and edge.type == "is-a"
if |
bitmazk/cmsplugin-redirect | cmsplugin_redirect/tests/south_settings.py | Python | mit | 586 | 0 | """
These settings are used by the ``manage.py`` command.
With normal tests we want to use the fast | est possible way which is an
in-memory sqlite database but if you want to create South migrations you
need a pers | istant database.
Unfortunately there seems to be an issue with either South or syncdb so that
defining two routers ("default" and "south") does not work.
"""
from cmsplugin_redirect.tests.test_settings import * # NOQA
# Use a file-based SQLite database (instead of in-memory) so that South
# can inspect a persistent schema when generating migrations.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite',
    }
}
INSTALLED_APPS.append('south', )
|
boisvert42/npr-puzzle-python | 2018/1028_musical_all_of_us.py | Python | cc0-1.0 | 1,235 | 0.014599 | '''
NPR Puzzle 2018-10-28
https://www.npr.org/2018/10/28/660936138/sunday-puzzle-r | ow-row-row
Think of a famous Broadway musical in two words.
Change one letter in it to the preceding letter of the alphabet —
so B would become A, C would become B, etc.
Remove the space so you have a solid word.
The result will name something that all of us are part of. What is it?
'''
import sys
sys.path.append('..')
import nprcommon | tools as nct
from nltk.corpus import wordnet as wn
import re
#%%
# Get a list of musicals from Wikipedia
musicals = set(x for x in nct.wikipedia_category_members('Broadway_musicals') if x.count(' ') == 1)
#musicals = musicals.union(wikipedia_category_members('Off-Broadway_musicals'))
#musicals = musicals.union(wikipedia_category_members('American musical films'))
words = set(x for x in wn.all_lemma_names() if x.count('_') == 0)
#%%
# Go through musicals and look for ones that work
for musical in musicals:
musical_nospace = re.sub(r'[^A-Za-z]+','',musical).lower()
for i in range(len(musical_nospace)):
letter = musical_nospace[i]
myword = musical_nospace[:i] + nct.letter_shift(letter,-1) + musical_nospace[i+1:]
if myword in words:
print(musical,myword)
|
rockfruit/bika.lims | bika/lims/browser/worksheet/views/analyses_transposed.py | Python | agpl-3.0 | 4,358 | 0.006425 | # coding=utf-8
# This f | ile is part of Bika LIMS
#
# Copyright 2011-2016 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims.browser.bika_listing import | BikaListingTable
from bika.lims.browser.worksheet.views.analyses import AnalysesView
class AnalysesTransposedView(AnalysesView):
    """ The view for displaying the table of manage_results transposed.
        Analysis Requests are displayed in columns and analyses in rows.
        Uses most of the logic provided by BikaListingView through
        bika.lims.worksheet.views.AnalysesView to generate the items,
        but renders its own template, which is highly specific for
        display analysis results. Because of this, some generic
        BikaListing functionalities, such as sorting, pagination,
        contextual menus for columns, etc. will not work in this view.
    """
    def contents_table(self, table_only = True):
        """ Overrides contents_table method from the parent class
            BikaListingView, using the transposed template instead
            of the classic template.
        """
        # NOTE(review): the incoming table_only flag is ignored; the
        # transposed table is always built with table_only=True.
        table = AnalysesTransposedTable(bika_listing = self, table_only = True)
        return table.render(self)
class AnalysesTransposedTable(BikaListingTable):
    """ The BikaListingTable that uses a transposed template for
        displaying the results.
    """
    render = ViewPageTemplateFile("../templates/analyses_transposed.pt")
    render_cell = ViewPageTemplateFile("../templates/analyses_transposed_cell.pt")
    def __init__(self, bika_listing = None, table_only = False):
        # rows_headers: one dict per transposed row (field or analysis).
        # trans_items: {position: {service name: item dict}} lookup.
        # positions: column order, as first seen in self.items.
        BikaListingTable.__init__(self, bika_listing, True)
        self.rows_headers = []
        self.trans_items = {}
        self.positions = []
        self._transpose_data()
    def _transpose_data(self):
        """Build the transposed row headers and the per-position item map."""
        cached = []
        index = 0
        #ignore = ['Analysis', 'Service', 'Result', 'ResultDM']
        include = ['Attachments', 'DetectionLimit', 'DueDate','Pos', 'ResultDM']
        for col in self.bika_listing.review_state['columns']:
            if col == 'Result':
                # Further interims will be inserted in this position
                resindex = index
            if col not in include:
                continue
            lcol = self.bika_listing.columns[col]
            self.rows_headers.append({'id': col,
                     'title': lcol['title'],
                     'type': lcol.get('type',''),
                     'row_type': 'field',
                     'hidden': not lcol.get('toggle', True),
                     'input_class': lcol.get('input_class',''),
                     'input_width': lcol.get('input_width','')})
            cached.append(col)
            index += 1
        for item in self.items:
            # Insert one 'analysis' row per distinct service, at the slot
            # reserved above for the Result column.
            if item['Service'] not in cached:
                self.rows_headers.insert(resindex,
                    {'id': item['Service'],
                     'title': item['title'],
                     'type': item.get('type',''),
                     'row_type': 'analysis',
                     'index': index})
                resindex += 1
                cached.append(item['Service'])
            pos = item['Pos']
            if pos in self.trans_items:
                self.trans_items[pos][item['Service']] = item
            else:
                self.trans_items[pos] = {item['Service']: item}
            if pos not in self.positions:
                self.positions.append(pos)
    def rendered_items(self, cat=None, **kwargs):
        # The generic row renderer is disabled; cells are rendered one at a
        # time by render_row_cell() from the transposed template.
        return ''
    def render_row_cell(self, rowheader, position = ''):
        """Render one cell for the given transposed row header and position."""
        self.current_rowhead = rowheader
        self.current_position = position
        if rowheader['row_type'] == 'field':
            # Only the first item for this position contains common
            # data for all the analyses with the same position
            its = [i for i in self.items if i['Pos'] == position]
            self.current_item = its[0] if its else {}
        elif position in self.trans_items \
            and rowheader['id'] in self.trans_items[position]:
            self.current_item = self.trans_items[position][rowheader['id']]
        else:
            # No analysis of this service at this position: empty cell.
            return ''
        return self.render_cell()
|
pacoqueen/bbinn | SQLObject/SQLObject-0.6.1/sqlobject/mysql/__init__.py | Python | gpl-2.0 | 306 | 0.009804 | from | sqlobject.dbconnection import registerConnection
def builder():
    """Return the MySQL connection class, importing its module lazily."""
    from mysqlconnection import MySQLConnection
    return MySQLConnection
def isSupported():
    """Report whether the MySQLdb driver is importable on this system."""
    try:
        import MySQLdb  # noqa -- imported only to probe availability
    except ImportError:
        return False
    return True
registerConnection(['mysql'], builder, isSuppo | rted)
|
jankoslavic/numpy | numpy/lib/function_base.py | Python | bsd-3-clause | 132,305 | 0.000136 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
    """
    Check whether or not an object can be iterated over.
    Parameters
    ----------
    y : object
        Input object.
    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.
    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0
    """
    try:
        iter(y)
    except TypeError:
        # iter() raises TypeError for non-iterables; the previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit and any
        # genuine error raised from a broken __iter__.
        return 0
    return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
              density=None):
    """
    Compute the histogram of a set of data.
    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.
    range : (float, float), optional
        The lower and upper range of the bins.  If not provided, range
        is simply ``(a.min(), a.max())``.  Values outside the range are
        ignored.
    normed : bool, optional
        This keyword is deprecated in Numpy 1.6 due to confusing/buggy
        behavior. It will be removed in Numpy 2.0. Use the density keyword
        instead.
        If False, the result will contain the number of samples
        in each bin.  If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that this latter behavior is
        known to be buggy with unequal bin widths; use `density` instead.
    weights : array_like, optional
        An array of weights, of the same shape as `a`.  Each value in `a`
        only contributes its associated weight towards the bin count
        (instead of 1).  If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1
    density : bool, optional
        If False, the result will contain the number of samples
        in each bin.  If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
        Overrides the `normed` keyword if given.
    Returns
    -------
    hist : array
        The values of the histogram. See `normed` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.
    See Also
    --------
    histogramdd, bincount, searchsorted, digitize
    Notes
    -----
    All but the last (righthand-most) bin is half-open.  In other words, if
    `bins` is::
      [1, 2, 3, 4]
    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
    second ``[2, 3)``.  The last bin, however, is ``[3, 4]``, which *includes*
    4.
    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))
    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0
    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    # NOTE: `range` shadows the builtin here (historic numpy signature).
    if (range is not None):
        mn, mx = range
        if (mn > mx):
            # Historic quirk: AttributeError (not ValueError) is part of
            # the established interface; callers may catch it.
            raise AttributeError(
                'max must be larger than min in range parameter.')
    if not iterable(bins):
        # Scalar `bins`: build `bins + 1` equally spaced edges.
        if np.isscalar(bins) and bins < 1:
            raise ValueError(
                '`bins` should be a positive integer.')
        if range is None:
            if a.size == 0:
                # handle empty arrays. Can't determine range, so use 0-1.
                range = (0, 1)
            else:
                range = (a.min(), a.max())
        mn, mx = [mi + 0.0 for mi in range]
        if mn == mx:
            # Degenerate range: widen symmetrically so linspace works.
            mn -= 0.5
            mx += 0.5
        bins = linspace(mn, mx, bins + 1, endpoint=True)
    else:
        bins = asarray(bins)
        if (np.diff(bins) < 0).any():
            raise AttributeError(
                'bins must increase monotonically.')
    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = int
    else:
        ntype = weights.dtype
    n = np.zeros(bins.shape, ntype)
    # Process the data in 64K chunks to bound memory; for each chunk,
    # sort it and use searchsorted against the edges to get cumulative
    # counts, which np.diff turns into per-bin counts at the end.
    block = 65536
    if weights is None:
        for i in arange(0, len(a), block):
            sa = sort(a[i:i+block])
            n += np.r_[sa.searchsorted(bins[:-1], 'left'),
                       sa.searchsorted(bins[-1], 'right')]
    else:
        zero = array(0, dtype=ntype)
        for i in arange(0, len(a), block):
            tmp_a = a[i:i+block]
            tmp_w = weights[i:i+block]
            sorting_index = np.argsort(tmp_a)
            sa = tmp_a[sorting_index]
            sw = tmp_w[sorting_index]
            # cw[k] is the weight of the first k sorted samples, so
            # indexing it with searchsorted positions yields cumulative
            # weighted counts per edge.
            cw = np.concatenate(([zero, ], sw.cumsum()))
            bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
                              sa.searchsorted(bins[-1], 'right')]
            n += cw[bin_index]
    n = np.diff(n)
    if density is not None:
        if density:
            db = array(np.diff(bins), float)
            return n/db/n.sum(), bins
        else:
            return n, bins
    else:
        # deprecated, buggy behavior. Remove for Numpy 2.0
        if normed:
            db = array(np.diff(bins), float)
            return n/(n*db).sum(), bins
        else:
            return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogramme |
sander76/home-assistant | tests/components/system_log/test_init.py | Python | apache-2.0 | 11,361 | 0.000616 | """Test system log component."""
import asyncio
import logging
import queue
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import system_log
from homeassistant.core import callback
_LOGGER = logging.getLogger("test_logger")
BASIC_CONFIG = {"system_log": {"max_entries": 2}}
@pytest.fixture
def simple_queue():
    """Patch the component's log queue and yield it for inspection."""
    simple_queue_fixed = queue.SimpleQueue()
    # Substitute our own queue so tests can poll it (see
    # _async_block_until_queue_empty) to know when log records have been
    # drained by the component.
    with patch(
        "homeassistant.components.system_log.queue.SimpleQueue",
        return_value=simple_queue_fixed,
    ):
        yield simple_queue_fixed
async def _async_block_until_queue_empty(hass, sq):
    """Wait until the patched log queue has been fully processed."""
    # Unfortunately we are stuck with polling
    await hass.async_block_till_done()
    while not sq.empty():
        await asyncio.sleep(0.01)
    # Acquire/release the component lock to ensure any in-flight record
    # currently being handled has finished before we assert on results.
    hass.data[system_log.DOMAIN].acquire()
    hass.data[system_log.DOMAIN].release()
    await hass.async_block_till_done()
async def get_error_log(hass, hass_client, expected_count):
    """Fetch all entries from system_log via the API.

    Asserts the HTTP status and that exactly `expected_count` entries
    exist, then returns the decoded entry list.
    """
    client = await hass_client()
    resp = await client.get("/api/error/all")
    assert resp.status == 200
    data = await resp.json()
    assert len(data) == expected_count
    return data
def _generate_and_log_exception(exception, log):
    """Raise an Exception with the given text and log it via _LOGGER.exception."""
    try:
        raise Exception(exception)
    # The bare except is deliberate here: we only ever catch the Exception
    # raised one line above, and we want the traceback in the log record.
    except: # noqa: E722 pylint: disable=bare-except
        _LOGGER.exception(log)
def assert_log(log, exception, message, level):
    """Assert that specified values are in a specific log entry."""
    # Normalize a single message into a one-element list for comparison.
    expected_messages = message if isinstance(message, list) else [message]
    assert log["name"] == "test_logger"
    assert exception in log["exception"]
    assert expected_messages == log["message"]
    assert level == log["level"]
    assert "timestamp" in log
def get_frame(name):
    """Return a fake log stack-frame tuple for the given frame name."""
    frame = (name, 5, None, None)
    return frame
async def test_normal_logs(hass, simple_queue, hass_client):
    """Test that debug and info are not logged."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    # Only WARNING and above should be captured by system_log.
    _LOGGER.debug("debug")
    _LOGGER.info("info")
    await _async_block_until_queue_empty(hass, simple_queue)
    # Assert done by get_error_log
    await get_error_log(hass, hass_client, 0)
async def test_exception(hass, simple_queue, hass_client):
    """Test that exceptions are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _generate_and_log_exception("exception message", "log message")
    await _async_block_until_queue_empty(hass, simple_queue)
    log = (await get_error_log(hass, hass_client, 1))[0]
    # Both the traceback text and the log message must be recorded.
    assert_log(log, "exception message", "log message", "ERROR")
async def test_warning(hass, simple_queue, hass_client):
    """Test that warning are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.warning("warning message")
    await _async_block_until_queue_empty(hass, simple_queue)
    log = (await get_error_log(hass, hass_client, 1))[0]
    # A plain warning has no exception text attached.
    assert_log(log, "", "warning message", "WARNING")
async def test_error(hass, simple_queue, hass_client):
    """Test that errors are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error("error message")
    await _async_block_until_queue_empty(hass, simple_queue)
    log = (await get_error_log(hass, hass_client, 1))[0]
    # A plain error has no exception text attached.
    assert_log(log, "", "error message", "ERROR")
async def test_config_not_fire_event(hass, simple_queue):
    """Test that errors are not posted as events with default config."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    events = []
    @callback
    def event_listener(event):
        """Listen to events of type system_log_event."""
        events.append(event)
    hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
    _LOGGER.error("error message")
    await _async_block_until_queue_empty(hass, simple_queue)
    # fire_event is not enabled in BASIC_CONFIG, so nothing is dispatched.
    assert len(events) == 0
async def test_error_posted_as_event(hass, simple_queue):
    """Test that error are posted as events."""
    # Enable fire_event explicitly; it is off by default.
    await async_setup_component(
        hass, system_log.DOMAIN, {"system_log": {"max_entries": 2, "fire_event": True}}
    )
    events = []
    @callback
    def event_listener(event):
        """Listen to events of type system_log_event."""
        events.append(event)
    hass.bus.async_listen(system_log.EVENT_SYSTEM_LOG, event_listener)
    _LOGGER.error("error message")
    await _async_block_until_queue_empty(hass, simple_queue)
    assert len(events) == 1
    assert_log(events[0].data, "", "error message", "ERROR")
async def test_critical(hass, simple_queue, hass_client):
    """Test that critical are logged and retrieved correctly."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.critical("critical message")
    await _async_block_until_queue_empty(hass, simple_queue)
    log = (await get_error_log(hass, hass_client, 1))[0]
    assert_log(log, "", "critical message", "CRITICAL")
async def test_remove_older_logs(hass, simple_queue, hass_client):
    """Test that older logs are rotated out."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error("error message 1")
    _LOGGER.error("error message 2")
    _LOGGER.error("error message 3")
    await _async_block_until_queue_empty(hass, simple_queue)
    # BASIC_CONFIG sets max_entries to 2, so the oldest entry is dropped
    # and entries are returned newest first.
    log = await get_error_log(hass, hass_client, 2)
    assert_log(log[0], "", "error message 3", "ERROR")
    assert_log(log[1], "", "error message 2", "ERROR")
def log_msg(nr=2):
    """Log an error at same line."""
    # Keeping a single fixed call site means every message logged through
    # this helper shares a source location, which is what system_log
    # dedupes on (see test_dedupe_logs).
    _LOGGER.error("error message %s", nr)
async def test_dedupe_logs(hass, simple_queue, hass_client):
    """Test that duplicate log entries are deduped."""
    await async_setup_component(hass, system_log.DOMAIN, {})
    _LOGGER.error("error message 1")
    # log_msg() always logs from the same source line, so repeated calls
    # are merged into one entry with an incrementing count.
    log_msg()
    log_msg("2-2")
    _LOGGER.error("error message 3")
    await _async_block_until_queue_empty(hass, simple_queue)
    log = await get_error_log(hass, hass_client, 3)
    assert_log(log[0], "", "error message 3", "ERROR")
    assert log[1]["count"] == 2
    assert_log(log[1], "", ["error message 2", "error message 2-2"], "ERROR")
    # A repeat moves the merged entry back to the front and updates its
    # timestamp while keeping first_occurred.
    log_msg()
    await _async_block_until_queue_empty(hass, simple_queue)
    log = await get_error_log(hass, hass_client, 3)
    assert_log(log[0], "", ["error message 2", "error message 2-2"], "ERROR")
    assert log[0]["timestamp"] > log[0]["first_occurred"]
    log_msg("2-3")
    log_msg("2-4")
    log_msg("2-5")
    log_msg("2-6")
    await _async_block_until_queue_empty(hass, simple_queue)
    log = await get_error_log(hass, hass_client, 3)
    # NOTE(review): apparently only the 5 most recent distinct messages
    # are kept per merged entry ("2" has been pushed out) — confirm
    # against the component's message-history limit.
    assert_log(
        log[0],
        "",
        [
            "error message 2-2",
            "error message 2-3",
            "error message 2-4",
            "error message 2-5",
            "error message 2-6",
        ],
        "ERROR",
    )
async def test_clear_logs(hass, simple_queue, hass_client):
    """Test that the log can be cleared via a service call."""
    await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
    _LOGGER.error("error message")
    await _async_block_until_queue_empty(hass, simple_queue)
    await hass.services.async_call(system_log.DOMAIN, system_log.SERVICE_CLEAR, {})
    await _async_block_until_queue_empty(hass, simple_queue)
    # Assert done by get_error_log
    await get_error_log(hass, hass_client, 0)
async def test_write_log(hass):
"""Test that error propagates to logger."""
await async_setup_component(hass, system_log.DOMAIN, BASIC_CONFIG)
logger = MagicMock()
wit | h patch("logging.ge | tLogger", return_value=logger) as mock_logging:
await hass.services.async_call(
system_log.DOMAIN, system_log.SERVICE_WRITE, {"message": "test_message"}
)
await hass.async_block_till_done()
mock_logging.assert_called_once_with("homeassistant.components.system_log.external")
assert logger.method_calls[0] == ("error", ("test_message",))
async def |
grapesmoker/regulations-parser | regparser/grammar/amdpar.py | Python | cc0-1.0 | 17,063 | 0.000117 | # vim: set encoding=utf-8
# @todo: this file is becoming too large; refactor
import logging
import string
from pyparsing import CaselessLiteral, FollowedBy, OneOrMore, Optional
from pyparsing import Suppress, Word, LineEnd, ZeroOrMore
from regparser.grammar import atomic, tokens, unified
from regparser.grammar.utils import Marker, WordBoundaries
from regparser.tree.paragraph import p_levels
# "introductory text" (or the equivalent "subject heading" phrasing, which the
# parse action normalizes to the string "text").
intro_text_marker = (
    (Marker("introductory") + WordBoundaries(CaselessLiteral("text")))
    | (Marker("subject") + Marker("heading")).setParseAction(lambda _: "text")
)
# Connectives joining a component to its owner, e.g. "heading OF section 2".
of_connective = (Marker("of") | Marker("for") | Marker("to"))
# Auxiliary verbs signalling passive voice. A leading "and" is captured in the
# `and_prefix` result name so verbs can record that they continue a prior verb.
passive_marker = (
    Marker("is") | Marker("are") | Marker("was") | Marker("were")
    | Marker("and").setResultsName("and_prefix").setParseAction(
        lambda _: True))
and_token = Marker("and").setParseAction(lambda _: tokens.AndToken())
# Verbs
def generate_verb(word_list, verb, active):
    """Build a tokens.Verb grammar that triggers on any of the given words."""
    literals = [CaselessLiteral(word) for word in word_list]
    if not active:
        # Passive forms must follow an auxiliary (is/are/was/were, or "and").
        literals = [passive_marker + literal for literal in literals]
    # OR the alternatives together (left fold, same as reduce with `|`).
    combined = literals[0]
    for literal in literals[1:]:
        combined = combined | literal
    combined = WordBoundaries(combined)
    return combined.setParseAction(
        lambda m: tokens.Verb(verb, active, bool(m.and_prefix)))
put_active = generate_verb(
['revising', 'revise', 'correcting', 'correct'],
tokens.Verb.PUT, active=True)
put_passive = generate_verb(
['revised', 'corrected'], tokens.Verb.PUT,
active=False)
post_active = generate_verb(['adding', 'add'], tokens.Verb.POST, active=True)
post_passive = generate_verb(['added'], tokens.Verb.POST, active=False)
delete_active = generate_verb(
['removing', 'remove'], tokens.Verb.DELETE, active=True)
delete_passive = generate_verb(['removed'], tokens.Verb.DELETE, active=False)
move_active = generate_verb(
['redesignating', 'redesignate'], tokens.Verb.MOVE, active=True)
move_passive = generate_verb(['redesignated'], tokens.Verb.MOVE, active=False)
designate_active = generate_verb(
['designate'],
tokens.Verb.DESIGNATE, active=True)
reserve_active = generate_verb(['reserve', 'reserving'],
tokens.Verb.RESERVE, active=True)
# Context
context_certainty = Optional(
Marker("in") | Marker("to") | (
Marker("under") + Optional(
Marker("subheading")))).setResultsName("certain")
interp = (
context_certainty + atomic.comment_marker + unified.marker_part
).setParseAction(lambda m: tokens.Context([m.part, 'Interpretations'],
bool(m.certain)))
# This may be a regtext paragraph or it may be an interpretation
paragraph_context = (
atomic.section
+ unified.depth1_p
+ ~FollowedBy("-")
).setParseAction(
lambda m: tokens.Context([None, None, m.section, m.p1, m.p2, m.p3, m.p4,
m.plaintext_p5, m.plaintext_p6]))
def _paren_join(elements):
return '(' + ')('.join(el for el in elements if el) + ')'
marker_subpart = (
context_certainty
+ unified.marker_subpart
).setParseAction(lambda m: tokens.Context(
[None, 'Subpart:' + m.subpart], bool(m.certain)))
comment_context_with_section = (
context_certainty
# Confusingly, these are sometimes "comments", sometimes "paragraphs"
+ (Marker("comment") | Marker("paragraph"))
+ atomic.section
+ unified.depth1_p
+ ~FollowedBy("-")
).setParseAction(lambda m: tokens.Context(
[None, 'Interpretations', m.section,
_paren_join([m.p1, m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6])
], bool(m.certain)))
# Mild modification of the above; catches "under 2(b)"
comment_context_under_with_section = (
Marker("under")
+ atomic.section
+ unified.depth1_p
).setParseAction(lambda m: tokens.Context(
[None, 'Interpretations', m.section,
_paren_join([m.p1, m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6])
], True))
# Interpretation context given only by a paragraph citation (no section),
# e.g. "paragraph (b)(2)".
comment_context_without_section = (
    context_certainty
    + atomic.paragraph_marker
    + unified.depth2_p
).setParseAction(lambda m: tokens.Context(
    [None, 'Interpretations', None,
     _paren_join([m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6])
     ], bool(m.certain)))
# Context naming an appendix, optionally tied to a part
# ("Appendix A to part 22"); label prefix parallels 'Subpart:' above.
appendix = (
    context_certainty
    + unified.marker_appendix
    + Optional(Marker("to") + unified.marker_part)
).setParseAction(lambda m: tokens.Context(
    [m.part, 'Appendix:' + m.appendix], bool(m.certain)))
section = (
context_certainty
+ atomic.section_marker
+ unified.part_section).setParseAction(lambda m: tokens.Context(
[m.part, None, m.section], bool(m.certain)))
# Paragraph components (used when not replacing the whole paragraph)
section_heading = Marker("heading").setParseAction(
lambda _: tokens.Paragraph([], field=tokens.Paragraph.HEADING_FIELD))
intro_text = intro_text_marker.copy().setParseAction(
lambda _: tokens.Paragraph([], field=tokens.Paragraph.TEXT_FIELD))
# Paragraphs
comment_p = (
Word(string.digits).setResultsName("level2")
+ Optional(
Suppress(".") + Word("ivxlcdm").setResultsName('level3')
+ Optional(
Suppress(".")
+ Word(string.ascii_uppercase).setResultsName("level4"))))
section_heading_of = (
Marker("heading") + of_connective
+ unified.marker_part_section
).setParseAction(
lambda m: tokens.Paragraph([m.part, None, m.section],
field=tokens.Paragraph.HEADING_FIELD))
section_paragraph_heading_of = (
Marker("heading") + of_connective
+ (atomic.paragraph_marker | Marker("comment"))
+ atomic.section
+ unified.depth1_p
).setParseAction(
lambda m: tokens.Paragraph([None, 'Interpretations', m.section,
_paren_join([m.p1, m.p2, m.p3, m.p4, m.p5])],
field=tokens.Paragraph.HEADING_FIELD))
appendix_subheading = (
Marker("subheading")
+ unified.marker_appendix
).setParseAction(
# Use '()' to pad the label out to what's expected of interpretations
lambda m: tokens.Paragraph([None, 'Interpretations', m.appendix, '()'],
field=tokens.Paragraph.HEADING_FIELD))
paragraph_heading_of = (
Marker("heading") + of_connective
+ unified.marker_paragraph.copy()
).setParseAction(
lambda m: tokens.Paragraph([None, None, None, m.p1, m.p2, m.p3, m.p4,
m.plaintext_p5, m.plaintext_p6],
field=tokens.Paragraph.KEYTERM_FIELD))
comment_heading = (
Marker("heading")
+ Optional(of_connective)
+ atomic.section
+ unified.depth1_p).setParseAction(
lambda m: tokens.Paragraph([None, "Interpretations", m.section,
_paren_join([m.p1, m.p2, m.p3, m.p4, m.p5])],
field=tokens.Paragraph.HEADING_FIELD))
intro_text_of = (
intro_text_marker + of_connective
+ unified.marker_paragraph.copy()
).setParseAction(
lambda m: tokens.Paragraph([None, None, None, m.p1, m.p2, m.p3, m.p4,
m.plaintext_p5, m.plaintext_p6],
field=tokens.Paragraph.TEXT_FIELD))
intro_text_of_interp = (
intro_text_marker + of_connective
+ atomic.paragraph_marker
+ comment_p
).setParseAction(lambda m: tokens.Paragraph([
None, 'Interpretations', None, None, m.level2, m.level3,
m.level4], field=tokens.Paragraph.TEXT_FIELD))
single_par = (
unified.marker_paragraph
+ Optional(intro_text_marker)
).setParseAction(lambda m: tokens.Paragraph([
None, None, None, m.p1, m.p2, m.p3, m.p4, m.plaintext_p5,
m.plaintext_p6],
field=(tokens.Paragraph.TEXT_FIELD if m[-1] == 'text' else None)))
section_single_par = (
unified.marker_part_section
+ unified.depth1_p
+ Optional(intro_text_marker)
).setParseAction(lambda m: tokens.Paragraph([
m.part, None, m.section, m.p1, m.p2, m.p3, m.p4, m.plaintext_p5,
m.plaintext_p6 |
slackpad/hashtagtodo-open | todo/stat_rollup.py | Python | mit | 1,528 | 0.002618 | from datetime import datetime
from collections import defaultdict
from todo.models.calendar import Calendar
from todo.models.event import Event
from todo.models.stat import Stat
from todo.models.user import User
def rollup_users():
    """Roll weekly signup counts into cumulative 'user-count' stats."""
    weekly_counts = defaultdict(lambda: defaultdict(int))
    for user in User.get_all():
        iso_year, iso_week, _day = user.created.isocalendar()
        weekly_counts[iso_year][iso_week] += 1
    # Walk the weeks in chronological order, accumulating a running total.
    running_total = 0
    for iso_year in sorted(weekly_counts):
        for iso_week in sorted(weekly_counts[iso_year]):
            running_total += weekly_counts[iso_year][iso_week]
            Stat.create_or_update('user-count', iso_year, iso_week, running_total)
def rollup_todos():
    """Roll weekly counts of distinct active users into Stats.

    ``created`` buckets users by the week an event was created;
    ``engaged`` buckets users by the week an event was last updated.
    (Repairs a corrupted ``engaged = ...`` line; logic unchanged.)
    """
    created = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    engaged = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    for event in Event.query():
        # Event keys nest under a user key two levels up.
        user = event.key.parent().parent().id()
        (year, week, day) = event.created.isocalendar()
        created[year][week][user] += 1
        (year, week, day) = event.updated.isocalendar()
        engaged[year][week][user] += 1
    for year, weekly in created.iteritems():
        for week, users in weekly.iteritems():
            # Count of distinct users, not total events.
            Stat.create_or_update('user-created-event', year, week, len(users))
    for year, weekly in engaged.iteritems():
        for week, users in weekly.iteritems():
            Stat.create_or_update('user-engaged-event', year, week, len(users))
def do_rollup():
    """Run all stat rollups; event rollups are currently disabled."""
    rollup_users()
    # rollup_todos()
|
twaldear/flask-secure-headers | flask_secure_headers/tests/core_test.py | Python | mit | 5,918 | 0.038189 | import unittest
from flask import Flask
from flask_secure_headers.core import Secure_Headers
from flask_secure_headers.headers import CSP
class TestCSPHeaderCreation(unittest.TestCase):
    """Unit tests for building Content-Security-Policy headers."""

    def test_CSP_pass(self):
        secure_headers = Secure_Headers()
        default_csp = secure_headers.defaultPolicies['CSP']

        # update_policy merges new directives into the defaults.
        merged = CSP({'script-src': ['self', 'code.jquery.com']}).update_policy(default_csp)
        self.assertEquals(merged['script-src'], ['self', 'code.jquery.com'])
        self.assertEquals(merged['default-src'], ['self'])
        self.assertEquals(merged['img-src'], [])

        # rewrite_policy replaces the defaults wholesale.
        rewritten = CSP({'default-src': ['none']}).rewrite_policy(default_csp)
        self.assertEquals(rewritten['script-src'], [])
        self.assertEquals(rewritten['default-src'], ['none'])
        self.assertEquals(rewritten['report-uri'], [])

        # create_header renders the policy into a header dict.
        header = CSP({'default-src': ['none']}).create_header()
        self.assertEquals(header['Content-Security-Policy'], "default-src 'none'")

        # report-only mode switches to the -Report-Only header name.
        header = CSP({'default-src': ['none'], 'report-only': True}).create_header()
        self.assertEquals(header['Content-Security-Policy-Report-Only'], "default-src 'none'")

    def test_CSP_fail(self):
        """Unknown CSP directive names must raise an exception."""
        with self.assertRaises(Exception):
            CSP({'test-src': ['self', 'code.jquery.com']}).update_policy()
class TestAppUseCase(unittest.TestCase):
    """ test header creation in flask app """

    def setUp(self):
        # Fresh app and default policy set for every test.
        self.app = Flask(__name__)
        self.sh = Secure_Headers()

    def test_defaults(self):
        """ test header wrapper with default headers """
        @self.app.route('/')
        @self.sh.wrapper()
        def index(): return "hi"
        with self.app.test_client() as c:
            result = c.get('/')
            self.assertEquals(result.headers.get('X-XSS-Protection'),'1; mode=block')
            self.assertEquals(result.headers.get('Strict-Transport-Security'),'includeSubDomains; max-age=31536000')
            self.assertEquals(result.headers.get('Public-Key-Pins'),'includeSubDomains; report-uri=/hpkp_report; max-age=5184000')
            self.assertEquals(result.headers.get('X-Content-Type-Options'),'nosniff')
            self.assertEquals(result.headers.get('X-Permitted-Cross-Domain-Policies'),'none')
            self.assertEquals(result.headers.get('X-Download-Options'),'noopen')
            self.assertEquals(result.headers.get('X-Frame-Options'),'sameorigin')
            self.assertEquals(result.headers.get('Content-Security-Policy'),"report-uri /csp_report; default-src 'self'")

    def test_update_function(self):
        """ test config update function """
        self.sh.update(
            {
                'X_Permitted_Cross_Domain_Policies':{'value':'all'},
                'CSP':{'script-src':['self','code.jquery.com']},
                'HPKP':{'pins':[{'sha256':'test123'},{'sha256':'test2256'}]}
            }
        )
        @self.app.route('/')
        @self.sh.wrapper()
        def index(): return "hi"
        with self.app.test_client() as c:
            result = c.get('/')
            self.assertEquals(result.headers.get('X-Permitted-Cross-Domain-Policies'),'all')
            self.assertEquals(result.headers.get('Content-Security-Policy'),"script-src 'self' code.jquery.com; report-uri /csp_report; default-src 'self'")
            self.assertEquals(result.headers.get('Public-Key-Pins'),"pin-sha256=test123; pin-sha256=test2256; includeSubDomains; report-uri=/hpkp_report; max-age=5184000")

    def test_rewrite_function(self):
        """ test config rewrite function """
        self.sh.rewrite(
            {
                'CSP':{'default-src':['none']},
                'HPKP':{'pins':[{'sha256':'test123'}]}
            }
        )
        @self.app.route('/')
        @self.sh.wrapper()
        def index(): return "hi"
        with self.app.test_client() as c:
            # (repaired a corrupted line here; assertions unchanged)
            result = c.get('/')
            self.assertEquals(result.headers.get('Content-Security-Policy'),"default-src 'none'")
            self.assertEquals(result.headers.get('Public-Key-Pins'),"pin-sha256=test123")

    def test_wrapper_update_function(self):
        """ test updating policies from wrapper """
        self.sh.rewrite(
            {
                'CSP':{'default-src':['none']},
                'HPKP':{'pins':[{'sha256':'test123'}]}
            }
        )
        @self.app.route('/')
        @self.sh.wrapper(
            {
                'CSP':{'script-src':['self','code.jquery.com']},
                'X_Permitted_Cross_Domain_Policies':{'value':'none'},
                'X-XSS-Protection':{'value':1,'mode':False},
                'HPKP':{'pins':[{'sha256':'test2256'}]},
            }
        )
        def index(): return "hi"
        with self.app.test_client() as c:
            result = c.get('/')
            self.assertEquals(result.headers.get('X-Permitted-Cross-Domain-Policies'),'none')
            self.assertEquals(result.headers.get('Content-Security-Policy'),"script-src 'self' code.jquery.com; default-src 'none'")
            self.assertEquals(result.headers.get('X-XSS-Protection'),'1')
            self.assertEquals(result.headers.get('Public-Key-Pins'),"pin-sha256=test2256; pin-sha256=test123")
        @self.app.route('/test')
        @self.sh.wrapper({'CSP':{'script-src':['nonce-1234']}})
        def test(): return "hi"
        with self.app.test_client() as c:
            result = c.get('/test')
            self.assertEquals(result.headers.get('Content-Security-Policy'),"script-src 'self' code.jquery.com 'nonce-1234'; default-src 'none'")

    def test_passing_none_value_rewrite(self):
        """ test removing header from update/rewrite """
        self.sh.rewrite({'CSP':None,'X_XSS_Protection':None})
        @self.app.route('/')
        @self.sh.wrapper()
        def index(): return "hi"
        with self.app.test_client() as c:
            result = c.get('/')
            self.assertEquals(result.headers.get('X-Permitted-Cross-Domain-Policies'),'none')
            self.assertEquals(result.headers.get('CSP'),None)
            self.assertEquals(result.headers.get('X-XSS-Protection'),None)

    def test_passing_none_value_wrapper(self):
        """ test removing policy from wrapper """
        @self.app.route('/')
        @self.sh.wrapper({'CSP':None,'X-XSS-Protection':None})
        def index(): return "hi"
        with self.app.test_client() as c:
            result = c.get('/')
            self.assertEquals(result.headers.get('X-Permitted-Cross-Domain-Policies'),'none')
            self.assertEquals(result.headers.get('CSP'),None)
            self.assertEquals(result.headers.get('X-XSS-Protection'),None)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
cloudfoundry-community/splunk-firehose-nozzle | testing/integration/lib/helper.py | Python | apache-2.0 | 507 | 0.001972 | # Common functions used in this project
import os
def get_integration_folder():
    """
    returns the absolute path of the integration test folder
    (the parent of the directory containing this module)
    """
    return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def get_config_folder():
    """
    returns the config folder inside the integration test folder
    """
    return os.path.join(get_integration_folder(), "config")
def get_project_folder():
    """
    returns the project root folder

    NOTE(review): derived by plain string replacement, so the result keeps a
    trailing slash and assumes POSIX separators — confirm callers expect that.
    """
    # Strips the "/testing/integration" suffix from the integration folder path.
    return get_integration_folder().replace("/testing/integration", "/")
|
happyleavesaoc/aoc-mgz | mgz/const.py | Python | mit | 6,198 | 0.001613 | """Constants."""
# pylint: disable=too-many-lines
# Recorded-game mod identifiers mapped to mod names.
MODS = {
    1: 'Wololo Kingdoms',
    2: 'Portuguese Civilization Mod III',
    3: 'Age of Chivalry',
    4: 'Sengoku',
    7: 'Realms',
    101: 'King of the Hippo'
}
# Game-speed identifiers mapped to speed names; several ids map to the same
# name because different builds encode speeds differently (see de/up15 notes).
SPEEDS = {
    100: 'slow',
    150: 'standard',
    169: 'standard', # de
    178: 'standard', # up15
    200: 'fast',
    237: 'fast' # up15
}
DE_MAP_NAMES = {
9: 'Arabia',
10: 'Archipelago',
11: 'Baltic',
12: 'Black Forest',
13: 'Coastal',
14: 'Continental',
15: 'Crater Lake',
16: 'Fortress',
17: 'Gold Rush',
18: 'Highland',
19: 'Islands',
20: 'Mediterranean',
21: 'Migration',
22: 'Rivers',
23: 'Team Islands',
24: 'Full Random',
25: 'Scandinavia',
26: 'Mongolia',
27: 'Yucatan',
28: 'Salt Marsh',
29: 'Arena',
30: 'King of the Hill',
31: 'Oasis',
32: 'Ghost Lake',
33: 'Nomad',
49: 'Iberia',
50: 'Britain',
51: 'Mideast',
52: 'Texas',
53: 'Italy',
54: 'Central America',
55: 'France',
56: 'Norse Lands',
57: 'Sea of Japan (East Sea)',
58: 'Byzantium',
59: 'Custom',
60: 'Random Land Map',
62: 'Random Real World Map',
63: 'Blind Random',
65: 'Random Special Map',
66: 'Random Special Map',
67: 'Acropolis',
68: 'Budapest',
69: 'Cenotes',
70: 'City of Lakes',
71: 'Golden Pit',
72: 'Hideout',
73: 'Hill Fort',
74: 'Lombardia',
75: 'Steppe',
76: 'Valley',
77: 'MegaRandom',
78: 'Hamburger',
79: 'CtR Random',
80: 'CtR Monsoon',
81: 'CtR Pyramid Descent',
82: 'CtR Spiral',
83: 'Kilimanjaro',
84: 'Mountain Pass',
85: 'Nile Delta',
86: 'Serengeti',
87: 'Socotra',
88: 'Amazon',
89: 'China',
90: 'Horn of Africa',
91: 'India',
92: 'Madagascar',
93: 'West Africa',
94: 'Bohemia',
95: 'Earth',
96: 'Canyons',
97: 'Enemy Archipelago',
98: 'Enemy Islands',
99: 'Far Out',
100: 'Front Line',
101: 'Inner Circle',
102: 'Motherland',
103: 'Open Plains',
104: 'Ring of Water',
105: 'Snakepit',
106: 'The Eye',
107: 'Australia',
108: 'Indochina',
109: 'Indonesia',
110: 'Strait of Malacca',
    111: 'Philippines',
112: 'Bog Islands',
113: 'Mangrove Jungle',
114: 'Pacific Islands',
115: 'Sandbank',
116: 'Water Nomad',
117: 'Jungle Islands',
118: 'Holy Line',
119: 'Border Stones',
120: 'Yin Yang',
121: 'Jungle Lanes',
122: 'Alpine Lakes',
123: 'Bogland',
124: 'Mountain Ridge',
125: 'Ravines',
126: 'Wolf Hill',
132: 'Antarctica',
137: 'Custom Map Pool',
    139: 'Golden Swamp',
140: 'Four Lakes',
141: 'Land Nomad',
142: 'Battle on Ice',
143: 'El Dorado',
144: 'Fall of Axum',
145: 'Fall of Rome',
146: 'Majapahit Empire',
147: 'Amazon Tunnel',
148: 'Coastal Forest',
149: 'African Clearing',
150: 'Atacama',
151: 'Seize the Mountain',
152: 'Crater',
153: 'Crossroads',
154: 'Michi',
155: 'Team Moats',
156: 'Volcanic Island'
}
MAP_NAMES = {
9: 'Arabia',
10: 'Archipelago',
11: 'Baltic',
12: 'Black Forest',
13: 'Coastal',
14: 'Continental',
15: 'Crater Lake',
16: 'Fortress',
17: 'Gold Rush',
18: 'Highland',
19: 'Islands',
20: 'Mediterranean',
21: 'Migration',
22: 'Rivers',
23: 'Team Islands',
24: 'Random',
25: 'Scandinavia',
26: 'Mongolia',
27: 'Yucatan',
28: 'Salt Marsh',
29: 'Arena',
30: 'King of the Hill',
31: 'Oasis',
32: 'Ghost Lake',
33: 'Nomad',
34: 'Iberia',
35: 'Britain',
36: 'Mideast',
37: 'Texas',
38: 'Italy',
39: 'Central America',
40: 'France',
41: 'Norse Lands',
42: 'Sea of Japan (East Sea)',
43: 'Byzantinum',
48: 'Blind Random',
49: 'Acropolis',
50: 'Budapest',
51: 'Cenotes',
52: 'City of Lakes',
53: 'Golden Pit',
54: 'Hideout',
55: 'Hill Fort',
56: 'Lombardia',
57: 'Steppe',
58: 'Valley',
59: 'MegaRandom',
60: 'Hamburger',
61: 'CtR Random',
62: 'CtR Monsoon',
63: 'CtR Pyramid Descent',
64: 'CtR Spiral',
66: 'Acropolis',
67: 'Budapest',
68: 'Cenotes',
69: 'City of Lakes',
70: 'Golden Pit',
71: 'Hideout',
72: 'Hill Fort',
73: 'Lombardia',
74: 'Steppe',
75: 'Valley',
76: 'MegaRandom',
77: 'Hamburger',
78: 'CtR Random',
79: 'CtR Monsoon',
80: 'CtR Pyramid Descent',
81: 'CtR Spiral',
82: 'Kilimanjaro',
83: 'Mountain Pass',
84: 'Nile Delta',
85: 'Serengeti',
86: 'Socotra',
87: 'Amazon',
88: 'China',
89: 'Horn of Africa',
90: 'India',
91: 'Madagascar',
92: 'West Africa',
93: 'Bohemia',
94: 'Earth',
95: 'Canyons',
96: 'Enemy Archipelago',
97: 'Enemy Islands',
98: 'Far Out',
99: 'Front Line',
100: 'Inner Circle',
101: 'Motherland',
102: 'Open Plains',
103: 'Ring of Water',
104: 'Snakepit',
105: 'The Eye',
125: 'Ravines'
}
# Map side length (in tiles) mapped to the in-game size name.
MAP_SIZES = {
    120: 'tiny',
    144: 'small',
    168: 'medium',
    200: 'normal',
    220: 'large',
    240: 'giant',
    255: 'maximum'
}
# Compass direction -> [x, y] offsets as fractions of the map dimension.
# Values step in thirds — presumably the lower corner of a 3x3 grid cell;
# TODO confirm against the code that consumes COMPASS.
COMPASS = {
    'northwest': [1/3.0, 0],
    'southeast': [1/3.0, 2/3.0],
    'southwest': [0, 1/3.0],
    'northeast': [2/3.0, 1/3.0],
    'center': [1/3.0, 1/3.0],
    'west': [0, 0],
    'north': [2/3.0, 0],
    'east': [2/3.0, 2/3.0],
    'south': [0, 2/3.0]
}
# Object type IDs that count as buildings: base-game ids first, then the
# appended groups labelled by the trailing comment.
# NOTE(review): 1665 appears in two groups; harmless for membership tests,
# but confirm the duplication is intentional.
VALID_BUILDINGS = [
    10, 12, 14, 18, 19, 20, 31, 32, 42, 45, 47, 49, 50, 51, 63, 64, 68, 70, 71, 72,
    78, 79, 82, 84, 85, 86, 87, 88, 90, 91, 101, 103, 104, 105, 109, 116, 117, 129,
    130, 131, 132, 133, 137, 141, 142, 153, 155, 199, 209, 210, 234, 235, 236, 276,
    331, 357, 463, 464, 465, 484, 487, 488, 490, 491, 498, 562, 563, 564, 565, 584,
    585, 586, 587, 597, 598, 617, 621, 659, 661, 665, 667, 669, 673, 674, 734, 789,
    790, 792, 793, 794, 796, 797, 798, 800, 801, 802, 804, 1189
] + [1553, 387, 110, 785, 1002] + [1021, 1187, 1251, 1665] + [946, 947, 886, 888, 881, 879, 938, 871] + [1665] # 5th Age, Realms, DE, Various, DE DLC1
|
VectorBlox/PYNQ | python/pynq/iop/tests/test_pmod_cable.py | Python | bsd-3-clause | 7,289 | 0.025655 | # Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
from random import randint
from time import sleep
import pytest
from pynq import Overlay
from pynq.iop import PMODA
from pynq.iop import PMODB
from pynq.iop import Pmod_Cable
from pynq.tests.util import user_answer_yes
from pynq.tests.util import get_pmod_id
# Interactive setup: ask the operator whether the cable fixture exists and,
# if so, which Pmod ports act as sender (TX) and receiver (RX).
flag = user_answer_yes("\nTwo Pmod interfaces connected by a cable?")
if flag:
    # NOTE(review): `global` at module scope is a no-op; TX_PORT/RX_PORT
    # become module globals by plain assignment below anyway.
    global TX_PORT,RX_PORT
    send_id = get_pmod_id('sender')
    if send_id == 'A':
        TX_PORT = PMODA
    elif send_id == 'B':
        TX_PORT = PMODB
    else:
        raise ValueError("Please type in A or B.")
    recv_id = get_pmod_id('receiver')
    if recv_id == 'A':
        RX_PORT = PMODA
    elif recv_id == 'B':
        RX_PORT = PMODB
    else:
        raise ValueError("Please type in A or B.")
@pytest.mark.run(order=16)
@pytest.mark.skipif(not flag, reason="need Pmod cable connected to run")
def test_cable_type():
    """Tests for the Pmod cable type.
    Note
    ----
    The cable type can only be 'straight' or 'loopback'.
    Default cable type is straight.
    The Pmod IO layout is:
    Upper row: {vdd,gnd,3,2,1,0}.
    Lower row: {vdd,gnd,7,6,5,4}.
    A loopback cable returns the driven pattern unchanged; a straight cable
    swaps the rows, so the pattern reads back reversed (see checks below).
    """
    print('\nTesting Pmod IO cable...')
    assert not TX_PORT == RX_PORT, \
        "The sender port cannot be the receiver port."
    global tx,rx
    # All 8 pins: TX as outputs, RX as inputs (cable type fixed up below).
    tx = [Pmod_Cable(TX_PORT,k,'out','loopback') for k in range(8)]
    rx = [Pmod_Cable(RX_PORT,k,'in','loopback') for k in range(8)]
    # Drive a known pattern: upper-row pins 0,3 low; lower-row pins 4,7 high.
    tx[0].write(0)
    tx[3].write(0)
    tx[4].write(1)
    tx[7].write(1)
    if [rx[0].read(),rx[3].read(),rx[4].read(),rx[7].read()]==[0,0,1,1]:
        # Using a loop-back cable
        for i in range(8):
            rx[i].set_cable('loopback')
    elif [rx[0].read(),rx[3].read(),rx[4].read(),rx[7].read()]==[1,1,0,0]:
        # Using a straight cable
        for i in range(8):
            rx[i].set_cable('straight')
    else:
        raise AssertionError("Cable unrecognizable.")
@pytest.mark.run(order=17)
@pytest.mark.skipif(not flag, reason="need Pmod cable connected to run")
def test_rshift1():
    """Test for right shifting the bit "1".
    The sender will send patterns with the bit "1" right shifted each time.
    """
    print('\nGenerating tests for right shifting a \"1\"...')
    global tx,rx
    for i in range(8):
        if i==0:
            data1 = [1,0,0,0,0,0,0,0]
        else:
            # Rotate the previous pattern right by one position.
            data1 = data1[-1:]+data1[:-1]
        data2 = [0,0,0,0,0,0,0,0]
        # The moving "1" sits at pin i, so reading back only pin i suffices.
        tx[i].write(data1[i])
        sleep(0.001)
        data2[i] = rx[i].read()
        assert data1==data2,\
            'Sent {} != received {} at Pin {}.'.format(data1,data2,i)
@pytest.mark.run(order=18)
@pytest.mark.skipif(not flag, reason="need Pmod cable connected to run")
def test_rshift0():
    """Test for right shifting the bit "0".
    The sender will send patterns with the bit "0" right shifted each time.
    """
    print('\nGenerating tests for right shifting a \"0\"...')
    global tx,rx
    for i in range(8):
        if i==0:
            data1 = [0,1,1,1,1,1,1,1]
        else:
            # Rotate the previous pattern right by one position.
            data1 = data1[-1:]+data1[:-1]
        data2 = [1,1,1,1,1,1,1,1]
        # The moving "0" sits at pin i, so reading back only pin i suffices.
        tx[i].write(data1[i])
        sleep(0.001)
        data2[i] = rx[i].read()
        assert data1==data2,\
            'Sent {} != received {} at Pin {}.'.format(data1,data2,i)
@pytest.mark.run(order=19)
@pytest.mark.skipif(not flag, reason="need Pmod cable connected to run")
def test_lshift1():
    """Test for left shifting the bit "1".
    The sender will send patterns with the bit "1" left shifted each time.
    (Repairs a corrupted `if i==0:` line; behavior unchanged.)
    """
    print('\nGenerating tests for left shifting a \"1\"...')
    global tx,rx
    for i in range(8):
        if i==0:
            data1 = [0,0,0,0,0,0,0,1]
        else:
            # Rotate the previous pattern left by one position.
            data1 = data1[1:]+data1[:1]
        data2 = [0,0,0,0,0,0,0,0]
        # The moving "1" sits at pin 7-i, so only that pin needs checking.
        tx[7-i].write(data1[7-i])
        sleep(0.001)
        data2[7-i] = rx[7-i].read()
        assert data1==data2,\
            'Sent {} != received {} at Pin {}.'.format(data1,data2,7-i)
@pytest.mark.run(order=20)
@pytest.mark.skipif(not flag, reason="need Pmod cable connected to run")
def test_lshift0():
    """Test for left shifting the bit "0".
    The sender will send patterns with the bit "0" left shifted each time.
    """
    print('\nGenerating tests for left shifting a \"0\"...')
    global tx,rx
    for i in range(8):
        if i==0:
            data1 = [1,1,1,1,1,1,1,0]
        else:
            # Rotate the previous pattern left by one position.
            data1 = data1[1:]+data1[:1]
        data2 = [1,1,1,1,1,1,1,1]
        # The moving "0" sits at pin 7-i, so only that pin needs checking.
        tx[7-i].write(data1[7-i])
        sleep(0.001)
        data2[7-i] = rx[7-i].read()
        assert data1==data2,\
            'Sent {} != received {} at Pin {}.'.format(data1,data2,7-i)
@pytest.mark.run(order=21)
@pytest.mark.skipif(not flag, reason="need Pmod cable connected to run")
def test_random():
    """Test for random patterns.
    Testing software-generated pseudo-random numbers. Random 0/1's are
    generated at each bit location. 8 bits (1 bit per pin) are sent out
    in every iteration. This test may take a few seconds to finish.
    """
    print('\nGenerating 100 random tests...')
    global tx,rx
    for i in range(100):
        data1=[0,0,0,0,0,0,0,0]
        # data2 starts at the opposite value so a failed read is visible.
        data2=[1,1,1,1,1,1,1,1]
        for j in range(8):
            data1[j] = randint(0,1)
            tx[j].write(data1[j])
            sleep(0.001)
            data2[j] = rx[j].read()
        assert data1==data2,\
            'Sent {} != received {} at Pin {}.'.format(data1,data2,j)
    # Last cable test in the sequence: release the IO handles.
    del tx,rx
|
NikNitro/Python-iBeacon-Scan | sympy/assumptions/tests/test_assumptions_2.py | Python | gpl-3.0 | 1,849 | 0.000541 | """
rename this to test_assumptions.py when the old assumptions system is deleted
"""
from sympy.abc import x, y
from sympy.assumptions.assume import global_assumptions, Predicate
from sympy.assumptions.ask import _extract_facts, Q
from sympy.core import symbols
from sympy.logic.boolalg import Or
from sympy.printing import pretty
from sympy.utilities.pytest import XFAIL
def test_equal():
    """Test for equality"""
    positive_x = Q.positive(x)
    # Equal predicates compare equal; a predicate never equals its negation.
    assert positive_x == Q.positive(x)
    assert positive_x != ~Q.positive(x)
    assert ~positive_x == ~Q.positive(x)
def test_pretty():
    """Pretty-printing of predicates and predicate sets."""
    assert pretty(Q.positive(x)) == "Q.positive(x)"
    # Expected string uses the Python 2 `set([...])` repr — presumably this
    # file targets Python 2; confirm before porting.
    assert pretty(
        set([Q.positive, Q.integer])) == "set([Q.integer, Q.positive])"
def test_extract_facts():
    """_extract_facts pulls the facts about one symbol out of a boolean expr.

    (Repairs a corrupted assert line; also uses `is None` consistently with
    the other None checks — behavior is unchanged since only a true None
    return satisfies either form here.)
    """
    a, b = symbols('a b', cls=Predicate)
    assert _extract_facts(a(x), x) == a
    assert _extract_facts(a(x), y) is None
    assert _extract_facts(~a(x), x) == ~a
    assert _extract_facts(~a(x), y) is None
    assert _extract_facts(a(x) | b(x), x) == a | b
    assert _extract_facts(a(x) | ~b(x), x) == a | ~b
    # In a conjunction, facts about each symbol extract independently.
    assert _extract_facts(a(x) & b(y), x) == a
    assert _extract_facts(a(x) & b(y), y) == b
    # A disjunction involving another symbol yields nothing definite for x.
    assert _extract_facts(a(x) | b(y), x) is None
    assert _extract_facts(~(a(x) | b(y)), x) == ~a
def test_global():
    """Test for global assumptions.

    (Uses the idiomatic `not in` instead of `not X in Y`; also repairs a
    corrupted assert line. Behavior is identical.)
    """
    global_assumptions.add(Q.is_true(x > 0))
    assert Q.is_true(x > 0) in global_assumptions
    global_assumptions.remove(Q.is_true(x > 0))
    assert Q.is_true(x > 0) not in global_assumptions
    # same with multiple assumptions
    global_assumptions.add(Q.is_true(x > 0), Q.is_true(y > 0))
    assert Q.is_true(x > 0) in global_assumptions
    assert Q.is_true(y > 0) in global_assumptions
    global_assumptions.clear()
    assert Q.is_true(x > 0) not in global_assumptions
    assert Q.is_true(y > 0) not in global_assumptions
|
JensGrabner/mpmath | mpmath/calculus/polynomials.py | Python | bsd-3-clause | 7,877 | 0.001396 | from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Polynomials #
#----------------------------------------------------------------------------#
# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
    :func:`~mpmath.polyval` evaluates the polynomial

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
    evaluates `P(x)` with the derivative, `P'(x)`, and returns the
    tuple `(P(x), P'(x))`.

        >>> from mpmath import *
        >>> mp.pretty = True
        >>> polyval([3, 0, 2], 0.5)
        2.75
        >>> polyval([3, 0, 2], 0.5, derivative=True)
        (2.75, 3.0)

    The coefficients and the evaluation point may be any combination
    of real or complex numbers.
    """
    if not coeffs:
        return ctx.zero
    # Horner's scheme; `slope` carries the derivative alongside the value.
    value = ctx.convert(coeffs[0])
    slope = ctx.zero
    if derivative:
        for coeff in coeffs[1:]:
            slope = value + x*slope
            value = coeff + x*value
        return value, slope
    for coeff in coeffs[1:]:
        value = coeff + x*value
    return value
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
        error=False, roots_init=None):
    """
    Computes all roots (real or complex) of a given polynomial.

    The roots are returned as a sorted list, where real roots appear first
    followed by complex conjugate roots as adjacent elements. The polynomial
    should be given as a list of coefficients, in the format used by
    :func:`~mpmath.polyval`. The leading coefficient must be nonzero.

    With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
    where *err* is an estimate of the maximum error among the computed roots.

    **Examples**

    Finding the three real roots of `x^3 - x^2 - 14x + 24`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(polyroots([1,-1,-14,24]), 4)
        [-4.0, 2.0, 3.0]

    Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
    error estimate::

        >>> roots, err = polyroots([4,3,2], error=True)
        >>> for r in roots:
        ...     print(r)
        ...
        (-0.375 + 0.59947894041409j)
        (-0.375 - 0.59947894041409j)
        >>>
        >>> err
        2.22044604925031e-16
        >>>
        >>> polyval([4,3,2], roots[0])
        (2.22044604925031e-16 + 0.0j)
        >>> polyval([4,3,2], roots[1])
        (2.22044604925031e-16 + 0.0j)

    The following example computes all the 5th roots of unity; that is,
    the roots of `x^5 - 1`::

        >>> mp.dps = 20
        >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
        ...     print(r)
        ...
        1.0
        (-0.8090169943749474241 + 0.58778525229247312917j)
        (-0.8090169943749474241 - 0.58778525229247312917j)
        (0.3090169943749474241 + 0.95105651629515357212j)
        (0.3090169943749474241 - 0.95105651629515357212j)

    **Precision and conditioning**

    The roots are computed to the current working precision accuracy. If this
    accuracy cannot be achieved in ``maxsteps`` steps, then a
    ``NoConvergence`` exception is raised. The algorithm internally is using
    the current working precision extended by ``extraprec``. If
    ``NoConvergence`` was raised, that is caused either by not having enough
    extra precision to achieve convergence (in which case increasing
    ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
    case increasing ``maxsteps`` should fix the problem), or a combination of
    both. The user should always do a convergence study with regards to
    ``extraprec`` to ensure accurate results. It is possible to get
    convergence to a wrong answer with too low ``extraprec``.

    **Algorithm**

    :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
    uses complex arithmetic to locate all roots simultaneously. It can be
    viewed as approximately performing simultaneous Newton iteration for all
    the roots; in particular, the convergence to simple roots is quadratic.

    Although all roots are internally calculated using complex arithmetic, any
    root found to have an imaginary part smaller than the estimated numerical
    error is truncated to a real number (small real parts are also chopped).
    Real roots are placed first in the returned list, sorted by value. The
    remaining complex roots are sorted by their real parts so that conjugate
    roots end up next to each other.

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method
    """
    if len(coeffs) <= 1:
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # Constant polynomial with no roots
        return []

    orig = ctx.prec
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Must be monic
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            coeffs = [ctx.convert(c) for c in coeffs]
        else:
            coeffs = [c/lead for c in coeffs]
        f = lambda x: ctx.polyval(coeffs, x)
        if roots_init is None:
            # Standard Durand-Kerner starting values: powers of a point that
            # is neither real nor a root of unity.
            roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
        else:
            # Seed with caller-supplied approximations, pad with defaults.
            roots = [None]*deg
            deg_init = min(deg, len(roots_init))
            roots[:deg_init] = list(roots_init[:deg_init])
            roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
                                in xrange(deg_init,deg)]
        err = [ctx.one for n in xrange(deg)]
        # Durand-Kerner iteration until convergence
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = f(p)
                for j in xrange(deg):
                    if i != j:
                        try:
                            x /= (p-roots[j])
                        except ZeroDivisionError:
                            # Two current approximations coincide; skip the
                            # division and let the next sweep separate them.
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence(
                "Didn't converge in maxsteps=%d steps." % maxsteps)
        # Remove small real or imaginary parts
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    roots[i] = roots[i].imag * 1j
        # Real roots first (im == 0 sorts lowest), then by real part so
        # conjugate pairs are adjacent.
        roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
    if error:
        err = max(err)
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        return [+r for r in roots]
|
kobejean/tensorflow | tensorflow/contrib/eager/python/examples/revnet/main.py | Python | apache-2.0 | 9,700 | 0.007835 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager execution workflow with RevNet train on CIFAR-10."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import cifar_input
from tensorflow.contrib.eager.python.examples.revnet import config as config_
from tensorflow.contrib.eager.python.examples.revnet import revnet
tfe = tf.contrib.eager
def apply_gradients(optimizer, grads, vars_, global_step=None):
  """Functional style apply_grads for `tfe.defun`."""
  grads_and_vars = zip(grads, vars_)
  optimizer.apply_gradients(grads_and_vars, global_step=global_step)
def main(_):
  """Eager execution workflow with RevNet trained on CIFAR-10.

  Builds the model, optimizer and datasets from command-line FLAGS,
  optionally wraps hot methods with `tfe.defun`, optionally restores the
  latest checkpoint, then runs the training loop with periodic evaluation,
  summary writing and checkpointing.
  """
  tf.enable_eager_execution()
  config = get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
  ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(
      data_dir=FLAGS.data_dir, config=config)
  model = revnet.RevNet(config=config)
  global_step = tf.train.get_or_create_global_step()  # Ensure correct summary
  global_step.assign(1)
  # Piecewise-constant learning-rate schedule driven by the global step.
  learning_rate = tf.train.piecewise_constant(
      global_step, config.lr_decay_steps, config.lr_list)
  optimizer = tf.train.MomentumOptimizer(
      learning_rate, momentum=config.momentum)
  checkpointer = tf.train.Checkpoint(
      optimizer=optimizer, model=model, optimizer_step=global_step)
  if FLAGS.use_defun:
    # Compile the hot paths into graph functions for speed.
    model.call = tfe.defun(model.call)
    model.compute_gradients = tfe.defun(model.compute_gradients)
    model.get_moving_stats = tfe.defun(model.get_moving_stats)
    model.restore_moving_stats = tfe.defun(model.restore_moving_stats)
    # Rebind the module-level helper so train_one_iter picks up the
    # compiled version too.
    global apply_gradients  # pylint:disable=global-variable-undefined
    apply_gradients = tfe.defun(apply_gradients)
  if FLAGS.train_dir:
    summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
    if FLAGS.restore:
      latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
      checkpointer.restore(latest_path)
      print("Restored latest checkpoint at path:\"{}\" "
            "with global_step: {}".format(latest_path, global_step.numpy()))
      sys.stdout.flush()
  # Main training loop; `evaluate` is presumably defined elsewhere in this
  # file (not visible in this chunk) -- confirm before relying on it.
  for x, y in ds_train:
    train_one_iter(model, x, y, optimizer, global_step=global_step)
    if global_step.numpy() % config.log_every == 0:
      # Periodic evaluation on the test (and optionally train/validation) sets.
      it_test = ds_test.make_one_shot_iterator()
      acc_test, loss_test = evaluate(model, it_test)
      if FLAGS.validate:
        it_train = ds_train_one_shot.make_one_shot_iterator()
        it_validation = ds_validation.make_one_shot_iterator()
        acc_train, loss_train = evaluate(model, it_train)
        acc_validation, loss_validation = evaluate(model, it_validation)
        print("Iter {}, "
              "training set accuracy {:.4f}, loss {:.4f}; "
              "validation set accuracy {:.4f}, loss {:.4f}; "
              "test accuracy {:.4f}, loss {:.4f}".format(
                  global_step.numpy(), acc_train, loss_train, acc_validation,
                  loss_validation, acc_test, loss_test))
      else:
        print("Iter {}, test accuracy {:.4f}, loss {:.4f}".format(
            global_step.numpy(), acc_test, loss_test))
      sys.stdout.flush()
      if FLAGS.train_dir:
        # Mirror the printed metrics into TensorBoard summaries.
        with summary_writer.as_default():
          with tf.contrib.summary.always_record_summaries():
            tf.contrib.summary.scalar("Test accuracy", acc_test)
            tf.contrib.summary.scalar("Test loss", loss_test)
            if FLAGS.validate:
              tf.contrib.summary.scalar("Training accuracy", acc_train)
              tf.contrib.summary.scalar("Training loss", loss_train)
              tf.contrib.summary.scalar("Validation accuracy", acc_validation)
              tf.contrib.summary.scalar("Validation loss", loss_validation)
    if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
      saved_path = checkpointer.save(
          file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
      print("Saved checkpoint at path: \"{}\" "
            "with global_step: {}".format(saved_path, global_step.numpy()))
      sys.stdout.flush()
def get_config(config_name="revnet-38", dataset="cifar-10"):
  """Return the hyperparameter configuration for the given model/dataset.

  Args:
    config_name: one of "revnet-38", "revnet-110", "revnet-164".
    dataset: "cifar-10" or anything else (treated as cifar-100).

  Returns:
    The hparams object with `n_classes` and `dataset` added.

  Raises:
    KeyError: if `config_name` is not a known configuration.
  """
  print("Config: {}".format(config_name))
  sys.stdout.flush()
  # Map names to constructors so that only the requested configuration is
  # actually built (the original built all three hparams objects eagerly).
  config_factories = {
      "revnet-38": config_.get_hparams_cifar_38,
      "revnet-110": config_.get_hparams_cifar_110,
      "revnet-164": config_.get_hparams_cifar_164,
  }
  config = config_factories[config_name]()  # KeyError preserved for unknown names
  if dataset == "cifar-10":
    config.add_hparam("n_classes", 10)
    config.add_hparam("dataset", "cifar-10")
  else:
    config.add_hparam("n_classes", 100)
    config.add_hparam("dataset", "cifar-100")
  return config
def get_datasets(data_dir, config):
  """Return the train/eval datasets for the configured CIFAR variant.

  Args:
    data_dir: root directory holding the per-dataset TFRecord folders.
    config: hparams object with batch sizes, epochs, format and dtype.

  Returns:
    Tuple `(ds_train, ds_train_one_shot, ds_validation, ds_test)`;
    `ds_validation` is None unless FLAGS.validate is set.

  Raises:
    ValueError: on a missing/invalid data directory or unknown dataset.
  """
  if data_dir is None:
    raise ValueError("No supplied data directory")
  if not os.path.exists(data_dir):
    raise ValueError("Data directory {} does not exist".format(data_dir))
  if config.dataset not in ["cifar-10", "cifar-100"]:
    raise ValueError("Unknown dataset {}".format(config.dataset))
  print("Training on {} dataset.".format(config.dataset))
  sys.stdout.flush()
  data_dir = os.path.join(data_dir, config.dataset)

  def _make_ds(split, data_aug, batch_size, epochs, shuffle):
    # Shared kwargs for every split; factored out of the original's three
    # near-identical call sites.
    return cifar_input.get_ds_from_tfrecords(
        data_dir=data_dir,
        split=split,
        data_aug=data_aug,
        batch_size=batch_size,
        epochs=epochs,
        shuffle=shuffle,
        data_format=config.data_format,
        dtype=config.dtype,
        prefetch=batch_size)

  if FLAGS.validate:
    # 40k Training set
    ds_train = _make_ds("train", True, config.batch_size, config.epochs,
                        config.shuffle)
    # 10k Validation set (held out from training data)
    ds_validation = _make_ds("validation", False, config.eval_batch_size,
                             1, False)
  else:
    # 50k Training set
    ds_train = _make_ds("train_all", True, config.batch_size, config.epochs,
                        config.shuffle)
    ds_validation = None
  # Always compute loss and accuracy on whole test set
  ds_train_one_shot = _make_ds("train_all", False, config.eval_batch_size,
                               1, False)
  ds_test = _make_ds("test", False, config.eval_batch_size, 1, False)
  return ds_train, ds_train_one_shot, ds_validation, ds_test
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
  """Train for one iteration and return the training loss."""
  # Forward pass is needed for its side effect of producing saved_hiddens
  # for the reversible backward pass; logits themselves are unused here.
  logits, saved_hiddens = model(inputs, training=True)
  values = model.get_moving_stats()
  grads, loss = model.compute_gradients(saved_hiddens, labels)
  # Restore moving averages when executing eagerly to avoid updating twice
  model.restore_moving_stats(values)
  apply_gradients(
      optimizer, grads, model.trainable_variables, global_step=global_step)
  # NOTE(review): last line was truncated in the extracted source ("return l");
  # `loss` is the only plausible value in scope -- confirm against upstream.
  return loss
AlexSzatmary/bake | examples/poisson/myBake/__init__.py | Python | mit | 19 | 0.052632 | #from api | import *
| |
mitodl/micromasters | grades/tasks.py | Python | bsd-3-clause | 7,724 | 0.002071 | """
Tasks for the grades app
"""
import logging
from celery import group
from celery.result import GroupResult
from django.contrib.auth.models import User
from django.core.cache import caches
from django.db import IntegrityError
from django.db.models import OuterRef, Exists
from django_redis import get_redis_connection
from courses.models import CourseRun, Course
from grades import api
from grades.constants import FinalGradeStatus
from grades.models import (
FinalGrade,
ProctoredExamGrade,
MicromastersCourseCertificate,
CourseRunGradingStatus,
CombinedFinalGrade,
)
from micromasters.celery import app
from micromasters.utils import chunks, now_in_utc
CACHE_ID_BASE_STR = "freeze_grade_{0}"
log = logging.getLogger(__name__)
cache_redis = caches['redis']
@app.task
def generate_course_certificates_for_fa_students():
    """
    Creates any missing unique course-user MicromastersCourseCertificates
    for live, financial-aid programs.
    """
    courses = Course.objects.filter(
        program__live=True,
        program__financial_aid_availability=True
    )
    for course in courses:
        # Certificates only make sense once at least one run has frozen grades
        if not course.has_frozen_runs():
            continue
        course_certificates = MicromastersCourseCertificate.objects.filter(
            course=course,
            user=OuterRef('user')
        )
        # Find users that passed the course but don't have a certificate yet
        users_need_cert = FinalGrade.objects.annotate(
            course_certificate=Exists(course_certificates)
        ).filter(
            course_run__course=course,
            status=FinalGradeStatus.COMPLETE,
            passed=True,
            course_certificate=False
        ).values_list('user', flat=True)
        if course.has_exam:
            # need also to pass the proctored exam, with grades released
            users_need_cert = ProctoredExamGrade.objects.filter(
                course=course,
                passed=True,
                exam_run__date_grades_available__lte=now_in_utc(),
                user__in=users_need_cert
            ).values_list('user', flat=True)
        for user in users_need_cert:
            try:
                MicromastersCourseCertificate.objects.get_or_create(
                    user_id=user,
                    course=course
                )
            except (IntegrityError, MicromastersCourseCertificate.DoesNotExist):
                # Log and continue so one bad row doesn't abort the batch
                log.exception(
                    "Unable to fetch or create certificate for user id: %d and course: %s",
                    user,
                    course.title
                )
@app.task
def create_combined_final_grades():
    """
    Creates any missing CombinedFinalGrades for live, financial-aid
    courses that have frozen runs and an exam.
    """
    fa_courses = Course.objects.filter(
        program__live=True,
        program__financial_aid_availability=True
    )
    for fa_course in fa_courses:
        # Only courses with frozen runs and an exam get combined grades.
        if not (fa_course.has_frozen_runs() and fa_course.has_exam):
            continue
        passing_exam_grades = ProctoredExamGrade.objects.filter(
            course=fa_course,
            passed=True,
            exam_run__date_grades_available__lte=now_in_utc()
        )
        already_graded = set(
            CombinedFinalGrade.objects.filter(course=fa_course).values_list('user', flat=True)
        )
        for exam_grade in passing_exam_grades:
            if exam_grade.user.id not in already_graded:
                api.update_or_create_combined_final_grade(exam_grade.user, fa_course)
@app.task
def find_course_runs_and_freeze_grades():
    """
    Async task that finds every course run whose students' final grades
    can be frozen and schedules one freezing subtask per run.

    Args:
        None

    Returns:
        None
    """
    for freezable_run in CourseRun.get_freezable():
        freeze_course_run_final_grades.delay(freezable_run.id)
@app.task
def freeze_course_run_final_grades(course_run_id):
    """
    Async task manager to freeze all the users' final grade in a course run

    The task is re-entrant: each invocation first reaps the subtask group
    spawned by a previous invocation (tracked via a redis cache key), then
    either marks grading complete or spawns a new group of subtasks for the
    users still missing a frozen grade.

    Args:
        course_run_id (int): a course run id

    Returns:
        None
    """
    course_run = CourseRun.objects.get(id=course_run_id)
    # no need to do anything if the course run is not ready
    if not course_run.can_freeze_grades:
        log.info('the grades course "%s" cannot be frozen yet', course_run.edx_course_key)
        return
    # if it has already completed, do not do anything
    if CourseRunGradingStatus.is_complete(course_run):
        log.info('Final Grades freezing for course run "%s" has already been completed', course_run.edx_course_key)
        return
    # cache id string for this task
    cache_id = CACHE_ID_BASE_STR.format(course_run.edx_course_key)
    # try to get the result id from a previous iteration of this task for this course run
    group_results_id = cache_redis.get(cache_id)
    # if the id is not none, it means that this task already run before for this course run
    # so we need to check if its subtasks have finished
    if group_results_id is not None:
        # delete the entry from the cache (if needed it will be added again later)
        cache_redis.delete(cache_id)
        # extract the results from the id
        results = GroupResult.restore(group_results_id, app=app)
        # if the subtasks are not done, revoke them
        # NOTE(review): revoke()/delete() are called unconditionally here,
        # even when the group has finished -- presumably both are no-ops on a
        # completed group; confirm against the celery version in use.
        results.revoke()
        # delete the results anyway
        results.delete()
    # extract the users to be frozen for this course
    user_ids_qset = api.get_users_without_frozen_final_grade(course_run).values_list('id', flat=True)
    # find number of users for which cache could not be updated
    con = get_redis_connection("redis")
    failed_users_cache_key = api.CACHE_KEY_FAILED_USERS_BASE_STR.format(course_run.edx_course_key)
    failed_users_count = con.llen(failed_users_cache_key)
    # get the list of users that failed authentication last run of the task
    failed_users_list = list(map(int, con.lrange(failed_users_cache_key, 0, failed_users_count)))
    users_need_freeze = list(user_ids_qset)
    # users that still need freezing, excluding those that failed last time
    users_left = list(set(users_need_freeze) - set(failed_users_list))
    # if there are no more users to be frozen, just complete the task
    if not users_left:
        log.info('Completing grading with %d users getting refresh cache errors', len(failed_users_list))
        CourseRunGradingStatus.set_to_complete(course_run)
        con.delete(failed_users_cache_key)
        return
    # if the task reaches this point, it means there are users still to be processed
    # clear the list for users for whom cache update failed
    con.delete(failed_users_cache_key)
    # create an entry in with pending status ('pending' is the default status)
    CourseRunGradingStatus.create_pending(course_run=course_run)
    # create a group of subtasks to be run in parallel
    job = group(
        freeze_users_final_grade_async.s(list_user_ids, course_run.id) for list_user_ids in chunks(user_ids_qset)
    )
    results = job.apply_async()
    # save the result ID in the celery backend
    results.save()
    # put the results id in the cache to be retrieved and finalized later
    cache_redis.set(cache_id, results.id, None)
@app.task
def freeze_users_final_grade_async(user_ids, course_run_id):
    """
    Async task to freeze the final grade in a course run for a list of users.

    Args:
        user_ids (list): a list of django user ids
        course_run_id (int): a course run id

    Returns:
        None
    """
    course_run = CourseRun.objects.get(id=course_run_id)
    for user in User.objects.filter(id__in=user_ids):
        try:
            api.freeze_user_final_grade(user, course_run)
        except Exception:  # pylint: disable=broad-except
            # Deliberately broad: one user's failure must not abort the batch.
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            log.exception(
                'Impossible to freeze final grade for user "%s" in course %s',
                user.username, course_run.edx_course_key
            )
|
ITU-DB-MANAGEMENT-HM/itunder-backend-rest | controllers/lecturers.py | Python | apache-2.0 | 2,001 | 0.003998 | import re
import json
import requests
from flask import request
from constants import MOBIL_ITU_AUTH_URL
from middlewares import auth_func
from server import app
from models.setupdb import lecturers
@app.route('/api/lecturers', methods=['GET', 'POST', 'DELETE', 'PUT'])
def lecturer():
    """
    GET request shows lecturers of given department\n
    POST request creates new lecturer\n
    DELETE request deletes a lecturer\n
    PUT request updates the lecturer
    """
    # Works on frontend
    if request.method == 'GET':
        try:
            dep = request.args["dep"]
            app.logger.debug(app)
            return json.dumps(lecturers.listLecturersOfDepartment(data=dep))
        except Exception:
            # Missing/invalid "dep" query parameter (or a lookup failure)
            return "Please give proper department", 404
    elif request.method == 'POST':
        data = request.get_json()
        isEmailExists = data.get("email", -1)
        if isEmailExists == -1:
            # NOTE(review): returns 200 while the other error paths use 404 --
            # kept as-is for backward compatibility; confirm intended status.
            return "Please provide email address!"
        # Raw string so the regex backslash is explicit (same pattern as before)
        if not re.match(r"[^@]+@[^@]+\.[^@]+", data['email']):
            return "Invalid email address!", 404
        lecturers.addLecturer(data)
        return "Success", 200
    elif request.method == 'DELETE':
        data = request.get_json()
        lecturers.removeLecturer(data=data)
        try:
            dep = request.args["dep"]
            return json.dumps(lecturers.listLecturersOfDepartment(data=dep))
        except Exception:
            # NOTE(review): DELETE/PUT report the missing department with 200,
            # GET with 404 -- inconsistent, but preserved.
            return "Please give proper department", 200
    elif request.method == 'PUT':
        data = request.get_json()
        json.dumps(lecturers.updateLecturer(data))
        try:
            dep = request.args["dep"]
            return json.dumps(lecturers.listLecturersOfDepartment(data=dep))
        except Exception:
            return "Please give proper department", 200
@app.route('/api/lecturers/<lid>', methods=['GET'])
def show_a_lecturer(lid):
    """Return the JSON representation of a single lecturer by id."""
    if request.method == 'GET':
        lecturer_record = lecturers.showALecturer(data={'id': lid})
        return json.dumps(lecturer_record)
|
mgadi/naemonbox | sources/psdash/gevent-1.0.1/greentest/test__socket_timeout.py | Python | gpl-2.0 | 1,258 | 0.000795 | import sys
import gevent
from gevent import socket
import greentest
class Test(greentest.TestCase):
    """Check that a recv() on a connected socket honours settimeout()."""

    def start(self):
        # Listen on an ephemeral port; accept runs in a background greenlet.
        self.server = socket.socket()
        self.server.bind(('127.0.0.1', 0))
        self.server.listen(1)
        self.server_port = self.server.getsockname()[1]
        self.acceptor = gevent.spawn(self.server.accept)

    def stop(self):
        self.server.close()
        self.acceptor.kill()
        del self.acceptor
        del self.server

    def test(self):
        self.start()
        try:
            sock = socket.socket()
            sock.connect(('127.0.0.1', self.server_port))
            try:
                sock.settimeout(0.1)
                try:
                    result = sock.recv(1024)
                    raise AssertionError('Expected timeout to be raised, instead recv() returned %r' % (result, ))
                except socket.error:
                    ex = sys.exc_info()[1]
                    self.assertEqual(ex.args, ('timed out',))
                    self.assertEqual(str(ex), 'timed out')
                    # Python 2-style exception indexing; this file targets py2.
                    self.assertEqual(ex[0], 'timed out')
            finally:
                sock.close()
        finally:
            self.stop()
if __name__ == '__main__':
greentest.main()
|
lakshmi-kannan/st2contrib | packs/twitter/sensors/twitter_search_sensor.py | Python | apache-2.0 | 3,371 | 0.000593 | from TwitterSearch import TwitterSearch
from TwitterSearch import TwitterSearchOrder
from st2reactor.sensor.base import PollingSensor
__all__ = [
'TwitterSearchSensor'
]
BASE_URL = 'https://twitter.com'
class TwitterSearchSensor(PollingSensor):
    """StackStorm polling sensor that dispatches a trigger for every tweet
    matching the configured Twitter search query."""

    def __init__(self, sensor_service, config=None, poll_interval=None):
        super(TwitterSearchSensor, self).__init__(sensor_service=sensor_service,
                                                  config=config,
                                                  poll_interval=poll_interval)
        self._trigger_ref = 'twitter.matched_tweet'
        self._logger = self._sensor_service.get_logger(__name__)

    def setup(self):
        self._client = TwitterSearch(
            consumer_key=self._config['consumer_key'],
            consumer_secret=self._config['consumer_secret'],
            access_token=self._config['access_token'],
            access_token_secret=self._config['access_token_secret']
        )
        self._last_id = None

    def poll(self):
        # Build the search order from the sensor configuration.
        tso = TwitterSearchOrder()
        tso.set_keywords([self._config['query']])
        language = self._config.get('language', None)
        if language:
            tso.set_language(language)
        tso.set_result_type('recent')
        tso.set_count(self._config.get('count', 30))
        tso.set_include_entities(False)
        # Only fetch tweets newer than the last one we dispatched.
        last_id = self._get_last_id()
        if last_id:
            tso.set_since_id(int(last_id))
        try:
            tweets = self._client.search_tweets(tso)
            tweets = tweets['content']['statuses']
        except Exception as e:
            self._logger.exception('Polling Twitter failed: %s' % (str(e)))
            return
        # API returns newest first; dispatch oldest first.
        tweets = list(reversed(tweets))
        if tweets:
            self._set_last_id(last_id=tweets[-1]['id'])
        for tweet in tweets:
            self._dispatch_trigger_for_tweet(tweet=tweet)

    def cleanup(self):
        pass

    def add_trigger(self, trigger):
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _get_last_id(self):
        # Lazily restore the last dispatched tweet id from the datastore.
        if not self._last_id and hasattr(self._sensor_service, 'get_value'):
            self._last_id = self._sensor_service.get_value(name='last_id')
        return self._last_id

    def _set_last_id(self, last_id):
        self._last_id = last_id
        if hasattr(self._sensor_service, 'set_value'):
            self._sensor_service.set_value(name='last_id', value=last_id)

    def _dispatch_trigger_for_tweet(self, tweet):
        trigger = self._trigger_ref
        url = '%s/%s/status/%s' % (BASE_URL, tweet['user']['screen_name'], tweet['id'])
        payload = {
            'id': tweet['id'],
            'created_at': tweet['created_at'],
            'lang': tweet['lang'],
            'place': tweet['place'],
            'retweet_count': tweet['retweet_count'],
            'favorite_count': tweet['favorite_count'],
            'user': {
                'screen_name': tweet['user']['screen_name'],
                'name': tweet['user']['name'],
                'location': tweet['user']['location'],
                'description': tweet['user']['description'],
            },
            'text': tweet['text'],
            'url': url
        }
        self._sensor_service.dispatch(trigger=trigger, payload=payload)
|
BhallaLab/moose | moose-examples/squid/electronics.py | Python | gpl-3.0 | 3,885 | 0.008237 | # electronics.py ---
#
# Filename: electronics.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Feb 22 00:53:38 2012 (+0530)
# Version:
# Last-Updated: Tue Jul 10 10:28:40 2012 (+0530)
# By: subha
# Update #: 221
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
# 2012-02-22 23:22:30 (+0530) Subha - the circuitry put in a class.
#
# Code:
import numpy
import moose
class ClampCircuit(moose.Neutral):
    """Container for a Voltage-Clamp/Current clamp circuit."""
    defaults = {
        'level1': 25.0,
        'width1': 50.0,
        'delay1': 2.0,
        'delay2': 1e6,
        'trigMode': 0,
        'delay3': 1e9
    }

    def __init__(self, path, compartment):
        moose.Neutral.__init__(self, path)
        self.pulsegen = moose.PulseGen(path+"/pulse")  # holding voltage/current generator
        self.pulsegen.count = 2
        self.pulsegen.firstLevel = 25.0
        self.pulsegen.firstWidth = 50.0
        self.pulsegen.firstDelay = 2.0
        self.pulsegen.secondDelay = 0.0
        self.pulsegen.trigMode = 2
        # Gate stays high "forever" so the pulse generator is always enabled.
        self.gate = moose.PulseGen(path+"/gate")
        self.gate.level[0] = 1.0
        self.gate.delay[0] = 0.0
        self.gate.width[0] = 1e9
        moose.connect(self.gate, 'output', self.pulsegen, 'input')
        self.lowpass = moose.RC(path+"/lowpass")  # lowpass filter
        self.lowpass.R = 1.0
        self.lowpass.C = 0.03
        self.vclamp = moose.DiffAmp(path+"/vclamp")
        self.vclamp.gain = 0.0
        self.vclamp.saturation = 1e10
        self.iclamp = moose.DiffAmp(path+"/iclamp")
        self.iclamp.gain = 0.0
        self.iclamp.saturation = 1e10
        self.pid = moose.PIDController(path+"/pid")
        self.pid.gain = 0.5
        self.pid.tauI = 0.02
        self.pid.tauD = 0.005
        self.pid.saturation = 1e10
        # Connect current clamp circuitry
        moose.connect(self.pulsegen, "output", self.iclamp, "plusIn")
        moose.connect(self.iclamp, "output", compartment, "injectMsg")
        # Connect voltage clamp circuitry
        moose.connect(self.pulsegen, "output", self.lowpass, "injectIn")
        moose.connect(self.lowpass, "output", self.vclamp, "plusIn")
        moose.connect(self.vclamp, "output", self.pid, "commandIn")
        moose.connect(compartment, "VmOut", self.pid, "sensedIn")
        moose.connect(self.pid, "output", compartment, "injectMsg")
        current_table = moose.Table("/data/Im")
        moose.connect(current_table, "requestOut", compartment, "getIm")

    def configure_pulses(self, baseLevel=0.0, firstLevel=0.1, firstDelay=5.0, firstWidth=40.0, secondLevel=0.0, secondDelay=1e6, secondWidth=0.0, singlePulse=True):
        """Set up the pulse generator."""
        self.pulsegen.baseLevel = baseLevel
        self.pulsegen.firstLevel = firstLevel
        self.pulsegen.firstWidth = firstWidth
        self.pulsegen.firstDelay = firstDelay
        self.pulsegen.secondLevel = secondLevel
        self.pulsegen.secondDelay = secondDelay
        self.pulsegen.secondWidth = secondWidth
        # trigMode 1 = single triggered pulse, 0 = free-running
        if singlePulse:
            self.pulsegen.trigMode = 1
        else:
            self.pulsegen.trigMode = 0

    def do_voltage_clamp(self):
        """Switch to voltage clamp circuitry. After this the simdt may
        need to be changed for correct performance."""
        self.vclamp.gain = 1.0
        self.iclamp.gain = 0.0
        self.pid.gain = 0.5
        self.pid.tauD = 0.005
        self.pid.tauI = 0.02

    def do_current_clamp(self):
        """Switch to current clamp circuitry. After this the simdt may
        need to be changed for correct performance."""
        self.iclamp.gain = 1.0
        self.vclamp.gain = 0.0
        self.pid.gain = 0.0
#
# electronics.py ends here
|
tonygalmiche/is_plastigray | report/is_article_sans_cde_ouverte_fou.py | Python | mit | 3,344 | 0.007478 | # -*- coding: utf-8 -*-
from openerp import tools
from openerp import models,fields,api
from openerp.tools.translate import _
class is_article_sans_cde_ouverte_fou(models.Model):
    """Read-only reporting model (SQL view) listing purchasable articles
    and whether their preferred supplier has an open supplier order."""
    _name = 'is.article.sans.cde.ouverte.fou'
    _order = 'product_id'
    _auto = False  # backed by the SQL view created in init(), not a table

    product_id = fields.Many2one('product.template', 'Article')
    is_category_id = fields.Many2one('is.category', 'Catégorie')
    is_gestionnaire_id = fields.Many2one('is.gestionnaire', 'Gestionnaire')
    product_nb_fournisseurs = fields.Integer('Nb fournisseurs')
    product_partner_id = fields.Many2one('res.partner', 'Fournisseur fiche article')
    nb_cde_ouverte = fields.Integer('Nb commandes ouvertes')
    cde_ouverte_partner_id = fields.Many2one('is.cde.ouverte.fournisseur', 'Commande ouverte fournisseur')

    def init(self, cr):
        # (Re)create the helper function and the backing SQL view.
        tools.drop_view_if_exists(cr, 'is_article_sans_cde_ouverte_fou')
        cr.execute("""
            CREATE OR REPLACE FUNCTION get_product_fournisseur_id(pt_id integer) RETURNS integer AS $$
            BEGIN
                RETURN (
                    select ps.name
                    from product_supplierinfo ps
                    where ps.product_tmpl_id=pt_id
                    order by ps.sequence limit 1
                );
            END;
            $$ LANGUAGE plpgsql;

            CREATE OR REPLACE view is_article_sans_cde_ouverte_fou AS (
                select
                    pt2.id,
                    pt2.product_id,
                    pt2.is_category_id,
                    pt2.is_gestionnaire_id,
                    pt2.product_nb_fournisseurs,
                    pt2.product_partner_id,
                    pt2.nb_cde_ouverte,
                    pt2.cde_ouverte_partner_id
                from (
                    select
                        pt.id,
                        pt.id product_id,
                        pt.is_category_id,
                        pt.is_gestionnaire_id,
                        (
                            select count(*) from product_supplierinfo ps where pt.id=ps.product_tmpl_id
                        ) product_nb_fournisseurs,
                        get_product_fournisseur_id(pt.id) product_partner_id,
                        (
                            select count(*)
                            from is_cde_ouverte_fournisseur cof inner join is_cde_ouverte_fournisseur_product cofp on cof.id=cofp.order_id
                            where cofp.product_id=pp.id
                        ) nb_cde_ouverte,
                        (
                            select cof.id
                            from is_cde_ouverte_fournisseur cof inner join is_cde_ouverte_fournisseur_product cofp on cof.id=cofp.order_id
                            where cofp.product_id=pp.id and cof.partner_id=get_product_fournisseur_id(pt.id)
                            limit 1
                        ) cde_ouverte_partner_id
                    from product_template pt inner join product_product pp on pt.id=pp.product_tmpl_id
                    where pt.active='t' and pt.purchase_ok='t'
                ) pt2 inner join is_category ic on pt2.is_category_id=ic.id
                where ic.name::int<70
            )
        """)
|
Alir3z4/python-currencies | tests/test_currency.py | Python | gpl-3.0 | 1,997 | 0.000501 | from unittest import TestCase
from currencies import Currency, get_version, __VERSION__
from currencies.exceptions import CurrencyDoesNotExist
class TestCurrency(TestCase):
    """Unit tests for the ``currencies`` package public API."""

    def test_get_version(self):
        version = get_version()
        self.assertIsInstance(version, str)
        self.assertEqual(len(version.split('.')), len(__VERSION__))

    def test_get_money_currency(self):
        currency = Currency('USD')
        self.assertIsInstance(currency.get_money_currency(), str)
        self.assertEqual(currency.get_money_currency(), 'USD')

    def test_set_money_currency(self):
        # Switching currency changes both the code and the money format.
        currency = Currency('USD')
        self.assertEqual(currency.get_money_currency(), 'USD')
        self.assertEqual(currency.get_money_format(13), '$13')
        currency.set_money_currency('AED')
        self.assertEqual(currency.get_money_currency(), 'AED')
        self.assertEqual(currency.get_money_format(13), 'Dhs. 13')

    def test_get_currency_formats(self):
        currency_formats = Currency.get_currency_formats()
        self.assertIsNotNone(currency_formats)
        self.assertIsInstance(currency_formats, list)
        self.assertGreater(len(currency_formats), 0)

    def test_get_money_format(self):
        currency = Currency('USD')
        self.assertEqual(currency.get_money_format(13), '$13')
        self.assertEqual(currency.get_money_format(13.99), '$13.99')
        self.assertEqual(
            currency.get_money_format('13,2313,33'),
            '$13,2313,33'
        )

    def test_get_money_with_currency_format(self):
        currency = Currency('USD')
        self.assertEqual(currency.get_money_with_currency_format(13.99), '$13.99 USD')
        self.assertEqual(
            currency.get_money_with_currency_format('13,2313,33'),
            '$13,2313,33 USD'
        )

    def test_does_not_exist_currency(self):
        self.assertRaises(
            CurrencyDoesNotExist,
            Currency,
            money_currency='BingoMingo'
        )
|
uptown/django-town | django_town/oauth2/management/commands/update_default_scope.py | Python | mit | 452 | 0.00885 | from django.core.management.base import BaseCommand
from django_town.oauth2.models import Client
from django_town.core.settings import OAUTH2_SETTINGS
class Command(BaseCommand):
def handle(self, *args, **options):
Client.objects.all().update(available_scope=OAUTH2_SETTINGS.default_scope)
print Clien | t.objects.all()[0].available_scope
# print Client.objects.create(name=args[0], service=Servic | e.objects.get(name=args[1])) |
foobarbazblarg/stayclean | stayclean-2020-december/venv/lib/python3.8/site-packages/pip/_vendor/resolvelib/providers.py | Python | mit | 5,091 | 0 | class AbstractProvider(object):
"""Delegate class to provide requirement interface for the resolver."""
def identify(self, requirement_or_candidate):
"""Given a requirement or candidate, return an identifier for it.
This is used in many places to identify a requirement or candidate,
e.g. whether two requirements should have their specifier parts merged,
whether two candidates would conflict with each other (because they
have same name but different versions).
"""
raise NotImplementedError
def get_preference(self, resolution, candidates, information):
"""Produce a sort key for given requirement based on preference.
The preference is defined as "I think this requirement should be
resolved first". The lower the return value is, the more preferred
this group of arguments is.
:param resolution: Currently pinned candidate, or `No | ne`.
:param candida | tes: An iterable of possible candidates.
:param information: A list of requirement information.
The `candidates` iterable's exact type depends on the return type of
`find_matches()`. A sequence is passed-in as-is if possible. If it
returns a callble, the iterator returned by that callable is passed
in here.
Each element in `information` is a named tuple with two entries:
* `requirement` specifies a requirement contributing to the current
candidate list.
* `parent` specifies the candidate that provides (dependend on) the
requirement, or `None` to indicate a root requirement.
The preference could depend on a various of issues, including (not
necessarily in this order):
* Is this package pinned in the current resolution result?
* How relaxed is the requirement? Stricter ones should probably be
worked on first? (I don't know, actually.)
* How many possibilities are there to satisfy this requirement? Those
with few left should likely be worked on first, I guess?
* Are there any known conflicts for this requirement? We should
probably work on those with the most known conflicts.
A sortable value should be returned (this will be used as the `key`
parameter of the built-in sorting function). The smaller the value is,
the more preferred this requirement is (i.e. the sorting function
is called with `reverse=False`).
"""
raise NotImplementedError
def find_matches(self, requirements):
"""Find all possible candidates that satisfy the given requirements.
This should try to get candidates based on the requirements' types.
For VCS, local, and archive requirements, the one-and-only match is
returned, and for a "named" requirement, the index(es) should be
consulted to find concrete candidates for this requirement.
The return value should produce candidates ordered by preference; the
most preferred candidate should come first. The return type may be one
of the following:
* A callable that returns an iterator that yields candidates.
* An collection of candidates.
* An iterable of candidates. This will be consumed immediately into a
list of candidates.
:param requirements: A collection of requirements which all of the
returned candidates must match. All requirements are guaranteed to
have the same identifier. The collection is never empty.
"""
raise NotImplementedError
def is_satisfied_by(self, requirement, candidate):
"""Whether the given requirement can be satisfied by a candidate.
The candidate is guarenteed to have been generated from the
requirement.
A boolean should be returned to indicate whether `candidate` is a
viable solution to the requirement.
"""
raise NotImplementedError
def get_dependencies(self, candidate):
"""Get dependencies of a candidate.
This should return a collection of requirements that `candidate`
specifies as its dependencies.
"""
raise NotImplementedError
class AbstractResolver(object):
"""The thing that performs the actual resolution work."""
base_exception = Exception
def __init__(self, provider, reporter):
self.provider = provider
self.reporter = reporter
def resolve(self, requirements, **kwargs):
"""Take a collection of constraints, spit out the resolution result.
This returns a representation of the final resolution state, with one
guarenteed attribute ``mapping`` that contains resolved candidates as
values. The keys are their respective identifiers.
:param requirements: A collection of constraints.
:param kwargs: Additional keyword arguments that subclasses may accept.
:raises: ``self.base_exception`` or its subclass.
"""
raise NotImplementedError
|
shiminasai/ciat_plataforma | ciat_plataforma/context.py | Python | mit | 165 | 0.012121 | from comunicacion.foros.mode | ls import Imagen
def globales(request):
imagenes_global = Imagen.objects.all()[:9]
return {'imagenes_global':imagen | es_global} |
raonyguimaraes/mendelmd | individuals/templatetags/upload_tags.py | Python | bsd-3-clause | 3,137 | 0.00255 | from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def upload_js():
return mark_safe("""
<!-- The template to display files available for upload -->
<script id="template-upload" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-upload fade">
<td>
<span class="preview"></span>
</td>
<td>
<p class="name">{%=file.name%}</p>
{% if (file.error) { %}
<div><span class="label label-important">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<p class="size">{%=o.formatFileSize(file.size)%}</p>
{% if (!o.files.error) { %}
<div class="progress progress-striped active" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="0"><div class="progress-bar progress-bar-success" style="width:0%;"></div></div>
{% } %}
</td>
<td>
{% if (!o.files.error && !i && !o.options.autoUpload) { %}
<button class="btn btn-primary start">
| <i class="glyphicon glyphicon-upload"></i>
<span>{%=locale.fileupload.start%}</span>
</button>
{% } %}
{% if (!i) { %}
<button class="btn btn-warning cancel">
| <i class="glyphicon glyphicon-ban-circle"></i>
<span>{%=locale.fileupload.cancel%}</span>
</button>
{% } %}
</td>
</tr>
{% } %}
</script>
<!-- The template to display files available for download -->
<script id="template-download" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-download fade">
<td>
<span class="preview">
{% if (file.thumbnailUrl) { %}
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" data-gallery><img src="{%=file.thumbnailUrl%}"></a>
{% } %}
</span>
</td>
<td>
<p class="name">
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" {%=file.thumbnailUrl?'data-gallery':''%}>{%=file.name%}</a>
</p>
{% if (file.error) { %}
<div><span class="label label-important">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<span class="size">{%=o.formatFileSize(file.size)%}</span>
</td>
<td>
<button class="btn btn-danger delete" data-type="{%=file.deleteType%}" data-url="{%=file.deleteUrl%}"{% if (file.deleteWithCredentials) { %} data-xhr-fields='{"withCredentials":true}'{% } %}>
<i class="glyphicon glyphicon-trash"></i>
<span>{%=locale.fileupload.destroy%}</span>
</button>
<input type="checkbox" name="delete" value="1" class="toggle">
</td>
</tr>
{% } %}
</script>
""")
|
mick-d/nipype | nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py | Python | bsd-3-clause | 1,430 | 0.021678 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..tracks import StreamlineTractography
def test_StreamlineTractography_inputs():
input_map | = dict(gfa_thresh=dict(mandatory=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault | =True,
),
in_file=dict(mandatory=True,
),
in_model=dict(),
in_peaks=dict(),
min_angle=dict(mandatory=True,
usedefault=True,
),
multiprocess=dict(mandatory=True,
usedefault=True,
),
num_seeds=dict(mandatory=True,
usedefault=True,
),
out_prefix=dict(),
peak_threshold=dict(mandatory=True,
usedefault=True,
),
save_seeds=dict(mandatory=True,
usedefault=True,
),
seed_coord=dict(),
seed_mask=dict(),
tracking_mask=dict(),
)
inputs = StreamlineTractography.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_StreamlineTractography_outputs():
output_map = dict(gfa=dict(),
odf_peaks=dict(),
out_seeds=dict(),
tracks=dict(),
)
outputs = StreamlineTractography.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
jdelacueva/django-isegory | setup.py | Python | agpl-3.0 | 1,049 | 0 | import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).r | ead()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-isegory',
version='0.1',
packages=['isegory'],
include_package_data=True,
license='AGPL',
description='A simple Django app to declare the provenance of a dataset.',
long_description=README,
url='http://github.com/jdelacueva/django-isegory/',
author='Javier de la Cueva',
author_emai | l='jdelacueva@derecho-internet.org',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: AGPL',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
Flexget/Flexget | flexget/components/managed_lists/lists/regexp_list/cli.py | Python | mit | 4,835 | 0.003723 | import re
from argparse import ArgumentParser, ArgumentTypeError
from flexget import options
from flexget.event import event
from flexget.terminal import TerminalTable, console, table_parser
from flexget.utils.database import Session
from . import db
def do_cli(manager, options):
"""Handle regexp-list cli"""
action_map = {
'all': action_all,
'list': action_list,
'add': action_add,
'del': action_del,
'purge': action_purge,
}
action_map[options.regexp_action](options)
def action_all(options):
"""Show all regexp lists"""
lists = db.get_regexp_lists()
header = ['#', 'List Name']
table = TerminalTable(*header, table_type=options.table_type)
for regexp_list in lists:
table.add_row(str(regexp_list.id), regexp_list.name)
console(table)
def action_list(options):
"""List regexp list"""
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
table = TerminalTable('Regexp', table_type=options.table_type)
regexps = db.get_regexps_by_list_id(
regexp_list.id, order_by='added', descending=True, session=session
)
for regexp in regexps:
table.add_row(regexp.regexp or '')
console(table)
def action_add(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}, creating'.format(options.list_name))
regexp_list = db.create_list(options.list_name, session=session)
regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
if not regexp:
console("Adding regexp {} to list {}".format(options.regexp, regexp_list.name))
db.add_to_list_by_name(regexp_list.name, options.regexp, session=session)
console(
'Successfully added regexp {} to regexp list {} '.format(
options.regexp, regexp_list.name
| )
)
else:
console("Regexp {} already exists in list {}".format(options.regexp, regexp_list.name))
def action_del(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with | name {}'.format(options.list_name))
return
regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
if regexp:
console('Removing regexp {} from list {}'.format(options.regexp, options.list_name))
session.delete(regexp)
else:
console(
'Could not find regexp {} in list {}'.format(
options.movie_title, options.list_name
)
)
return
def action_purge(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
console('Deleting list %s' % options.list_name)
session.delete(regexp_list)
def regexp_type(regexp):
try:
re.compile(regexp)
return regexp
except re.error as e:
raise ArgumentTypeError(e)
@event('options.register')
def register_parser_arguments():
# Common option to be used in multiple subparsers
regexp_parser = ArgumentParser(add_help=False)
regexp_parser.add_argument('regexp', type=regexp_type, help="The regexp")
list_name_parser = ArgumentParser(add_help=False)
list_name_parser.add_argument(
'list_name', nargs='?', help='Name of regexp list to operate on', default='regexps'
)
# Register subcommand
parser = options.register_command('regexp-list', do_cli, help='View and manage regexp lists')
# Set up our subparsers
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='regexp_action')
subparsers.add_parser('all', parents=[table_parser], help='Shows all existing regexp lists')
subparsers.add_parser(
'list', parents=[list_name_parser, table_parser], help='List regexp from a list'
)
subparsers.add_parser(
'add', parents=[list_name_parser, regexp_parser], help='Add a regexp to a list'
)
subparsers.add_parser(
'del', parents=[list_name_parser, regexp_parser], help='Remove a regexp from a list'
)
subparsers.add_parser(
'purge', parents=[list_name_parser], help='Removes an entire list. Use with caution!'
)
|
google-research/google-research | cascaded_networks/models/tdl.py | Python | apache-2.0 | 2,301 | 0.008257 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tapped Delay Line handler."""
import torch
import torch.nn as nn
class OneStepDelayKernel(nn.Module):
"""Single slot queue OSD kernel | ."""
def __init__(self, *args, **kwargs):
"""Initialize OSD kernel."""
super().__init__()
self.reset()
def reset(self):
self.state = None
def forward(self, current_state):
if self.state is not None:
prev_state = self.state
else:
prev_state = torch.zeros_like(current_state)
prev_state.requires_grad = True
self.state = current_state.clone()
return prev_state
cla | ss ExponentiallyWeightedSmoothingKernel(nn.Module):
"""Exponentially Weighted Smoothing Kernel.
alpha=0.0
--> state(t) = current_state
Functionally equivalent to sequential ResNet
alpha=1.0
--> state(t) = prev_state
Functionally equivalent to tapped delay line for 1 timestep delay
0.0 < alpha < 1.0
Continuous interpolation between discrete 1 timestep TDL and sequential ResNet
"""
def __init__(self, alpha=0.0):
"""Initialize EWS kernel."""
super().__init__()
self._alpha = alpha
self.reset()
def reset(self):
self.state = None
def forward(self, current_state):
if self.state is not None:
prev_state = self.state
else:
prev_state = torch.zeros_like(current_state)
prev_state.requires_grad = True
self.state = self._alpha*prev_state + (1-self._alpha)*current_state.clone()
return self.state
def setup_tdl_kernel(tdl_mode, kwargs):
"""Temporal kernel interface."""
if tdl_mode == 'OSD':
tdline = OneStepDelayKernel()
elif tdl_mode == 'EWS':
tdline = ExponentiallyWeightedSmoothingKernel(kwargs['tdl_alpha'])
return tdline
|
Asnelchristian/coala-bears | bears/general/LineCountBear.py | Python | agpl-3.0 | 1,107 | 0 | from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class LineCountBear(LocalBear):
LANGUAGES = {'All'}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting'}
def run(se | lf, filename, file, max_lines_per_file: int):
"""
Count the number of lines in a file and ensure that they are
smaller than a given size.
:param max_lines_per_file: Number of lines allowed per file.
"""
file_length | = len(file)
if file_length > max_lines_per_file:
yield Result.from_values(
origin=self,
message=('This file had {count} lines, which is {extra} '
'lines more than the maximum limit specified.'
.format(count=file_length,
extra=file_length-max_lines_per_file)),
severity=RESULT_SEVERITY.NORMAL,
file=filename)
|
ioram7/keystone-federado-pgid2013 | build/python-keystoneclient/keystoneclient/v3/users.py | Python | apache-2.0 | 3,684 | 0 | # Copyright 2011 OpenStack LLC.
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distribute | d under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
class User(base.Resource):
"""Represents an Identity user.
Attributes:
* id: a uuid that identifies the user
"""
pass
class UserManager(base.Crud | Manager):
"""Manager class for manipulating Identity users."""
resource_class = User
collection_key = 'users'
key = 'user'
def _require_user_and_group(self, user, group):
if not (user and group):
msg = 'Specify both a user and a group'
raise exceptions.ValidationError(msg)
def create(self, name, domain=None, project=None, password=None,
email=None, description=None, enabled=True):
return super(UserManager, self).create(
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def list(self, project=None, domain=None, group=None, **kwargs):
"""List users.
If project, domain or group are provided, then filter
users with those attributes.
If ``**kwargs`` are provided, then filter users with
attributes matching ``**kwargs``.
"""
if group:
base_url = '/groups/%s' % base.getid(group)
else:
base_url = None
return super(UserManager, self).list(
base_url=base_url,
domain_id=base.getid(domain),
project_id=base.getid(project),
**kwargs)
def get(self, user):
return super(UserManager, self).get(
user_id=base.getid(user))
def update(self, user, name=None, domain=None, project=None, password=None,
email=None, description=None, enabled=None):
return super(UserManager, self).update(
user_id=base.getid(user),
name=name,
domain_id=base.getid(domain),
project_id=base.getid(project),
password=password,
email=email,
description=description,
enabled=enabled)
def add_to_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).put(
base_url=base_url,
user_id=base.getid(user))
def check_in_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).head(
base_url=base_url,
user_id=base.getid(user))
def remove_from_group(self, user, group):
self._require_user_and_group(user, group)
base_url = '/groups/%s' % base.getid(group)
return super(UserManager, self).delete(
base_url=base_url,
user_id=base.getid(user))
def delete(self, user):
return super(UserManager, self).delete(
user_id=base.getid(user))
|
tadgh/ArgoRevisit | third_party/nltk/chat/suntsu.py | Python | apache-2.0 | 6,223 | 0.00932 | # Natural Language Toolkit: Sun Tsu-Bot
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Sam Huston 2007
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from util import *
"""
Tsu bot responds to all queries with a Sun Tsu sayings
Quoted from Sun Tsu's The Art of War
Translated by LIONEL GILES, M.A. 1910
Hosted by the Gutenberg Project
http://www.gutenberg.org/
"""
pairs = (
(r'quit',
( "Good-bye.",
"Plan well",
"May victory be your future")),
(r'[^\?]*\?',
("Please consider whether you can answer your own question.",
"Ask me no questions!")),
(r'[0-9]+(.*)',
("It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
"There are five essentials for victory")),
(r'[A-Ca-c](.*)',
("The art of war is of vital importance to the State.",
"All warfare is based on deception.",
"If your opponent is secure at all points, be prepared for him. If he is in superior strength, evade him.",
"If the campaign is protracted, the resources of the State will not be equal to the strain.",
"Attack him where he is unprepared, appear where you are not expected.",
"There is no instance of a country having benefited from prolonged warfare.")),
(r'[D-Fd-f](.*)',
("The skillful soldier does not raise a second levy, neither are his supply-wagons loaded more than twice.",
"Bring war material with you from home, but forage on the enemy.",
"In war, then, let your great object be victory, not lengthy campaigns.",
"To fight and conquer in all your battles is not supreme excellence; supreme excellence consists in breaking the enemy's resistance without fighting.")),
(r'[G-Ig-i](.*)',
("Heaven signifies night and day, cold and heat, times and seasons.",
"It is the rule in war, if our forces are ten to the enemy's one, to surround him; if five to one, to attack him; if twice as numerous, to divide our army into two.",
"The good fighters of old first put themselves beyond the possibility of defeat, and then waited for an opportunity of defeating the enemy.",
"One may know how to conquer without being able to do it.")),
(r'[J-Lj-l](.*)',
("There are three ways in which a ruler can bring misfortune upon his army.",
"By commanding the army to advance or to retreat, being ignorant of the fact that it cannot obey. This is called hobbling the a | rmy.",
"By attempting to govern an army in the same way as he administers a kingdom, being ignorant of the conditions which obtain in an army. This causes restlessness in the soldier's minds.",
"By employing the officers of his army without discrimination, throu | gh ignorance of the military principle of adaptation to circumstances. This shakes the confidence of the soldiers.",
"There are five essentials for victory",
"He will win who knows when to fight and when not to fight.",
"He will win who knows how to handle both superior and inferior forces.",
"He will win whose army is animated by the same spirit throughout all its ranks.",
"He will win who, prepared himself, waits to take the enemy unprepared.",
"He will win who has military capacity and is not interfered with by the sovereign.")),
(r'[M-Om-o](.*)',
("If you know the enemy and know yourself, you need not fear the result of a hundred battles.",
"If you know yourself but not the enemy, for every victory gained you will also suffer a defeat.",
"If you know neither the enemy nor yourself, you will succumb in every battle.",
"The control of a large force is the same principle as the control of a few men: it is merely a question of dividing up their numbers.")),
(r'[P-Rp-r](.*)',
("Security against defeat implies defensive tactics; ability to defeat the enemy means taking the offensive.",
"Standing on the defensive indicates insufficient strength; attacking, a superabundance of strength.",
"He wins his battles by making no mistakes. Making no mistakes is what establishes the certainty of victory, for it means conquering an enemy that is already defeated.",
"A victorious army opposed to a routed one, is as a pound's weight placed in the scale against a single grain.",
"The onrush of a conquering force is like the bursting of pent-up waters into a chasm a thousand fathoms deep.")),
(r'[S-Us-u](.*)',
("What the ancients called a clever fighter is one who not only wins, but excels in winning with ease.",
"Hence his victories bring him neither reputation for wisdom nor credit for courage.",
"Hence the skillful fighter puts himself into a position which makes defeat impossible, and does not miss the moment for defeating the enemy.",
"In war the victorious strategist only seeks battle after the victory has been won, whereas he who is destined to defeat first fights and afterwards looks for victory.",
"There are not more than five musical notes, yet the combinations of these five give rise to more melodies than can ever be heard.",
"Appear at points which the enemy must hasten to defend; march swiftly to places where you are not expected.")),
(r'[V-Zv-z](.*)',
("It is a matter of life and death, a road either to safety or to ruin.",
"Hold out baits to entice the enemy. Feign disorder, and crush him.",
"All men can see the tactics whereby I conquer, but what none can see is the strategy out of which victory is evolved.",
"Do not repeat the tactics which have gained you one victory, but let your methods be regulated by the infinite variety of circumstances.",
"So in war, the way is to avoid what is strong and to strike at what is weak.",
"Just as water retains no constant shape, so in warfare there are no constant conditions.")),
(r'(.*)',
( "Your statement insults me.",
""))
)
suntsu_chatbot = Chat(pairs, reflections)
def suntsu_chat():
print "Talk to the program by typing in plain English, using normal upper-"
print 'and lower-case letters and punctuation. Enter "quit" when done.'
print '='*72
print "You seek enlightenment?"
suntsu_chatbot.converse()
def demo():
suntsu_chat()
if __name__ == "__main__":
demo()
|
alphacsc/alphacsc | examples/csc/plot_lfp_data.py | Python | bsd-3-clause | 4,048 | 0.001482 | """
==============================
CSC to learn LFP spiking atoms
==============================
Here, we show how CSC can be used to learn spiking
atoms from Local Field Potential (LFP) data [1].
[1] Hitziger, Sebastian, et al.
Adaptive Waveform Learning: A Framework for Modeling Variability in
Neurophysiological Signals. IEEE | Transactions on Signal Processing (2017).
"""
###############################################################################
# First, let us fetch the data (~14 MB)
import os
from mne.datasets import fetch_dataset
from mne.utils import get_confi | g
url = ('https://github.com/hitziger/AWL/raw/master/Experiments/data/'
'LFP_data_contiguous_1250_Hz.mat')
folder_name = "LFP"
archive_name = "LFP_data_contiguous_1250_Hz.mat"
fname = fetch_dataset(
{"dataset_name": "LFP_data",
"url": url,
"archive_name": archive_name,
"folder_name": folder_name,
"hash": None
},
path=None,
force_update=False
)
fname = os.path.join(fname, archive_name)
###############################################################################
# It is a mat file, so we use scipy to load it
from scipy import io
data = io.loadmat(fname)
X, sfreq = data['X'].T, float(data['sfreq'])
###############################################################################
# And now let us look at the data
import numpy as np
import matplotlib.pyplot as plt
start, stop = 11000, 15000
times = np.arange(start, stop) / sfreq
plt.plot(times, X[0, start:stop], color='b')
plt.xlabel('Time (s)')
plt.ylabel(r'$\mu$ V')
plt.xlim([9., 12.])
###############################################################################
# and filter it using a convenient function from MNE. This will remove low
# frequency drifts, but we keep the high frequencies
from mne.filter import filter_data
X = filter_data(
X.astype(np.float64), sfreq, l_freq=1, h_freq=None, fir_design='firwin')
###############################################################################
# Now, we define the parameters of our model.
reg = 6.0
n_times = 2500
n_times_atom = 350
n_trials = 100
n_atoms = 3
n_iter = 60
###############################################################################
# Let's stick to one random state for now, but if you want to learn how to
# select the random state, consult :ref:`this example
# <sphx_glr_auto_examples_plot_simulate_randomstate.py>`.
random_state = 10
###############################################################################
# Now, we epoch the trials
overlap = 0
starts = np.arange(0, X.shape[1] - n_times, n_times - overlap)
stops = np.arange(n_times, X.shape[1], n_times - overlap)
X_new = []
for idx, (start, stop) in enumerate(zip(starts, stops)):
if idx >= n_trials:
break
X_new.append(X[0, start:stop])
X_new = np.vstack(X_new)
del X
###############################################################################
# We remove the mean and scale to unit variance.
X_new -= np.mean(X_new)
X_new /= np.std(X_new)
###############################################################################
# The convolutions can result in edge artifacts at the edges of the trials.
# Therefore, we discount the contributions from the edges by windowing the
# trials.
from numpy import hamming
X_new *= hamming(n_times)[None, :]
###############################################################################
# Of course, in a data-limited setting we want to use as much of the data as
# possible. If this is the case, you can set `overlap` to non-zero (for example
# half the epoch length).
#
# Now, we run regular CSC since the trials are not too noisy
from alphacsc import learn_d_z
pobj, times, d_hat, z_hat, reg = learn_d_z(X_new, n_atoms, n_times_atom,
reg=reg, n_iter=n_iter,
random_state=random_state, n_jobs=1)
###############################################################################
# Let's look at the atoms now.
plt.figure()
plt.plot(d_hat.T)
plt.show()
|
blamed-cloud/AISuite | alphabeta.py | Python | mit | 6,127 | 0.0315 | #!/usr/bin/env python
#alphabeta.py
from __future__ import print_function
import sys
import random
#import player # I don't think this is needed anymore
import transpositions
UPPER_BOUND = 100
LOWER_BOUND = -100
DEFAULT_DEPTH = 5
VOLATILE_DEPTH = -3
ORDER_NORMAL = 1
ORDER_FREE = 2
#function for returning randomly from the shallowest best children
def shallowest_first(best_child_list):
min_depth = max([tup[2] for tup in best_child_list]) # max because depths start high and go to zero (or negative)
choices = [tup for tup in best_child_list if tup[2] == min_depth]
return random.choice(choices)
shallowest_first.sel = max
#function for returning randomly from the deepest best children
def deepest_first(best_child_list):
max_depth = min([tup[2] for tup in best_child_list]) # min because depths start high and go to zero (or negative)
choices = [tup for tup in best_child_list if tup[2] == max_depth]
return random.choice(choices)
deepest_first.sel = min
def ignore_depth(best_child_list):
	"""Pick uniformly at random among all best children, ignoring depth."""
	idx = random.randrange(len(best_child_list))
	return best_child_list[idx]
# Degenerate depth combiner: just keep the first depth it is handed.
ignore_depth.sel = lambda x: x[0]
def default_volatility_measure(game):
	"""Default volatility predicate: no position is volatile.

	Used when no game-specific quiescence test is installed, so the search
	never extends past its depth limit.  (The original line was corrupted
	by a stray " | " separator; the definition is reconstructed here.)
	"""
	return False
class ABPruning_Tree(object):
    """A game-tree node for minimax search with alpha-beta pruning.

    Each node wraps a project game object (expected to expose
    get_child_states, get_child_state2move_dict, is_game_over,
    get_player_num, parse_state, make_new_instance and
    load_state_from_string) and caches its children lazily.  Search results
    are memoized through a shared transpositions.Transposition_Manager
    keyed on (parsed state, depth, alpha, beta, side-to-move).

    Scores lie in [LOWER_BOUND, UPPER_BOUND]; ``heuristic`` maps a state
    string to such a score.  ``best_child`` collects (state, move, depth)
    tuples tied for the best value, and a pluggable selector picks among
    them.
    """

    def __init__(self, game, depth_lim = DEFAULT_DEPTH, A = LOWER_BOUND, B = UPPER_BOUND, heuristic = None, i_am_max = True):
        """Build a node for ``game``.

        game      -- project game object (see class docstring for contract)
        depth_lim -- remaining plies before the heuristic is consulted
        A, B      -- initial alpha / beta bounds
        heuristic -- callable(state_string) -> numeric score
        i_am_max  -- True when this node is the maximizing player
        """
        self.game = game
        self.state = str(self.game)  # canonical string form of the position
        self.children = {}      # child state string -> ABPruning_Tree (built lazily)
        self.child_moves = {}   # child state string -> move producing that state
        self.best_child = []    # (state, move, depth) tuples tied for the best value
        self.alpha = A
        self.beta = B
        self.depth_limit = depth_lim
        self.evaluate = heuristic
        self.is_volatile = default_volatility_measure
        # Fixed: the source line was corrupted by a stray " | " separator
        # ("self. | value = 0"); the intent is plainly this assignment.
        self.value = 0
        self.is_max = i_am_max
        self.have_children = False  # written but never read here; kept for compatibility
        self.tpos_mngr = transpositions.Transposition_Manager(3)
        self.choose_best_child = self.best_chance_selector
        self.depth_sel = self.best_chance_sel

    def re_init(self, depth, A, B):
        """Reset search bounds so a cached subtree can be searched again.

        NOTE(review): best_child and the cached children are NOT cleared
        here, so (state, move, depth) tuples from a previous search can
        survive into the next one — confirm this is intentional.
        """
        self.depth_limit = depth
        self.alpha = A
        self.beta = B
        self.value = 0

    def best_chance_selector(self, best_child_list):
        """Depth-aware tie-break: when winning, prefer the quickest win
        (shallowest child); when losing or drawing, drag the game out
        (deepest child).  Mirrored for the minimizing player."""
        value = None
        if self.is_max:
            if self.value > 0:
                value = shallowest_first(best_child_list)
            if self.value <= 0:
                value = deepest_first(best_child_list)
        else:
            if self.value < 0:
                value = shallowest_first(best_child_list)
            if self.value >= 0:
                value = deepest_first(best_child_list)
        return value

    def best_chance_sel(self, depth_list):
        """Combine two candidate depths the same way best_chance_selector
        would prefer them (max = shallower, min = deeper)."""
        value = None
        if self.is_max:
            if self.value > 0:
                value = max(depth_list)
            if self.value <= 0:
                value = min(depth_list)
        else:
            if self.value < 0:
                value = max(depth_list)
            if self.value >= 0:
                value = min(depth_list)
        return value

    def set_heuristic(self, heuristic):
        """Install the leaf-evaluation function."""
        self.evaluate = heuristic

    def set_child_selector(self, selector, depth_selector):
        """Install the tie-break selector and its matching depth combiner."""
        self.choose_best_child = selector
        self.depth_sel = depth_selector

    def set_volatility_measure(self, vol):
        """Install the predicate deciding whether a state is volatile
        (volatile states are searched past depth 0, down to VOLATILE_DEPTH)."""
        self.is_volatile = vol

    def set_transposition_manager(self, tpm):
        """Share a transposition table with this subtree."""
        self.tpos_mngr = tpm

    def get_depth(self):
        """Return the remaining depth limit at this node."""
        return self.depth_limit

    def set_game(self, game):
        """Replace the wrapped game object (note: self.state is NOT refreshed)."""
        self.game = game

    def set_children(self):
        """Populate child_moves and the (empty) children cache from the game."""
        # The game supplies both the reachable states and the move producing
        # each one, so the search can report moves rather than raw states.
        self.child_moves = self.game.get_child_state2move_dict()
        self.children = {x: None for x in self.game.get_child_states()}

    def get_child_tree_by_state(self, child_state):
        """Return the subtree for child_state, building and caching it if it
        is a known child; unknown states get a fresh, uncached subtree."""
        t = self
        if child_state in self.children:
            if self.children[child_state] is None:
                child = self.make_child_by_state(child_state)
                self.children[child_state] = child
            t = self.children[child_state]
        else:
            t = self.make_child_by_state(child_state)
        return t

    def is_terminal_node(self):
        """True when the wrapped game is over."""
        return self.game.is_game_over()

    def get_best_child_tuple(self):
        """Return one (state, move, depth) tuple from the best children,
        delegating ties to the configured selector."""
        value = []
        if len(self.best_child) == 1:
            value = self.best_child[0]
        else:
            value = self.choose_best_child(self.best_child)
        return value

    def make_child_by_state(self, child_state):
        """Build a child node by cloning the game into child_state; the child
        inherits this node's bounds, heuristic, selectors and table."""
        baby = self.game.make_new_instance()
        baby.load_state_from_string(child_state)
        # Player 1 is treated as the maximizer by convention.
        child = ABPruning_Tree(baby, self.depth_limit - 1, self.alpha, self.beta, self.evaluate, baby.get_player_num() == 1)
        child.set_volatility_measure(self.is_volatile)
        child.set_child_selector(self.choose_best_child, self.depth_sel)
        child.set_transposition_manager(self.tpos_mngr)
        return child

    def search(self):
        """Memoized entry point: return (value, best_child_depth), consulting
        the transposition table before falling back to _search()."""
        search_tup = (0, 0)
        key = transpositions.Transposition_Key(self.game.parse_state(self.state), self.depth_limit, self.alpha, self.beta, self.is_max)
        if key in self.tpos_mngr:
            search_tup = self.tpos_mngr[key]
        else:
            search_tup = self._search()
            self.tpos_mngr[key] = search_tup
        return search_tup

    def _search(self):
        """Alpha-beta minimax over the children; returns (value, depth).

        Leaves — terminal states, exhausted depth on a quiet position, or
        the VOLATILE_DEPTH floor — are scored with the heuristic.  Interior
        nodes take the max (or min) over their children, tightening
        alpha/beta and cutting off once the window inverts.
        """
        best_child_depth = 0
        # Leaf: out of depth on a quiet position, hit the quiescence floor,
        # or the game is over.
        if (self.depth_limit <= 0 and not self.is_volatile(self.state)) or (self.depth_limit == VOLATILE_DEPTH) or self.is_terminal_node():
            self.value = self.evaluate(self.state)
            best_child_depth = self.depth_limit
        else:
            if not self.children:
                self.set_children()
            # Start from the worst possible score for this player.
            if self.is_max:
                self.value = LOWER_BOUND
            else:
                self.value = UPPER_BOUND
            for child_state in self.children:
                if self.children[child_state] is None:
                    child = self.make_child_by_state(child_state)
                    self.children[child_state] = child
                else:
                    # Cached subtree: refresh its bounds before re-searching.
                    self.children[child_state].re_init(self.depth_limit - 1, self.alpha, self.beta)
                search_tup = self.children[child_state].search()
                child_value = search_tup[0]
                child_depth = search_tup[1]
                # Strictly better: restart the tie list.  Equal: extend it.
                # (The comparison deliberately uses self.value BEFORE the
                # min/max update below.)
                if (self.is_max and child_value > self.value) or (not self.is_max and child_value < self.value):
                    self.best_child = [(child_state, self.child_moves[child_state], child_depth)]
                    best_child_depth = child_depth
                elif child_value == self.value:
                    self.best_child += [(child_state, self.child_moves[child_state], child_depth)]
                    best_child_depth = self.depth_sel([best_child_depth, child_depth])
                if self.is_max:
                    self.value = max(self.value, child_value)
                    self.alpha = max(self.alpha, self.value)
                else:
                    self.value = min(self.value, child_value)
                    self.beta = min(self.beta, self.value)
                # NOTE(review): classic alpha-beta cuts off on beta <= alpha;
                # the strict < here prunes only when the window has inverted,
                # so equal-bound siblings are still searched — confirm intent.
                if self.beta < self.alpha:
                    break
        return (self.value, best_child_depth)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.