repo_name (string, len 5-100) | path (string, len 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, len 0-8.16k) | middle (string, len 3-512) | suffix (string, len 0-8.17k)
|---|---|---|---|---|---|---|---|---|
nikesh-mahalka/cinder | cinder/tests/unit/test_dellsc.py | Python | apache-2.0 | 85,579 | 0 | # Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.dell import dell_storagecenter_common
from cinder.volume.drivers.dell import dell_storagecenter_iscsi
from cinder.volume import volume_types
# We patch these here as they are used by every test to keep
# from trying to contact a Dell Storage Center.
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
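# The stacked patches apply bottom-up, so each test method receives the mocks
# in the order close_connection, open_connection, then the patched __init__.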
class DellSCSanISCSIDriverTestCase(test.TestCase):
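    # Canned Storage Center API payloads, used as return values for the
    # mocked calls throughout these tests.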
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
MAPPINGS = [{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
                 u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
                                 u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-46-250',
u'description': u'Cinder Clone Replay',
u'parent': {u'instanceId': u'64702.46.249',
u'instanceName': u'64702-46-249',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.46.250',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'12/09/2014 03:52:08 PM',
u'createVolume': {u'instanceId': u'64702.46',
u'instanceName':
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'objectType': u'ScVolume'},
u'expireTime': u'12/09/2014 04:52:08 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7910,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'12/09/2014 03:52:08 PM',
u'size': u'0.0 Bytes'
}
SCRPLAYPROFILE = {u'ruleCount': 0,
u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
u'volumeCount': 0,
|
willvanwazer/offloading | offloading/wsgi.py | Python | mit | 395 | 0.002532 | """
WSGI config for offloading project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "offloading.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
ZwaConnection/TheGuildWeb | apps/member/decorators.py | Python | gpl-3.0 | 646 | 0.004644 | from django.http import HttpResponseRedirect
def anonymous_required(view, redirect_to= None):
return AnonymousRequired(view, redirect_to)
class AnonymousRequired(object):
def __init__(self, view, redirect_to):
if redirect_to is None:
from django.conf import settings
redirect_to = settings.LOGIN_REDIRECT_URL
self.view = view
self.redirect_to = redirect_to
def __call__(self, request, *args, **kwargs):
        if request.user is not None and request.user.is_authenticated:
return HttpResponseRedirect(self.redirect_to)
        return self.view(request, *args, **kwargs)
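# Hypothetical usage, e.g. on a login view that authenticated users should skip:
#
#   @anonymous_required
#   def login_view(request):
#       ...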
|
AlessandroZ/LaZagne | Linux/lazagne/softwares/chats/pidgin.py | Python | lgpl-3.0 | 2,313 | 0.000432 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import traceback
from lazagne.config.module_info import ModuleInfo
from xml.etree.cElementTree import ElementTree
from lazagne.config import homes
class Pidgin(ModuleInfo):
def __init__(self):
ModuleInfo.__init__(self, 'pidgin', 'chats')
# If pidgin is started, use the api to retrieve all passwords
def get_password_from_dbus(self):
try:
import dbus
except ImportError:
self.debug('Dbus not installed: sudo apt-get install python-dbus')
return []
pwd_found = []
        for _, session in homes.sessions():
try:
bus = dbus.bus.BusConnection(session)
                purple = bus.get_object(
"im.pidgin.purple.PurpleService",
"/im/pidgin/purple/PurpleObject",
"im.pidgin.purple.PurpleInterface"
)
acc = purple.PurpleAccountsGetAllActive()
for x in range(len(acc)):
_acc = purple.PurpleAccountsGetAllActive()[x]
pwd_found.append({
'Login': purple.PurpleAccountGetUsername(_acc),
'Password': purple.PurpleAccountGetPassword(_acc),
'Protocol': purple.PurpleAccountGetProtocolName(_acc),
})
bus.flush()
bus.close()
except Exception as e:
self.debug(e)
return pwd_found
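    # Fallback when Pidgin is not running: parse ~/.purple/accounts.xml, which
    # stores any saved account passwords in plain text.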
def run(self):
pwd_found = self.get_password_from_dbus()
for path in homes.get(file=os.path.join('.purple', 'accounts.xml')):
tree = ElementTree(file=path)
root = tree.getroot()
for account in root.findall('account'):
if account.find('name') is not None:
name = account.find('name')
password = account.find('password')
if name is not None and password is not None:
pwd_found.append(
{
'Login': name.text,
'Password': password.text
}
)
return pwd_found
|
ztane/zsos | userland/lib/python2.5/logging/__init__.py | Python | gpl-3.0 | 47,144 | 0.003076 | # Copyright 2001-2007 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python, and influenced by Apache's log4j system.
Should work under Python versions >= 1.5.2, except that source line
information is not available unless 'sys._getframe()' is.
Copyright (C) 2001-2007 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, types, time, string, cStringIO, traceback
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
__version__ = "0.5.0.2"
__date__ = "16 February 2007"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']:
_srcfile = __file__[:-4] + '.py'
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
# next bit filched from 1.5.2's inspect.py
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_traceback.tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates Handlers and so
#might arbitrary user threads. Since Handler.__init__() updates the shared
#dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
global _lock
if (not _lock) and thread:
_lock = threading.RLock()
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord:
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and (len(args) == 1) and args[0] and (type(args[0]) == types.DictType):
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.leve |
JonasWallin/Mixture | test/speed_checks/speed_mixture.py | Python | gpl-3.0 | 1,989 | 0.025138 | '''
testing speedup of code
Created on Sep 17, 2016
@author: jonaswallin
'''
from Mixture.density import mNIG
from Mixture.density.purepython import mNIG as pmNIG
from Mixture import mixOneDims
import numpy as np
import numpy.random as npr
import timeit
# most speed here is used startup (iteration = 500, n = 1000)
# Cython:
# 2000 0.152 0.000 0.268 0.000 NIG.py:82(EV)
# 2000 0.098 0.000 0.145 0.000 NIG.py:39(dens)
# 2000 0.051 0.000 0.051 0.000 {Mixture.util.cython_Bessel.Bessel0approx}
# 2000 0.037 0.000 0.037 0.000 {Mixture.util.cython_Bessel.Bessel1approx}
# Pure Python:
# 2000 1.201 0.001 1.264 0.001 NIG.py:208(EV)
# 2000 1.195 0.001 1.201 0.001 NIG.py:248(dens)
# Pure Python, no precompute:
# 2000 2.322 0.001 2.387 0.001 NIG.py:208(EV)
# 2000 1.205 0.001 1.211 0.001 NIG.py:248(dens)
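# The figures above are pstats output; something like
#   python -m cProfile -s cumulative speed_mixture.py
# should reproduce them.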
npr.seed(10)
def speed_python(pure_python=False, precompute = True):
K = 2
d = 2
iteration = 500
mixObj = mixOneDims(K=K, d=d)
if pure_python:
mixObj.set_densites([pmNIG(d=d) for k in range(K)]) # @UnusedVariable
else:
mixObj.set_densites([mNIG(d=d) for k in range(K)]) # @UnusedVariable
paramMat_true = [np.array([[1.1, 1.12, 0.1, 0],
[-1, 0,2 , -4] ]),
np.array([[-2, 0, 0.3, 0],
[1, 0, 2 , -4] ])]
alpha_true = [0]
mixObj.set_paramMat(alpha = alpha_true,paramMat = paramMat_true)
Y = mixObj.sample(n = 1000)
    mixObj.set_data(Y)
paramMat = [npr.randn(2,4),npr.randn(2,4)]
paramMat[0][0,0] = 1.1
paramMat[1][0,0] = -2
alpha = np.array(alpha_true)
for i in range(iteration): # @UnusedVariable
p, alpha, paramMat = mixObj.EMstep(alpha = alpha, paramMat = paramMat , precompute = precompute) # @UnusedVariable
if __name__ == "__main__":
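    # Hypothetical driver call; the entry-point body is empty in this dump:
    speed_python(pure_python=False, precompute=True)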
|
antoinecarme/pyaf | tests/artificial/transf_BoxCox/trend_MovingMedian/cycle_0/ar_12/test_artificial_32_BoxCox_MovingMedian_0_12_20.py | Python | bsd-3-clause | 264 | 0.087121 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 0, transform = "BoxCox", sigma = 0.0, exog_count = 20, ar_order = 12); |
servalproject/nikola | nikola/plugins/template_mako.py | Python | mit | 3,347 | 0.000299 | # Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Mako template handlers"""
import os
import shutil
from mako import util, lexer
from mako.lookup import TemplateLookup
from nikola.plugin_categories import TemplateSystem
class MakoTemplates(TemplateSystem):
"""Wrapper for Mako templates."""
name = "mako"
lookup = None
cache = {}
def get_deps(self, filename):
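        # Lex the template without rendering it, then walk the parse tree for
        # <%inherit/> and <%namespace/> tags whose 'file' attribute names a
        # template this one depends on.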
text = util.read_file(filename)
        lex = lexer.Lexer(text=text, filename=filename)
lex.parse()
deps = []
        for n in lex.template.nodes:
keyword = getattr(n, 'keyword', None)
if keyword in ["inherit", "namespace"]:
deps.append(n.attributes['file'])
# TODO: include tags are not handled
return deps
def set_directories(self, directories, cache_folder):
"""Createa template lookup."""
cache_dir = os.path.join(cache_folder, '.mako.tmp')
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
self.lookup = TemplateLookup(
directories=directories,
module_directory=cache_dir,
output_encoding='utf-8')
def render_template(self, template_name, output_name, context):
"""Render the template into output_name using context."""
template = self.lookup.get_template(template_name)
data = template.render_unicode(**context)
if output_name is not None:
try:
os.makedirs(os.path.dirname(output_name))
except:
pass
with open(output_name, 'w+') as output:
output.write(data)
return data
def template_deps(self, template_name):
"""Returns filenames which are dependencies for a template."""
# We can cache here because depedencies should
# not change between runs
if self.cache.get(template_name, None) is None:
template = self.lookup.get_template(template_name)
dep_filenames = self.get_deps(template.filename)
deps = [template.filename]
for fname in dep_filenames:
deps += self.template_deps(fname)
self.cache[template_name] = tuple(deps)
return list(self.cache[template_name])
|
oneman/xmms2-oneman-old | wafadmin/Tools/suncxx.py | Python | lgpl-2.1 | 1,743 | 0.043603 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
# Ralf Habacker, 2006 (rh)
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
@conftest
def find_sxx(conf):
v = conf.env
cc = None
if v['CXX']: cc = v['CXX']
elif 'CXX' in conf.environ: cc = conf.environ['CXX']
#if not cc: cc = conf.find_program('g++', var='CXX')
if not cc: cc = conf.find_program('c++', var='CXX')
if not cc: cc = conf.find_program('CC', var='CXX') #studio
if not cc: conf.fatal('sunc++ was not found')
v['CXX'] = cc
v['CXX_NAME'] = 'sun'
@conftest
def sxx_common_flags(conf):
v = conf.env
# CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
v['CXX_SRC_F'] = ''
v['CXX_TGT_F'] = ['-c', '-o', '']
v['CPPPATH_ST'] = '-I%s' # template for adding include paths
# linker
if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
v['CXXLNK_SRC_F'] = ''
v['CXXLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STATICLIB_ST'] = '-l%s'
v['STATICLIBPATH_ST'] = '-L%s'
v['CXXDEFINES_ST'] = '-D%s'
	v['SONAME_ST']          = '-Wl,-h -Wl,%s'
v['SHLIB_MARKER'] = '-Bdynamic'
v['STATICLIB_MARKER'] = '-Bstatic'
# program
v['program_PATTERN'] = '%s'
	# shared library
v['shlib_CXXFLAGS'] = ['-Kpic', '-DPIC']
v['shlib_LINKFLAGS'] = ['-G']
v['shlib_PATTERN'] = 'lib%s.so'
# static lib
v['staticlib_LINKFLAGS'] = ['-Bstatic']
v['staticlib_PATTERN'] = 'lib%s.a'
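# Detection routines run, in the order listed, when this tool is loaded
# via conf.check_tool.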
detect = '''
find_sxx
find_cpp
find_ar
sxx_common_flags
cxx_load_tools
cxx_add_flags
'''
|
kaichogami/sympy | sympy/crypto/crypto.py | Python | bsd-3-clause | 56,953 | 0.000755 | # -*- coding: utf-8 -*-
"""
This file contains some classical ciphers and routines
implementing a linear-feedback shift register (LFSR)
and the Diffie-Hellman key exchange.
"""
from __future__ import print_function
from string import whitespace, ascii_uppercase as uppercase, printable
from sympy import nextprime
from sympy.core import Rational, S, Symbol
from sympy.core.numbers import igcdex, mod_inverse
from sympy.core.compatibility import range
from sympy.matrices import Matrix
from sympy.ntheory import isprime, totient, primitive_root
from sympy.polys.domains import FF
from sympy.polys.polytools import gcd, Poly
from sympy.utilities.misc import filldedent, translate
from sympy.utilities.iterables import flatten, uniq
from sympy.utilities.randtest import _randrange
def AZ(s=None):
"""Return the letters of ``s`` in uppercase. In case more than
one string is passed, each of them will be processed and a list
of upper case strings will be returned.
Examples
========
>>> from sympy.crypto.crypto import AZ
>>> AZ('Hello, world!')
'HELLOWORLD'
>>> AZ('Hello, world!'.split())
['HELLO', 'WORLD']
See Also
========
check_and_join
"""
if not s:
return uppercase
t = type(s) is str
if t:
s = [s]
rv = [check_and_join(i.upper().split(), uppercase, filter=True)
for i in s]
if t:
return rv[0]
return rv
bifid5 = AZ().replace('J', '')
bifid6 = AZ() + '0123456789'
bifid10 = printable
def padded_key(key, symbols, filter=True):
"""Return a string of the distinct characters of ``symbols`` with
those of ``key`` appearing first, omitting characters in ``key``
    that are not in ``symbols``. A ValueError is raised if a) there are
duplicate characters in ``symbols`` or b) there are characters
in ``key`` that are not in ``symbols``.
Examples
========
>>> from sympy.crypto.crypto import padded_key
>>> padded_key('PUPPY', 'OPQRSTUVWXY')
'PUYOQRSTVWX'
>>> padded_key('RSA', 'ARTIST')
Traceback (most recent call last):
...
ValueError: duplicate characters in symbols: T
"""
syms = list(uniq(symbols))
if len(syms) != len(symbols):
extra = ''.join(sorted(set(
[i for i in symbols if symbols.count(i) > 1])))
raise ValueError('duplicate characters in symbols: %s' % extra)
extra = set(key) - set(syms)
if extra:
raise ValueError(
'characters in key but not symbols: %s' % ''.join(
sorted(extra)))
key0 = ''.join(list(uniq(key)))
return key0 + ''.join([i for i in syms if i not in key0])
def check_and_join(phrase, symbols=None, filter=None):
"""
Joins characters of `phrase` and if ``symbols`` is given, raises
an error if any character in ``phrase`` is not in ``symbols``.
Parameters
==========
phrase: string or list of strings to be returned as a string
symbols: iterable of characters allowed in ``phrase``;
if ``symbols`` is None, no checking is performed
Examples
========
>>> from sympy.crypto.crypto import check_and_join
>>> check_and_join('a phrase')
'a phrase'
>>> check_and_join('a phrase'.upper().split())
'APHRASE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE', filter=True)
'ARAE'
>>> check_and_join('a phrase!'.upper().split(), 'ARE')
Traceback (most recent call last):
...
ValueError: characters in phrase but not symbols: "!HPS"
"""
rv = ''.join(''.join(phrase))
if symbols is not None:
symbols = check_and_join(symbols)
missing = ''.join(list(sorted(set(rv) - set(symbols))))
if missing:
if not filter:
raise ValueError(
'characters in phrase but not symbols: "%s"' % missing)
rv = translate(rv, None, missing)
return rv
def _prep(msg, key, alp, default=None):
if not alp:
if not default:
alp = AZ()
msg = AZ(msg)
key = AZ(key)
else:
alp = default
else:
alp = ''.join(alp)
key = check_and_join(key, alp, filter=True)
msg = check_and_join(msg, alp, filter=True)
return msg, key, alp
def cycle_list(k, n):
"""
Returns the elements of the list ``range(n)`` shifted to the
left by ``k`` (so the list starts with ``k`` (mod ``n``)).
Examples
========
>>> from sympy.crypto.crypto import cycle_list
>>> cycle_list(3, 10)
[3, 4, 5, 6, 7, 8, 9, 0, 1, 2]
"""
k = k % n
return list(range(k, n)) + list(range(k))
######## shift cipher examples ############
def encipher_shift(msg, key, symbols=None):
"""
Performs shift cipher encryption on plaintext msg, and returns the
ciphertext.
Notes
=====
The shift cipher is also called the Caesar cipher, after
Julius Caesar, who, according to Suetonius, used it with a
shift of three to protect messages of military significance.
Caesar's nephew Augustus reportedly used a similar cipher, but
with a right shift of 1.
ALGORITHM:
INPUT:
``key``: an integer (the secret key)
``msg``: plaintext of upper-case letters
OUTPUT:
``ct``: ciphertext of upper-case letters
STEPS:
0. Number the letters of the alphabet from 0, ..., N
1. Compute from the string ``msg`` a list ``L1`` of
corresponding integers.
2. Compute from the list ``L1`` a new list ``L2``, given by
adding ``(k mod 26)`` to each element in ``L1``.
3. Compute from the list ``L2`` a string ``ct`` of
corresponding letters.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
There is also a convenience function that does this with the
original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
msg, _, A = _prep(msg, '', symbols)
shift = len(A) - key % len(A)
key = A[shift:] + A[:shift]
return translate(msg, key, A)
def decipher_shift(msg, key, symbols=None):
"""
Return the text by shifting the characters of ``msg`` to the
left by the amount given by ``key``.
Examples
========
>>> from sympy.crypto.crypto import encipher_shift, decipher_shift
>>> msg = "GONAVYBEATARMY"
>>> ct = encipher_shift(msg, 1); ct
'HPOBWZCFBUBSNZ'
To decipher the shifted text, change the sign of the key:
>>> encipher_shift(ct, -1)
'GONAVYBEATARMY'
Or use this function with the original key:
>>> decipher_shift(ct, 1)
'GONAVYBEATARMY'
"""
return encipher_shift(msg, -key, symbols)
######## affine cipher examples ############
def encipher_affine(msg, key, symbols=None, _inverse=False):
r"""
Performs the affine cipher encryption on plaintext ``msg``, and
returns the ciphertext.
Encryption is based on the map `x \rightarrow ax+b` (mod `N`)
where ``N`` is the number of characters in the alphabet.
Decryption is based on the map `x \rightarrow cx+d` (mod `N`),
where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`).
In particular, for the map to be invertible, we need
`\mathrm{gcd}(a, N) = 1` and an error will be raised if this is
not true.
Notes
=====
This is a straightforward generalization of the shift cipher with
the added complexity of requiring 2 characters to be deciphered in
order to recover the key.
ALGORITHM:
INPUT:
``msg``: string of characters that appear in ``symbols``
        ``a, b``: a pair of integers, with ``gcd(a, N) = 1``
(the secret key)
``symbols``: string of characters (default = uppercase
letters). When no symbols are given, ``msg`` is converted
to upper case letters and all other charactes |
estnltk/pyvabamorf | pyvabamorf/__init__.py | Python | lgpl-3.0 | 304 | 0.016447 | # -*- coding: utf-8 -*-
import pyvabamorf.vabamorf as vm
import atexit
if not vm.FSCInit():
    raise Exception('Could not initiate pyvabamorf library. FSCInit() returned false!')
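# Tear the C library down exactly once when the interpreter exits.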
@atexit.register
def terminate():
vm.FSCTerminate()
from morf import analyze, synthesize
from morf import PyVabamorf
|
schleichdi2/OPENNFR-6.3-CORE | opennfr-openembedded-core/meta/lib/oeqa/selftest/cases/fetch.py | Python | gpl-2.0 | 1,543 | 0.001296 | #
# SPDX-License-Identifier: MIT
#
import os

import oe.path
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake
class Fetch(OESelftestTestCase):
def test_git_mirrors(self):
"""
Verify that the git fetcher will fall back to the HTTP mirrors. The
recipe needs to be one that we have on the Yocto Project source mirror
and is hosted in git.
"""
# TODO: mktempd instead of hardcoding
dldir = os.path.join(self.builddir, "download-git-mirrors")
self.track_for_cleanup(dldir)
# No mirrors, should use git to fetch successfully
features = """
DL_DIR = "%s"
MIRRORS_forcevariable = ""
PREMIRRORS_forcevariable = ""
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
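        # -c fetch selects the fetch task; -f forces it to re-run.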
bitbake("dbus-wait -c fetch -f")
# No mirrors and broken git, should fail
features = """
DL_DIR = "%s"
GIT_PROXY_COMMAND = "false"
MIRRORS_forcevariable = ""
PREMIRRORS_forcevariable = ""
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
with self.assertRaises(AssertionError):
bitbake("dbus-wait -c fetch -f")
# Broken git but a specific mirror
features = """
DL_DIR = "%s"
GIT_PROXY_COMMAND = "false"
MIRRORS_forcevariable = "git://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
bitbake("dbus-wait -c fetch -f")
|
Shekharrajak/pydy | examples/babyboot/babyboot.py | Python | bsd-3-clause | 2,234 | 0.002686 | from sympy import symbols
from sympy.physics.mechanics import *
# -- Declare symbols.
# Declare degrees of freedom, and their time derivatives.
# TODO get rid of those which are unnecessary.
qA, qB, uA, uB = dynamicsymbols('qA qB uA uB')
qAd, qBd, uAd, uBd = dynamicsymbols('qA qB uA uB', 1)
# Declare the constants in the problem.
LA, LB, mA, mB, IAx, IBx, IBy, IBz, g = symbols(
'LA LB mA mB IAx IBx IBy IBz g')
# TODO no clue.
mechanics_printing()
# -- Set up geometry.
# Fixed frame at base of upper rod.
N = ReferenceFrame('N')
# Frame tracking the upper rod.
frameA = N.orientnew('frameA', 'Axis', [qA, N.x])
frameB = frameA.orientnew('frameB', 'Axis', [qB, frameA.z])
# TODO why do we need to do this?
frameA.set_ang_vel(N, uA * N.x)
frameA.set_ang_acc(N, frameA.ang_vel_in(N).dt(N)) # TODO
frameB.set_ang_vel(frameA, uB * frameA.z)
frameB.set_ang_acc(frameA, frameB.ang_vel_in(frameA).dt(frameA))
# Origin.
NO = Point('NO')
NO.set_vel(N, 0)
# Center of mass of upper rod.
Acm = NO.locatenew('Acm', -LA * frameA.z)
Acm.v2pt_theory(NO, N, frameA)  # Don't need this in MotionGenesis.
Bcm = NO.locatenew('Bcm', - LB * frameA.z)
Bcm.v2pt_theory(NO, N, frameA) # Don't need this in MotionGenesis.
# Inertia dyadic.
# TODO inertia dyadics are about a specific point, right? are we requiring the user to define it about the COM always or something? idk if this info should be separate. we have inertia info floating about in 2 different locations.
IA = inertia(frameA, IAx, 0, 0)
IB = inertia(frameB, IBx, IBy, IBz)
# Create rigid bodies.
BodyA = RigidBody('BodyA', Acm, frameA, mA, (IA, Acm))
BodyB = RigidBody('BodyB', Bcm, frameB, mB, (IB, Bcm))
BodyList = [BodyA, BodyB]
# Forces.
# Would be nice to have a method that applies gravity force to all objects.
ForceList = [(Acm, - mA * g * N.z), (Bcm, - mB * g * N.z)]
# Kinematic differential equations. TODO necessary?
kd = [qAd -uA, qBd - uB] # TODO constrain upper rod to O?
KM = KanesMethod(N, q_ind=[qA, qB], u_ind=[uA, uB], kd_eqs=kd)
(fr, frstar) = KM.kanes_equations(ForceList, BodyList)
# Get equations of motion.
MM = KM.mass_matrix
forcing = KM.forcing
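# Solve the mass-matrix form M*u' = f for the generalized accelerations.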
rhs = MM.inv() * forcing
kdd = KM.kindiffdict()
rhs = rhs.subs(kdd)
rhs.simplify()
mprint(rhs)
|
zsdonghao/tensorlayer | tests/models/test_seq2seq_model.py | Python | apache-2.0 | 3,088 | 0.002915 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from tqdm import tqdm
import tensorlayer as tl
from tensorlayer.cost import cross_entropy_seq
from tensorlayer.models.seq2seq import Seq2seq
from tests.utils import CustomTestCase
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Model_SEQ2SEQ_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
cls.batch_size = 16
cls.vocab_size = 20
cls.embedding_size = 32
cls.dec_seq_length = 5
cls.trainX = np.random.randint(20, size=(50, 6))
cls.trainY = np.random.randint(20, size=(50, cls.dec_seq_length + 1))
        cls.trainY[:, 0] = 0  # start_token == 0
# Parameters
cls.src_len = len(cls.trainX)
cls.tgt_len = len(cls.trainY)
assert cls.src_len == cls.tgt_len
cls.num_epochs = 100
cls.n_step = cls.src_len // cls.batch_size
@classmethod
def tearDownClass(cls):
pass
def test_basic_simpleSeq2Seq(self):
model_ = Seq2seq(
decoder_seq_length=5,
cell_enc=tf.keras.layers.GRUCell,
            cell_dec=tf.keras.layers.GRUCell,
n_layer=3,
n_units=128,
embedding_layer=tl.layers.Embedding(vocabulary_size=self.vocab_size, embedding_size=self.embedding_size),
)
optimizer = tf.optimizers.Adam(learning_rate=0.001)
for epoch in range(self.num_epochs):
model_.train()
trainX, trainY = shuffle(self.trainX, self.trainY)
total_loss, n_iter = 0, 0
for X, Y in tqdm(tl.iterate.minibatches(inputs=trainX, targets=trainY, batch_size=self.batch_size,
shuffle=False), total=self.n_step,
desc='Epoch[{}/{}]'.format(epoch + 1, self.num_epochs), leave=False):
dec_seq = Y[:, :-1]
target_seq = Y[:, 1:]
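                # Teacher forcing: the decoder consumes the target shifted
                # right (leading start token) and predicts it shifted left.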
with tf.GradientTape() as tape:
## compute outputs
output = model_(inputs=[X, dec_seq])
output = tf.reshape(output, [-1, self.vocab_size])
loss = cross_entropy_seq(logits=output, target_seqs=target_seq)
grad = tape.gradient(loss, model_.all_weights)
optimizer.apply_gradients(zip(grad, model_.all_weights))
total_loss += loss
n_iter += 1
model_.eval()
test_sample = trainX[0:2, :].tolist()
top_n = 1
for i in range(top_n):
prediction = model_([test_sample], seq_length=self.dec_seq_length, start_token=0, top_n=1)
print("Prediction: >>>>> ", prediction, "\n Target: >>>>> ", trainY[0:2, 1:], "\n\n")
# printing average loss after every epoch
print('Epoch [{}/{}]: loss {:.4f}'.format(epoch + 1, self.num_epochs, total_loss / n_iter))
if __name__ == '__main__':
unittest.main()
|
ihash/jomlak | jomlak/urls.py | Python | mit | 1,199 | 0.000834 | """jomlak URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from .views import FirstView, SalView
from django.views.decorators.csrf import csrf_exempt
from posts.views import MakePost, SubmitPost, PostListView, LikePostView, Bot
urlpatterns = [
url(r'^search/', FirstView.as_view()),
url(r'^sal/$', SalView.as_view()),
url(r'^form/$', MakePost.as_view()),
url(r'^post/', SubmitPost.as_view()),
url(r'^postlist/$', PostListView.as_view()),
url(r'^like/(?P<asha>\d+)/$', LikePostView.as_view()),
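    # Presumably a webhook hit by an external service that cannot supply a
    # CSRF token, hence csrf_exempt.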
url(r'^bot/$', csrf_exempt(Bot.as_view())),
] |
vitan/openrave | python/ikfast_generator_cpp.py | Python | lgpl-3.0 | 127,669 | 0.011397 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (Lesser GPL)
#
# Copyright (C) 2009-2012 Rosen Diankov <rosen.diankov@gmail.com>
#
# ikfast is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# ikfast is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""generates C++ code from the IKFastSolver AST.
"""
from __future__ import with_statement # for python 2.5
from sympy import __version__ as sympy_version
if sympy_version < '0.7.0':
raise ImportError('ikfast needs sympy 0.7.x or greater')
import sys, copy, time, datetime
import cStringIO
try:
from openravepy.metaclass import AutoReloader
except:
class AutoReloader:
pass
# import the correct iktypes from openravepy (if present)
try:
from openravepy import IkParameterization
IkType = IkParameterization.Type
except:
class IkType:
Transform6D=0x67000001
Rotation3D=0x34000002
Translation3D=0x33000003
Direction3D=0x23000004
Ray4D=0x46000005
Lookat3D=0x23000006
TranslationDirection5D=0x56000007
TranslationXY2D=0x22000008
TranslationXYOrientation3D=0x33000009
TranslationLocalGlobal6D=0x3600000a
TranslationXAxisAngle4D=0x4400000b
TranslationYAxisAngle4D=0x4400000c
TranslationZAxisAngle4D=0x4400000d
TranslationXAxisAngleZNorm4D=0x4400000e
TranslationYAxisAngleXNorm4D=0x4400000f
TranslationZAxisAngleYNorm4D=0x44000010
from sympy import *
try:
import re # for indenting
except ImportError:
pass
try:
from itertools import izip, combinations
except ImportError:
def combinations(items,n):
if n == 0: yield[]
else:
for i in xrange(len(items)):
                for cc in combinations(items[i+1:],n-1):
yield [items[i]]+cc
try:
# not necessary, just used for testing
import swiginac
using_swiginac = True
except ImportError:
using_swiginac = False
import logging
log = logging.getLogger('openravepy.ikfast')
from sympy.core import function # for sympy 0.7.1+
class fmod(function.Function):
nargs = 2
is_real = True
is_Function = True
class atan2check(atan2):
nargs = 2
is_real = True
is_Function = True
def evalNumbers(expr):
"""Replaces all numbers with symbols, this is to make gcd faster when fractions get too big"""
if expr.is_number:
return expr.evalf()
elif expr.is_Mul:
result = S.One
for arg in expr.args:
result *= evalNumbers(arg)
elif expr.is_Add:
# because the arguments can get to the thousands, do a tree for adding numbers
evalexprs = [evalNumbers(arg) for arg in expr.args]
N = len(evalexprs)
while N > 1:
for i in range(N/2):
evalexprs[2*i]+=evalexprs[2*i+1]
evalexprs[i] = evalexprs[2*i]
if N & 1:
evalexprs[N/2] = evalexprs[N-1]
N += 1
N /= 2
return evalexprs[0]
elif expr.is_Pow:
# don't replace the exponent
result = evalNumbers(expr.base)**expr.exp
elif expr.is_Function:
args = [evalNumbers(arg) for arg in expr.args]
return expr.func(*args)
else:
result = expr
return result
def customcse(rawexprs,symbols=None):
if not hasattr(rawexprs,'__iter__') and not hasattr(rawexprs,'__array__'):
rawexprs = [rawexprs]
if symbols is None:
symbols = cse_main.numbered_symbols('x')
# fractions can get big, so evaluate as many decimals as possible
reduced_exprs = []
allexprs = []
for iexpr,expr in enumerate(rawexprs):
evalexpr = evalNumbers(expr)
complexity = evalexpr.count_ops()
# need to threshold complexity or otherwise cse will not terminate
if complexity > 300:
reduced_exprs.append(evalexpr)
else:
allexprs.append(evalexpr)
reduced_exprs.append(None)
newreplacements = []
if len(allexprs)>0:
replacements,reduced_exprs2 = cse(allexprs,symbols=symbols)
# have to maintain the same order
for expr in reduced_exprs2:
for i in range(len(reduced_exprs)):
if reduced_exprs[i] is None:
reduced_exprs[i] = expr
break
assert(all([expr is not None for expr in reduced_exprs]))
# look for any expressions of the order of (x**(1/a))**b, usually computer wants x^(b/a)
for r in replacements:
newr = r[1]
if newr.is_Pow and newr.exp.is_number and newr.base.is_Symbol:
baseexpr = newr.base.subs(replacements)
if baseexpr.is_Pow and baseexpr.exp.is_number:
newreplacements.append((r[0],baseexpr.base**(newr.exp*baseexpr.exp)))
continue
newreplacements.append((r[0],newr))
return newreplacements,reduced_exprs
class CodeGenerator(AutoReloader):
"""Generates C++ code from an AST generated by IKFastSolver.
"""
def __init__(self,kinematicshash='',version='0'):
self.symbolgen = cse_main.numbered_symbols('x')
self.strprinter = printing.StrPrinter({'full_prec':False})
self.freevars = None # list of free variables in the solution
self.freevardependencies = None # list of variables depending on the free variables
self.functions = dict()
self.kinematicshash=kinematicshash
self.resetequations() # dictionary of symbols already written
self._globalvariables = {} # a set of global variables already written
self._solutioncounter = 0
self.version=version
def resetequations(self):
self.dictequations = [[],[]]
def copyequations(self,dictequations=None):
if dictequations is None:
dictequations=self.dictequations
return [copy.copy(dictequations[0]),copy.copy(dictequations[1])]
def generate(self, solvertree):
code = """/// autogenerated analytical inverse kinematics code from ikfast program part of OpenRAVE
/// \\author Rosen Diankov
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// ikfast version %s generated on %s
/// To compile with gcc:
/// gcc -lstdc++ ik.cpp
/// To compile without any main function as a shared object (might need -llapack):
/// gcc -fPIC -lstdc++ -DIKFAST_NO_MAIN -DIKFAST_CLIBRARY -shared -Wl,-soname,libik.so -o libik.so ik.cpp
#define IKFAST_HAS_LIBRARY
#include "ikfast.h" // found inside share/openrave-X.Y/python/ikfast.h
using namespace ikfast;
// check if the included ikfast version matches what this file was compiled with
#define IKFAST_COMPILE_ASSERT(x) extern int __dummy[(int)x]
IKFAST_COMPILE_ASSERT(IKFAST_VERSION==%s);
#include <cmath>
#include <vector>
#include <limits>
#include <algorithm>
#include <complex>
#define IKFAST_STRINGIZE2(s) #s
#define IKFAST_STRINGIZE(s) IKFAST_STRINGIZE2(s)
#ifndef IKFAST_ASSERT
#include <stdexcept>
#include <sstream>
#include <iostream>
#ifdef _MSC_VER
#ifndef __PRETTY_FUNCTION__
#define __PRETTY_FUNCTION__ __FUNCDNAME__
# |
kinow-io/kinow-python-sdk | kinow_client/models/video_response.py | Python | apache-2.0 | 18,886 | 0.000265 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VideoResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, id_product=None, id_video_group=None, id_product_image=None, id_language=None, language_filter=None, id_media_source=None, name=None, description=None, duration=None, filename=None, position=None, subscription=None, free=None, download=None, active=None, date_add=None, date_upd=None, can_watch=None, cover=None, thumbnail=None, geoloc_enabled=None, behavior_detected_countries=None, behavior_non_detected_countries=None, has_free_access=None, advertising_url=None):
"""
VideoResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'id_product': 'int',
'id_video_group': 'int',
'id_product_image': 'int',
'id_language': 'int',
'language_filter': 'int',
'id_media_source': 'int',
'name': 'list[I18nField]',
'description': 'list[I18nField]',
'duration': 'int',
'filename': 'str',
'position': 'int',
'subscription': 'int',
'free': 'int',
'download': 'int',
'active': 'bool',
'date_add': 'str',
'date_upd': 'str',
'can_watch': 'bool',
'cover': 'str',
'thumbnail': 'str',
'geoloc_enabled': 'bool',
'behavior_detected_countries': 'str',
'behavior_non_detected_countries': 'str',
'has_free_access': 'VideoFreeAccess',
'advertising_url': 'str'
}
self.attribute_map = {
'id': 'id',
'id_product': 'id_product',
'id_video_group': 'id_video_group',
'id_product_image': 'id_product_image',
'id_language': 'id_language',
'language_filter': 'language_filter',
'id_media_source': 'id_media_source',
'name': 'name',
'description': 'description',
'duration': 'duration',
'filename': 'filename',
'position': 'position',
'subscription': 'subscription',
'free': 'free',
'download': 'download',
'active': 'active',
'date_add': 'date_add',
'date_upd': 'date_upd',
'can_watch': 'can_watch',
'cover': 'cover',
'thumbnail': 'thumbnail',
'geoloc_enabled': 'geoloc_enabled',
'behavior_detected_countries': 'behavior_detected_countries',
'behavior_non_detected_countries': 'behavior_non_detected_countries',
'has_free_access': 'has_free_access',
'advertising_url': 'advertising_url'
}
self._id = id
self._id_product = id_product
self._id_video_group = id_video_group
self._id_product_image = id_product_image
self._id_language = id_language
self._language_filter = language_filter
self._id_media_source = id_media_source
self._name = name
self._description = description
self._duration = duration
self._filename = filename
self._position = position
self._subscription = subscription
self._free = free
self._download = download
self._active = active
self._date_add = date_add
self._date_upd = date_upd
self._can_watch = can_watch
self._cover = cover
self._thumbnail = thumbnail
self._geoloc_enabled = geoloc_enabled
self._behavior_detected_countries = behavior_detected_countries
self._behavior_non_detected_countries = behavior_non_detected_countries
self._has_free_access = has_free_access
self._advertising_url = advertising_url
@property
def id(self):
"""
Gets the id of this VideoResponse.
:return: The id of this VideoResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this VideoResponse.
:param id: The id of this VideoResponse.
:type: int
"""
self._id = id
@property
def id_product(self):
"""
Gets the id_product of this VideoResponse.
:return: The id_product of this VideoResponse.
:rtype: int
"""
return self._id_product
@id_product.setter
def id_product(self, id_product):
"""
Sets the id_product of this VideoResponse.
:param id_product: The id_product of this VideoResponse.
:type: int
"""
self._id_product = id_product
@property
def id_video_group(self):
"""
Gets the id_video_group of this VideoResponse.
:return: The id_video_group of this VideoResponse.
:rtype: int
"""
return self._id_video_group
@id_video_group.setter
def id_video_group(self, id_video_group):
"""
Sets the id_video_group of this VideoResponse.
:param id_video_group: The id_video_group of this VideoResponse.
:type: int
"""
self._id_video_group = id_video_group
@property
def id_product_image(self):
"""
Gets the id_product_image of this VideoResponse.
:return: The id_product_image of this VideoResponse.
:rtype: int
"""
return self._id_product_image
@id_product_image.setter
def id_product_image(self, id_product_image):
"""
Sets the id_product_image of this VideoResponse.
:param id_product_image: The id_product_image of this VideoResponse.
:type: int
"""
self._id_product_image = id_product_image
@property
def id_language(self):
"""
Gets the id_language of this VideoResponse.
:return: The id_language of this VideoResponse.
:rtype: int
"""
return self._id_language
@id_language.setter
def id_language(self, id_language):
"""
        Sets the id_language of this VideoResponse.
:param id_language: The id_language of this VideoResponse.
:type: int
"""
self._id_language = id_language
@property
def language_filter(self):
"""
Gets the language_filter of this VideoResponse.
:return: The language_filter of this VideoResponse.
:rtype: int
"" | "
return self._language_filter
@language_filter.setter
def language_filter(self, language_filter):
"""
Sets the language_filter of this VideoResponse.
:param language_filter: The language_filter of this VideoResponse.
:type: int
"""
self._language_filter = language_filter
@property
def id_media_source(self):
"""
Gets the id_media_source of this VideoResponse.
:return: The id_media_source of this VideoResponse.
:rtype: int
"""
return self._id_media_source
@id_media_source.setter
def id_media_source(self, id_media_source):
"""
Sets the id_media_source of this VideoResponse.
:param id_media_source: The id_media_source of this VideoResponse.
:type: int
"""
self._id_media_source = id_media_source
@property
def name(self):
"""
Gets the name of this VideoResponse.
:return: The name of this VideoResponse.
:rtype: list[I18nField]
"""
        return self._name
steamclock/internetmap | Data-Pipeline/converttojson.py | Python | mit | 646 | 0.017028 |
#import results # Not using results?
import loc
import json
#asinfo = results.asinfo
locinfo = loc.locinfo
#fields = asinfo["fields"]
#del asinfo["fields"]
#print fields
# for asn, dat in asinfo.items():
# if int(asn) in locinfo:
# dat.extend(locinfo[int(asn)])
# else:
# dat.extend([0, 0])
#
# for asn, dat in asinfo.items():
# print asn, dat[0]
# if asn != dat[0]:
# print "\n\n\n\n\n\n\n"
print locinfo
# for val in locinfo.values():
#     print val
# udata = val[0].decode("utf-8");
# val[0] = udata.encode("ascii", "ignore")
#     val[0] = unicode(val[0])
# print val
f = open("asinfo.json", "w")
json.dump(locinfo, f)
f.close() |
SmartElect/SmartElect | voting/tests/test_models.py | Python | apache-2.0 | 15,122 | 0.000727 | import datetime
from unittest.mock import patch
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import TestCase
from django.utils.timezone import now
from pytz import timezone
from libya_elections.utils import at_midnight
from .factories import BallotFactory, CandidateFactory, ElectionFactory, RegistrationPeriodFactory
from ..models import Election, RegistrationPeriod, ReminderQueued
class UnicodeMethodTest(TestCase):
def test_election(self):
self.assertTrue(str(ElectionFactory()))
def test_candidate(self):
self.assertTrue(str(CandidateFactory()))
def test_ballot(self):
self.assertTrue(str(BallotFactory()))
class CandidateTest(TestCase):
def test_unique_numbers(self):
# Candidate numbers are unique
cand1 = CandidateFactory()
with self.assertRaises(IntegrityError):
CandidateFactory(candidate_number=cand1.candidate_number, ballot=cand1.ballot)
class ElectionTest(TestCase):
def setUp(self):
# Remove pre-loaded elections to not interfere with these tests
Election.objects.all().delete()
def test_dont_return_deleted_elections(self):
yesterday = now() - datetime.timedelta(days=1)
tomorrow = now() + datetime.timedelta(days=1)
e = ElectionFactory(polling_start_time=yesterday,
polling_end_time=tomorrow,
)
self.assertEqual(Election.objects.get_next_election(), e)
e.deleted = True
e.save()
self.assertEqual(Election.objects.get_next_election(), None)
def test_start_time(self):
# Setting any of the start times and saving changes the `start_time` field
tz = timezone(settings.TIME_ZONE)
d1 = tz.localize(datetime.datetime(2014, 1, 1, 13, 2, 0))
e = ElectionFactory(polling_start_time=d1)
e = Election.objects.get(pk=e.pk)
# The earliest time will be the work_start_time, which is on midnight,
# 2 days before
work_start = at_midnight(d1 - datetime.timedelta(days=2))
self.assertEqual(work_start, e.work_start_time)
self.assertEqual(work_start, e.start_time)
d3 = tz.localize(datetime.datetime(2013, 12, 31, 1, 1))
e.polling_start_time = d3
e.save()
e = Election.objects.get(pk=e.pk)
work_start = at_midnight(d3 - datetime.timedelta(days=2))
self.assertEqual(work_start, e.start_time)
def test_end_time(self):
# Setting any of the end times and saving changes the `end_time` field
tz = timezone(settings.TIME_ZONE)
d1 = tz.localize(datetime.datetime(2014, 1, 1, 13, 3, 0))
e = ElectionFactory(polling_end_time=d1)
e = Election.objects.get(pk=e.pk)
work_end = e.polling_end_time + datetime.timedelta(hours=16)
self.assertEqual(work_end, e.end_time)
def test_next_election(self):
self.assertIsNone(Election.objects.get_next_election())
ElectionFactory(
polling_start_time=now() - datetime.timedelta(days=1),
polling_end_time=now() - datetime.timedelta(days=1),
)
self.assertIsNone(Election.objects.get_next_election())
election2 = ElectionFactory(
polling_start_time=now() + datetime.timedelta(days=1),
polling_end_time=now() + datetime.timedelta(days=1),
)
self.assertEqual(election2, Election.objects.get_next_election())
ElectionFactory(
polling_start_time=now() + datetime.timedelta(days=2),
polling_end_time=now() + datetime.timedelta(days=2),
)
self.assertEqual(election2, Election.objects.get_next_election())
def test_previous_election(self):
self.assertIsNone(Election.objects.get_previous_election())
ElectionFactory(
polling_start_time=now() + datetime.timedelta(days=1),
polling_end_time=now() + datetime.timedelta(days=1),
)
self.assertIsNone(Election.objects.get_previous_election())
election2 = ElectionFactory(
polling_start_time=now() - datetime.timedelta(days=1),
polling_end_time=now() - datetime.timedelta(days=1),
)
self.assertEqual(election2, Election.objects.get_previous_election())
ElectionFactory(
polling_start_time=now() - datetime.timedelta(days=2),
polling_end_time=now() - datetime.timedelta(days=2),
)
self.assertEqual(election2, Election.objects.get_previous_election())
def test_get_reminders(self):
# Compute reminders for election
start = now() - datetime.timedelta(days=1)
end = now() + datetime.timedelta(days=1)
election = ElectionFactory(
polling_start_time=start,
polling_end_time=end,
)
# Mark one "sent"
ReminderQueued.objects.create(
election=election,
message_number=1,
reminder_number=1,
)
reminders = election.get_reminders()
self.assertEqual(6 * 3 + 4, len(reminders)) # Lots of reminders
reminders = election.get_unsent_reminders()
self.assertEqual(6 * 3 + 4 - 1, len(reminders)) # One fewer unsent reminder
long_time_ago = start - datetime.timedelta(days=36500)
long_time_from_now = end + datetime.timedelta(days=36500)
# And the unsent ones are all "due" sometime in this range
reminders = election.get_due_unsent_reminders(long_time_ago, long_time_from_now)
self.assertEqual(6 * 3 + 4 - 1, len(reminders)) # One fewer unsent reminder
# There should be no reminders due a long time ago.
reminders = election.get_due_unsent_reminders(long_time_ago, long_time_ago)
self.assertFalse(reminders)
def test_schedule_due_reminders(self):
# If polling starts right now, there should be a reminder
# due around now too.
polling_start_time = now()
polling_end_time = now() + datetime.timedelta(days=1)
election = ElectionFactory(
polling_start_time=polling_start_time,
polling_end_time=polling_end_time,
)
from_ = polling_start_time - datetime.timedelta(minutes=10)
to = polling_start_time + datetime.timedelta(minutes=10)
        self.assertTrue(election.get_due_unsent_reminders(from_, to))
self.assertFalse(ReminderQueued.objects.all().exists())
with patch('voting.models.message_reminder_task') as mock_task:
election.schedule_due_reminders(from_, to)
assert mock_task.delay.called
self.assertTrue(ReminderQueued.objects.all().exists())
class RegistrationPeriodManagerTest(TestCase):
def test_no_periods(self):
self.assertFalse(RegistrationPeriod.objects.in_progress())
def test_period_past(self):
RegistrationPeriodFactory(
start_time=now() - datetime.timedelta(days=2),
end_time=now() - datetime.timedelta(days=1),
)
self.assertFalse(RegistrationPeriod.objects.in_progress())
def test_period_not_started(self):
RegistrationPeriodFactory(
start_time=now() + datetime.timedelta(days=1),
end_time=now() + datetime.timedelta(days=2),
)
self.assertFalse(RegistrationPeriod.objects.in_progress())
def test_between_periods(self):
# Make sure our query doesn't mistake periods before and after the
# current time as a period currently in progress
RegistrationPeriodFactory(
start_time=now() - datetime.timedelta(days=2),
end_time=now() - datetime.timedelta(days=1),
)
RegistrationPeriodFactory(
start_time=now() + datetime.timedelta(days=1),
end_time=now() + datetime.timedelta(days=2),
)
self.assertFalse(RegistrationPeriod.objects.in_progress())
def test_during_period(self):
RegistrationPeriodFactory(
start_time=now() - datetime.timedelta(days=1),
            end_time=now() + datetime.timedelta(days=1),
        )
        self.assertTrue(RegistrationPeriod.objects.in_progress())
Spiderlover/Toontown | toontown/nametag/NametagGroup.py | Python | mit | 12,905 | 0.00031 | from direct.task.Task import Task
from pandac.PandaModules import VBase4, PandaNode
from toontown.margins.MarginVisible import MarginVisible
from toontown.nametag import NametagGlobals
from toontown.nametag.Nametag2d import Nametag2d
from toontown.nametag.Nametag3d import Nametag3d
class NametagGroup:
CHAT_TIMEOUT_MIN = 4.0
CHAT_TIMEOUT_MAX = 12.0
CHAT_STOMP_DELAY = 0.2
def __init__(self):
self.avatar = None
self.active = True
self.objectCode = None
self.chatButton = NametagGlobals.noButton
self.chatReversed = False
self.font = None
self.chatFont = None
self.shadow = None
self.marginManager = None
self.visible3d = True
self.chatType = NametagGlobals.CHAT
self.chatBalloonType = NametagGlobals.CHAT_BALLOON
self.nametagColor = NametagGlobals.NametagColors[NametagGlobals.CCNormal]
self.chatColor = NametagGlobals.ChatColors[NametagGlobals.CCNormal]
self.speedChatColor = VBase4(1, 1, 1, 1)
self.wordWrap = 8
self.chatWordWrap = 12
self.text = ''
self.chatPages = []
self.chatPageIndex = 0
self.chatTimeoutTask = None
self.chatTimeoutTaskName = self.getUniqueName() + '-timeout'
self.stompChatText = ''
self.stompTask = None
self.stompTaskName = self.getUniqueName() + '-stomp'
self.icon = PandaNode('icon')
self.nametag2d = Nametag2d()
self.nametag3d = Nametag3d()
self.nametags = set()
self.add(self.nametag2d)
self.add(self.nametag3d)
        # Add the tick task:
        self.tickTaskName = self.getUniqueName() + '-tick'
self.tickTask = taskMgr.add(self.tick, self.tickTaskName, sort=45)
def destroy(self):
if self.marginManager is not None:
self.unmanage(self.marginManager)
if self.tickTask is not None:
taskMgr.remove(self.tickTask)
self.tickTask = None
self.clearChatText()
for nametag in list(self.nametags):
self.remove(nametag)
self.nametag2d = None
self.nametag3d = None
if self.icon is not None:
self.icon.removeAllChildren()
self.icon = None
self.chatFont = None
self.font = None
self.chatButton = NametagGlobals.noButton
self.avatar = None
def getUniqueName(self):
return 'NametagGroup-' + str(id(self))
def tick(self, task):
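        # Visibility policy implemented below: chat balloons can be forced
        # on-screen (2-D), the local avatar prefers 3-D unless its balloon is
        # off-camera, and other avatars fall back to the 2-D margin nametag
        # when hidden or out of the camera's view.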
if (self.avatar is None) or (self.avatar.isEmpty()):
return Task.cont
chatText = self.getChatText()
if (NametagGlobals.forceOnscreenChat and
chatText and
self.chatBalloonType == NametagGlobals.CHAT_BALLOON):
visible3d = False
elif self.avatar == NametagGlobals.me:
if (chatText and
self.chatBalloonType == NametagGlobals.CHAT_BALLOON and
not base.cam.node().isInView(self.avatar.getPos(base.cam))):
visible3d = False
else:
visible3d = True
elif NametagGlobals.force2dNametags:
visible3d = False
elif (not NametagGlobals.want2dNametags and
((not chatText) or (self.chatBalloonType != NametagGlobals.CHAT_BALLOON))):
visible3d = True
elif self.avatar.isHidden():
visible3d = False
else:
visible3d = base.cam.node().isInView(self.avatar.getPos(base.cam))
if visible3d != self.visible3d:
self.visible3d = visible3d
if self.nametag2d is not None:
self.nametag2d.setVisible(not visible3d)
return Task.cont
def setAvatar(self, avatar):
self.avatar = avatar
for nametag in self.nametags:
nametag.setAvatar(self.avatar)
def getAvatar(self):
return self.avatar
def setActive(self, active):
self.active = active
for nametag in self.nametags:
nametag.setActive(self.active)
def getActive(self):
return self.active
def setObjectCode(self, objectCode):
self.objectCode = objectCode
def getObjectCode(self):
return self.objectCode
def setChatButton(self, chatButton):
self.chatButton = chatButton
for nametag in self.nametags:
nametag.setChatButton(self.chatButton)
def getChatButton(self):
return self.chatButton
def hasChatButton(self):
return self.chatButton != NametagGlobals.noButton
def setChatReversed(self, reversed):
self.chatReversed = reversed
for nametag in self.nametags:
nametag.setChatReversed(reversed)
def getChatReversed(self):
return self.chatReversed
def setFont(self, font):
self.font = font
for nametag in self.nametags:
nametag.setFont(self.font)
def getFont(self):
return self.font
def setChatFont(self, chatFont):
self.chatFont = chatFont
for nametag in self.nametags:
nametag.setChatFont(self.chatFont)
def getChatFont(self):
return self.chatFont
def setShadow(self, shadow):
self.shadow = shadow
for nametag in self.nametags:
nametag.setShadow(self.shadow)
def getShadow(self):
return self.shadow
def clearShadow(self):
self.shadow = None
for nametag in self.nametags:
nametag.clearShadow()
def setChatType(self, chatType):
self.chatType = chatType
for nametag in self.nametags:
nametag.setChatType(self.chatType)
def getChatType(self):
return self.chatType
def setChatBalloonType(self, chatBalloonType):
self.chatBalloonType = chatBalloonType
for nametag in self.nametags:
nametag.setChatBalloonType(self.chatBalloonType)
def getChatBalloonType(self):
return self.chatBalloonType
def setNametagColor(self, nametagColor):
self.nametagColor = nametagColor
for nametag in self.nametags:
nametag.setNametagColor(self.nametagColor)
def getNametagColor(self):
return self.nametagColor
def setChatColor(self, chatColor):
self.chatColor = chatColor
for nametag in self.nametags:
nametag.setChatColor(self.chatColor)
def getChatColor(self):
return self.chatColor
def setSpeedChatColor(self, speedChatColor):
self.speedChatColor = speedChatColor
for nametag in self.nametags:
nametag.setSpeedChatColor(self.speedChatColor)
def getSpeedChatColor(self):
return self.speedChatColor
def setWordWrap(self, wordWrap):
self.wordWrap = wordWrap
for nametag in self.nametags:
nametag.setWordWrap(self.wordWrap)
def getWordWrap(self):
return self.wordWrap
def setChatWordWrap(self, chatWordWrap):
self.chatWordWrap = chatWordWrap
for nametag in self.nametags:
nametag.setChatWordWrap(self.chatWordWrap)
def getChatWordWrap(self):
return self.chatWordWrap
def setText(self, text):
self.text = text
for nametag in self.nametags:
nametag.setText(self.text)
nametag.update()
def getText(self):
return self.text
def getNumChatPages(self):
return len(self.chatPages)
def setChatPageIndex(self, chatPageIndex):
if chatPageIndex >= self.getNumChatPages():
return
self.chatPageIndex = chatPageIndex
for nametag in self.nametags:
nametag.setChatText(self.chatPages[self.chatPageIndex])
nametag.update()
def getChatPageIndex(self):
return self.chatPageIndex
def setChatText(self, chatText, timeout=False):
# If we are currently displaying chat text, we need to "stomp" it. In
# other words, we need to clear the current chat text, pause for a
# brief moment, and then display the new chat text:
if self.getChatText():
        self.clearChatText()
shoaibkamil/asp | tools/debugger/stencil/stencil_convert.py | Python | bsd-3-clause | 9,147 | 0.004373 | """Takes an unrolled StencilModel and converts it to a C++ AST.
The third stage in processing. Input must be processed with
StencilUnrollNeighborIter first to remove neighbor loops and
InputElementZeroOffset nodes. Done once per call.
"""
import ast
import asp.codegen.cpp_ast as cpp_ast
import asp.codegen.ast_tools as ast_tools
import stencil_model
from types import StringType, IntType, LongType, FloatType
from assert_utils import *
class StencilConvertAST(ast_tools.ConvertAST):
def __init__(self, model, input_grids, output_grid, inject_failure=None):
assert_has_type(model, stencil_model.StencilModel)
assert len(input_grids) == len(model.input_grids), 'Incorrect number of input grids'
self.model = model
self.input_grids = input_grids
self.output_grid = output_grid
self.output_grid_name = 'out_grid'
self.dim_vars = []
self.var_names = [self.output_grid_name]
self.next_fresh_var = 0
self.inject_failure = inject_failure
super(StencilConvertAST, self).__init__()
def run(self):
self.model = self.visit(self.model)
assert_has_type(self.model, cpp_ast.FunctionBody)
StencilConvertAST.VerifyOnlyCppNodes().visit(self.model)
return self.model
class VerifyOnlyCppNodes(ast_tools.NodeVisitorCustomNodes):
def visit(self, node):
for field, value in ast.iter_fields(node):
if type(value) in [StringType, IntType, LongType, FloatType]:
pass
elif isinstance(value, list):
for item in value:
if ast_tools.is_cpp_node(item):
self.visit(item)
elif ast_tools.is_cpp_node(value):
self.visit(value)
else:
assert False, 'Expected only codepy.cgen.Generable nodes and primitives but found %s' % value
# Visitors
def visit_StencilModel(self, node):
self.argdict = dict()
for i in range(len(node.input_grids)):
self.var_names.append(node.input_grids[i].name)
self.argdict[node.input_grids[i].name] = self.input_grids[i]
self.argdict[self.output_grid_name] = self.output_grid
assert node.border_kernel.body == [], 'Border kernels not yet implemented'
func_name = "kernel"
arg_names = [x.name for x in node.input_grids] + [self.output_grid_name]
args = [cpp_ast.Pointer(cpp_ast.Value("PyObject", x)) for x in arg_names]
body = cpp_ast.Block()
# generate the code to unpack arrays into C++ pointers and macros for accessing
# the arrays
body.extend([self.gen_array_macro_definition(x) for x in self.argdict])
body.extend(self.gen_array_unpack())
body.append(self.visit_interior_kernel(node.interior_kernel))
return cpp_ast.FunctionBody(cpp_ast.FunctionDeclaration(cpp_ast.Value("void", func_name), args),
body)
def visit_interior_kernel(self, node):
cur_node, ret_node = self.gen_loops(node)
body = cpp_ast.Block()
self.output_index_var = cpp_ast.CName(self.gen_fresh_var())
body.append(cpp_ast.Value("int", self.output_index_var))
body.append(cpp_ast.Assign(self.output_index_var,
self.gen_array_macro(
self.output_grid_name, [cpp_ast.CName(x) for x in self.dim_vars])))
replaced_body = None
for gridname in self.argdict.keys():
replaced_body = [ast_tools.ASTNodeReplacer(
ast.Name(gridname, None), ast.Name("_my_"+gridname, None)).visit(x) for x in node.body]
body.extend([self.visit(x) for x in replaced_body])
cur_node.body = body
return ret_node
def visit_OutputAssignment(self, node):
return cpp_ast.Assign(self.visit(stencil_model.OutputElement()), self.visit(node.value))
def visit_Constant(self, node):
return node.value
def visit_ScalarBinOp(self, node):
return super(StencilConvertAST, self).visit_BinOp(ast.BinOp(node.left, node.op, node.right))
def visit_OutputElement(self, node):
return cpp_ast.Subscript("_my_" + self.output_grid_name, self.output_index_var)
def visit_InputElement(self, node):
index = self.gen_array_macro(node.grid.name,
map(lambda x,y: cpp_ast.BinOp(cpp_ast.CName(x), "+", cpp_ast.CNumber(y)),
self.dim_vars,
node.offset_list))
return cpp_ast.Subscript("_my_" + node.grid.name, index)
def visit_InputElementExprIndex(self, node):
return cpp_ast.Subscript("_my_" + node.grid.name, self.visit(node.index))
def visit_MathFunction(self, node):
return cpp_ast.FunctionCall(cpp_ast.CName(node.name), params=map(self.visit, node.args))
# Helper functions
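    # The macro emitted below flattens an N-dimensional index into a linear
    # offset using the bound array's NumPy strides; for a 2-D grid it expands
    # to something like (illustrative only):
    #   #define _out_grid_array_macro(_d0,_d1) (_d1+(_d0 * <stride0/itemsize>))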
def gen_array_macro_definition(self, arg):
array = self.argdict[arg]
defname = "_"+arg+"_array_macro"
params = "(" + ','.join(["_d"+str(x) for x in xrange(array.dim)]) + ")"
calc = "(_d%d" % (array.dim-1)
for x in range(0,array.dim-1):
calc += "+(_d%s * %s)" % (str(x), str(array.data.strides[x]/array.data.itemsize))
calc += ")"
return cpp_ast.Define(defname+params, calc)
def gen_array_macro(self, arg, point):
name = "_%s_array_macro" % arg
return cpp_ast.Call(cpp_ast.CName(name), point)
def gen_array_unpack(self):
ret = [cpp_ast.Assign(cpp_ast.Pointer(cpp_ast.Value("npy_double", "_my_"+x)),
cpp_ast.TypeCast(cpp_ast.Pointer(cpp_ast.Value("npy_double", "")), cpp_ast.FunctionCall(cpp_ast.CName("PyArray_DATA"), params=[cpp_ast.CName(x)])))
for x in self.argdict.keys()]
return ret
def gen_loops(self, node):
dim = len(self.output_grid.shape)
ret_node = None
cur_node = None
def add_one(n):
if self.inject_failure == 'loop_off_by_one':
return cpp_ast.CNumber(n.num + 1)
else:
return n
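        # `add_one` deliberately shifts a loop bound by one when the
        # 'loop_off_by_one' failure injection is selected, so the generated
        # kernel is subtly wrong on purpose.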
for d in xrange(dim):
            dim_var = self.gen_fresh_var()
self.dim_vars.append(dim_var)
initial = cpp_ast.CNumber(self.output_grid.ghost_depth)
            end = cpp_ast.CNumber(self.output_grid.shape[d]-self.output_grid.ghost_depth-1)
increment = cpp_ast.CNumber(1)
if d == 0:
ret_node = cpp_ast.For(dim_var, add_one(initial), add_one(end), increment, cpp_ast.Block())
cur_node = ret_node
elif d == dim-2:
# add OpenMP parallel pragma to 2nd innermost loop
pragma = cpp_ast.Pragma("omp parallel for")
for_node = cpp_ast.For(dim_var, add_one(initial), add_one(end), increment, cpp_ast.Block())
cur_node.body = cpp_ast.Block(contents=[pragma, for_node])
cur_node = for_node
elif d == dim-1:
# add ivdep pragma to innermost node
pragma = cpp_ast.Pragma("ivdep")
for_node = cpp_ast.For(dim_var, add_one(initial), add_one(end), increment,
cpp_ast.Block())
cur_node.body = cpp_ast.Block(contents=[pragma, for_node])
cur_node = for_node
else:
cur_node.body = cpp_ast.For(dim_var, add_one(initial), add_one(end), increment, cpp_ast.Block())
cur_node = cur_node.body
return (cur_node, ret_node)
def gen_fresh_var(self):
while True:
self.next_fresh_var += 1
var = "x%d" % self.next_fresh_var
if var not in self.var_names:
return var
class StencilConvertASTCilk(StencilConvertAST):
class CilkFor(cpp_ast.For):
def intro_line(self):
return "cilk_for (%s; %s; %s += %s)" % (self.start, self.condition, self.loopvar, self.increment)
def gen_loops(self, node):
        dim = len(self.output_grid.shape)
netanelravid/screener | tests/unit/test_helpers.py | Python | apache-2.0 | 2,611 | 0 | import logging
import sys
from logging import (
DEBUG,
INFO,
WARNING,
CRITICAL,
_loggerClass
)
import pytest
from future.backports.test.support import import_module
from screener.helpers import (
screener_init,
get_user_arguments,
init_loggers,
)
from screener.settings import (
init_logger,
NUM_OF_ARGS,
MODULES_WITH_LOGGERS,
)
@pytest.mark.parametrize('arguments, results', [
(('', '-u', '123'),
{'URL': '123',
'--dir': 'Results',
'--output': 'screenshot',
'--verbose': 0}),
(('', '-u', '123', '-v'),
{'URL': '123',
'--dir': 'Results',
'--output': 'screenshot',
'--verbose': 1}),
(('', '-u', '123', '-vvv'),
{'URL': '123',
'--dir': 'Results',
'--output': 'screenshot',
'--verbose': 3}),
(('', '-u', '123', '-d', 'temp_dir'),
{'URL': '123',
'--dir': 'temp_dir',
'--output': 'screenshot',
'--verbose': 0}),
(('', '-u', '123', '--dir', 'temp_dir'),
{'URL': '123',
'--dir': 'temp_dir',
'--output': 'screenshot',
'--verbose': 0}),
    (('', '-u', '123', '-o', 'temp_image'),
     {'URL': '123',
'--dir': 'Results',
'--output': 'temp_image',
'--verbose': 0}),
(('', '-u', '123', '--output', 'temp_image'),
{'URL': '123',
'--dir': 'Results',
'--output': 'temp_image',
'--verbose': 0}),
(('', '-u', '123', '-d', 'temp_dir', '-o', 'temp_image'),
{'URL': '123',
'--dir': 'temp_dir',
'--output': 'temp_image',
'--verbose': 0}),
])
def test_get_user_arguments(arguments, results):
sys.argv = arguments
args = get_user_arguments()
args.pop('--url')
assert args == results
def test_get_user_arguments_total_num():
sys.argv = ('', '-u', '123')
args = get_user_arguments()
args.pop('--url')
assert len(args) == NUM_OF_ARGS
def test_screener_init():
# Dump test, check that it runs without errors.
screener_init()
def test_init_loggers():
init_loggers(logging.INFO)
modules_with_loggers = [import_module(module)
for module in MODULES_WITH_LOGGERS]
for module in modules_with_loggers:
assert isinstance(module.logger, _loggerClass)
@pytest.mark.parametrize('verbose_level, logging_level', [
(0, CRITICAL),
(1, WARNING),
(2, INFO),
(3, DEBUG),
])
def test_init_loggers_verbose_level(verbose_level, logging_level):
init_loggers(verbose_level=verbose_level)
test_logger = init_logger(__name__)
assert test_logger.handlers[0].level == logging_level
jonathonwalz/ansible | lib/ansible/modules/cloud/cloudstack/cs_host.py | Python | gpl-3.0 | 17,672 | 0.000792 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_host
short_description: Manages hosts on Apache CloudStack based clouds.
description:
- Create, update and remove hosts.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the host.
required: true
aliases: [ 'ip_address' ]
url:
description:
- Url of the host used to create a host.
- If not provided, C(http://) and param C(name) is used as url.
- Only considered if C(state=present) and host does not yet exist.
required: false
default: null
username:
description:
- Username for the host.
- Required if C(state=present) and host does not yet exist.
required: false
default: null
password:
description:
- Password for the host.
- Required if C(state=present) and host does not yet exist.
required: false
default: null
pod:
description:
- Name of the pod.
- Required if C(state=present) and host does not yet exist.
required: false
default: null
cluster:
description:
- Name of the cluster.
required: false
default: null
hypervisor:
description:
      - Name of the hypervisor.
- Required if C(state=present) and host does not yet exist.
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM', 'Simulator' ]
required: false
default: null
allocation_state:
description:
- Allocation state of the host.
choices: [ 'enabled', 'disabled' ]
required: false
default: null
host_tags:
description:
- Tags of the host.
required: false
default: null
state:
description:
- State of the host.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
zone:
description:
- Name of the zone in which the host should be deployed.
- If not set, default zone is used.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a host is present but disabled
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
cluster: vcenter.example.com/ch-zrh-ix/pod01-cluster01
pod: pod01
zone: ch-zrh-ix-01
hypervisor: VMware
allocation_state: disabled
host_tags:
- perf
- gpu
# Ensure an existing host is disabled
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: disabled
# Ensure an existing host is enabled
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: enabled
# Ensure a host is absent
- local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
capabilities:
description: Capabilities of the host.
returned: success
type: string
sample: hvm
cluster:
description: Cluster of the host.
returned: success
type: string
sample: vcenter.example.com/zone/cluster01
cluster_type:
description: Type of the cluster of the host.
returned: success
type: string
sample: ExternalManaged
cpu_allocated:
description: Amount in percent of the host's CPU currently allocated.
returned: success
type: string
sample: 166.25%
cpu_number:
description: Number of CPUs of the host.
returned: success
type: string
sample: 24
cpu_sockets:
description: Number of CPU sockets of the host.
returned: success
type: int
sample: 2
cpu_speed:
description: CPU speed in Mhz
returned: success
type: int
sample: 1999
cpu_used:
description: Amount of the host's CPU currently used.
returned: success
type: string
sample: 33.6%
cpu_with_overprovisioning:
description: Amount of the host's CPU after applying the cpu.overprovisioning.factor.
returned: success
type: string
sample: 959520.0
created:
description: Date when the host was created.
returned: success
type: string
sample: 2015-05-03T15:05:51+0200
disconnected:
description: Date when the host was disconnected.
returned: success
type: string
sample: 2015-05-03T15:05:51+0200
disk_size_allocated:
description: Host's currently allocated disk size.
returned: success
type: int
sample: 2593
disk_size_total:
description: Total disk size of the host
returned: success
type: int
sample: 259300
events:
description: Events available for the host
returned: success
type: string
sample: "Ping; HostDown; AgentConnected; AgentDisconnected; PingTimeout; ShutdownRequested; Remove; StartAgentRebalance; ManagementServerDown"
ha_host:
description: Whether the host is a HA host.
returned: success
type: bool
sample: false
has_enough_capacity:
description: Whether the host has enough CPU and RAM capacity to migrate a VM to it.
returned: success
type: bool
sample: true
host_tags:
description: Comma-separated list of tags for the host.
returned: success
type: string
sample: "perf"
hypervisor:
description: Host's hypervisor.
returned: success
type: string
sample: VMware
hypervisor_version:
description: Hypervisor version.
returned: success
type: string
sample: 5.1
ip_address:
description: IP address of the host
returned: success
type: string
sample: 10.10.10.1
is_local_storage_active:
description: Whether the local storage is available or not.
returned: success
type: bool
sample: false
last_pinged:
description: Date and time the host was last pinged.
returned: success
type: string
sample: "1970-01-17T17:27:32+0100"
management_server_id:
description: Management server ID of the host.
returned: success
type: int
sample: 345050593418
memory_allocated:
description: Amount of the host's memory currently allocated.
returned: success
type: int
sample: 69793218560
memory_total:
description: Total of memory of the host.
returned: success
type: int
sample: 206085263360
memory_used:
description: Amount of the host's memory currently used.
returned: success
type: int
sample: 65504776192
name:
    description: Name of the host.
returned: success
type: string
sample: esx32.example.com
network_kbs_read:
description: Incoming network traffic on the host.
returned: success
type: int
sample: 0
network_kbs_write:
description: Outgoing network traffic on the host.
returned: success
type: int
sample: 0
os_category:
description: OS category name of the host.
    returned: success
type: string
sample: ...
out_of_band_management:
description: Host out-of-band management information.
returned: success
type: string
sample: ...
pod:
description: Pod name of the host.
returned: success
type: string
sample: Pod01
removed:
description: Date and time the host was removed.
returned: success
type: string
sample: "1970-01-17T17:27:32+0100"
resource_state:
description: Resource state of the host.
returned: success
type: string
sample: Enabled
allocation_state:
description: Allocation state of the host.
returned: success
type: string
sample: enabled
state:
description: State of the host.
returned: success
type: string
sample: Up
suitable_for_migration:
    description: Whether this host is suitable (has enough capacity and satisfies other criteria) to migrate a VM to it.
zatr/testassist | settings.py | Python | mit | 117 | 0.008547 | server = '127.0.0.1'
username = 'admin'
password = 'password'
diagnostic_command = 'run_diag'
log_command = 'run_log'
kchard/surframble | buoy_to_json/buoy_to_json.py | Python | mit | 644 | 0.003106 | import os
import zmq
import json
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
buoy_data_folder = os.path.join(os.environ['RESOURCE_DIR'], 'buoy')
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://broker:5557')
socket.setsockopt(zmq.SUBSCRIBE, '')
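# An empty SUBSCRIBE filter means the socket receives every published message.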
logging.info("Waiting for data")
while True:
surf_data = socket.recv_json()
logging.info("Recieved data: %s" % surf_data)
buoy_file_name = "%s.json" % str(surf_data['id'])
with open(os.path.join(buoy_data_folder, buoy_file_name), 'wb') as f:
json.dump(surf_data, f, sort_keys=True, indent=4)
svagionitis/puzzles | adventofcode.com/2015/1/sol1.py | Python | mit | 581 | 0.001721 |
if __name__ == "__main__":
with open("input.txt") as f:
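        # '(' moves Santa up one floor and ')' down one; `res` tracks the
        # current floor while scanning the instructions.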
res = 0
data = f.read()
flag_basement_first = False
for i, d in enumerate(data):
if d == '(':
res += 1
if d == ')':
res -= 1
if res == -1 and flag_basement_first is False:
print '2. What is the position of the character that causes Santa to first enter the basement? %d' % (i+1)
                flag_basement_first = True
        print '1. To what floor do the instructions take Santa? %d' % res
valentinedwv/pdfssa4met | headings.py | Python | gpl-3.0 | 4,413 | 0.006571 | #!/bin/env python
""" Extract and tag References from a PDF.
Created on Mar 1, 2010
@author: John Harrison
Usage: headings.py OPTIONS FILEPATH
OPTIONS:
--help, -h Print help and exit
--noxml Do not tag individual headings with XML tags.
Default is to include tagging.
--title Only print title then exit
--author Only print author then exit
"""
import sys, getopt
from lxml import etree
from utils import UsageError, ConfigError, mean, median
from pdf2xml import pdf2etree
def pdf2heads(opts, args):
xmltag = True
highlight = False
titleonly = False
authonly = False
for o, a in opts:
if (o == '--noxml'):
xmltag = False
elif (o == '--highlight'):
highlight = True
if (o == '--title'):
titleonly = True
elif (o == '--author'):
authonly = True
tree = pdf2etree(args)
# find title
page = 1
block = 1
title_node = None
while True:
try: title_node = tree.xpath("//PAGE[{0}]//BLOCK[{1}]".format(page, block))[0]
except IndexError: page+=1
else: break
if page > 2:
# probably not going to find it now
break
# find author
page = 1
block = 2
auth_node = None
while True:
try: auth_node = tree.xpath("//PAGE[{0}]//BLOCK[{1}]".format(page, block))[0]
        except IndexError: block+=1
else: break
if block > 4:
# probably not going to find it now
break
font_sizes = tree.xpath('//TOKEN/@font-size')
mean_font_size = mean(font_sizes)
median_font_size = median(font_sizes)
#print "Median Font Size (i.e. body text):", median_font_size
font_colors = tree.xpath('//TOKEN/@font-color')
font_color_hash = {}
for fc in font_colors:
try:
font_color_hash[fc]+=1
except KeyError:
font_color_hash[fc] = 1
    sortlist = [(v,k) for k,v in font_color_hash.iteritems()]
sortlist.sort(reverse=True)
main_font_color = sortlist[0][1]
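    # Heading heuristic used below: tokens noticeably larger than the mean
    # font size (~5% over), bold tokens, or tokens in a non-dominant colour
    # are assumed to belong to headings.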
head_txts = []
stop = False
for page_node in tree.xpath('//PAGE'):
        for block_node in page_node.xpath('.//BLOCK'):
if xmltag:
if block_node == title_node:
st = "<title>"
et = "</title>"
elif block_node == auth_node:
st = "<author>"
et = "</author>"
else:
st = "<heading>"
et = "</heading>"
if highlight:
st = "\033[0;32m{0}\033[0m".format(st)
et = "\033[0;32m{0}\033[0m".format(et)
else:
st = et = ""
if block_node == title_node and authonly:
continue
headers = block_node.xpath(".//TOKEN[@font-size > {0} or @bold = 'yes' or @font-color != '{1}']".format(mean_font_size*1.05, main_font_color))
head_txt = ' '.join([etree.tostring(el, method='text', encoding="UTF-8") for el in headers])
if len(head_txt):
head_txts.append("{0}{1}{2}".format(st, head_txt, et))
if block_node == title_node and titleonly:
stop = True
break
elif block_node == auth_node and authonly:
stop = True
break
if stop:
break
for txt in head_txts:
sys.stdout.writelines([txt, '\n'])
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
try:
try:
opts, args = getopt.getopt(argv, "ht", ["help", "test", "noxml", "highlight", "title", "author"])
except getopt.error as msg:
raise UsageError(msg)
for o, a in opts:
if (o in ['-h', '--help']):
# print help and exit
sys.stdout.write(__doc__)
sys.stdout.flush()
return 0
pdf2heads(opts, args)
except UsageError as err:
print >>sys.stderr, err.msg
print >>sys.stderr, "for help use --help"
return 2
    except ConfigError as err:
sys.stderr.writelines([str(err.msg),'\n'])
sys.stderr.flush()
return 1
if __name__ == '__main__':
sys.exit(main())
big-data-research/neuralnetworks_workshop_bucharest_2015 | nn_demo/01nn_otto.py | Python | apache-2.0 | 11,593 | 0.001984 | __author__ = 'alexs'
import theano.tensor as T
import theano
import numpy as np
import cPickle
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np
import random
import json
def getReferenceLabels():
referenceLabels = dict()
for i in range(0, 9):
reference_out = [0.0 for x in range(0, 9)]
reference_out[i] = 0.99
referenceLabels[i] = reference_out
return referenceLabels
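# Illustrative example of the encoding above (assuming the 9 Otto classes):
# label 2 maps to [0.0, 0.0, 0.99, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0].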
def compare(result_label, given_label, reference_labels):
givenKey = 0
resultedKey = 0
refGivenScore = 1000
refResultedScore = 1000
for key in reference_labels.keys():
score1 = np.sum(np.abs(np.array(given_label) - np.array(reference_labels[key])))
score2 = np.sum(np.abs(result_label - np.array(reference_labels[key])))
if score1 < refGivenScore:
refGivenScore = score1
givenKey = key
if score2 < refResultedScore:
refResultedScore = score2
resultedKey = key
if resultedKey == givenKey:
return True
return False
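# `compare` is nearest-reference classification: the predicted and the given
# label vectors are each matched to the closest reference label by L1
# distance, and the prediction counts as correct when both map to the same
# class.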
def makeW(rows, columns, start=-2, end=2):
w = np.random.uniform(start, end, (rows, columns))
return w
def updates_weights_function(weights, memories, cost_function, learning_rate=0.01, momentum_learning_rate=0.005):
gradients = T.grad(cost_function, weights) # keep in mind len(gradients) == len(weights)
update_lists = []
for i in range(0, len(weights)):
weight = weights[i]
        gradient = gradients[i]
memory = memories[i]
change = learning_rate * gradient + momentum_learning_rate * memory
new_val = weight - change
update_lists.append((weight, new_val))
update_lists.append((memory, change))
return update_lists
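# The loop above implements SGD with momentum: each shared weight moves by
# learning_rate * gradient plus momentum_learning_rate * previous_change,
# with the `memories` variables storing that previous change between calls.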
class NN():
def __init__(self):
self.layers = []
self.weights = []
self.weights_memory = []
self.cost = None
self.train = None
self.updates = None
        self.activate = None
self.activatwe = None
self.output = None
def build(self, givenWeights=None):
# first: init or build the in-between weight matrixes
for i in range(0, len(self.layers) - 1):
n = self.layers[i].size
m = self.layers[i + 1].size
if givenWeights:
w_values = givenWeights[i]
else:
w_values = makeW(n, m)
w_memory_values = np.zeros((n, m))
w = theano.shared(value=w_values, name="w_" + str(i) + "_" + str(i + 1))
w_memory = theano.shared(value=w_memory_values, name="w_memory_" + str(i) + "_" + str(i + 1))
self.weights.append(w)
self.weights_memory.append(w_memory)
# now build the model
inputVector = T.matrix("inputVector")
labels = T.matrix("labels")
out = None
net = None
workingV = inputVector
l2 = 0.0
l1 = 0.0
for i in range(0, len(self.weights)):
w = self.weights[i]
l2 += T.sum(w * w)
l1 += T.sum(T.abs_(w))
out = T.dot(workingV, w)
net = T.maximum(0, out)
workingV = net
self.cost = T.mean(T.pow(labels - out, 2)) + 0.005 * l2 + 0.005 * l1
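        # Cost: mean squared error against the targets plus small
        # (0.005-weighted) L2 and L1 penalties accumulated over all weight
        # matrices in the loop above.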
self.output = net
self.updates = updates_weights_function(self.weights, self.weights_memory, self.cost)
self.train = theano.function([inputVector, labels], outputs=self.cost, updates=self.updates)
self.activate = theano.function([inputVector, labels], outputs=self.cost)
self.activatwe = theano.function([inputVector], outputs=self.output)
def addLayer(self, layer):
self.layers.append(layer)
def snapshotWeigths(self, experimentId):
with open(str(experimentId) + ".dat", "w") as f:
for w in self.weights:
numeric_value = w.get_value().tolist()
f.write(json.dumps(numeric_value) + "\n")
def resume(self, experimentId="default"):
ww = []
with open(str(experimentId) + ".dat", "r") as f:
for line in f.readlines():
w = np.array(json.loads(line))
ww.append(w)
self.build(ww)
def trainData(self, train_set_input, train_set_labels,
valid_set_input, valid_set_labels,
test_set_input, test_set_labels,
nrOfEpochs=10000, batch_size=1000, experimentId="default"):
reference_labels = getReferenceLabels()
for ep in range(0, nrOfEpochs):
# random.shuffle(train_data)
overallError = 0.0
for j in range(0, len(train_set_input), batch_size):
endInterval = j + batch_size
if j + batch_size > len(train_set_input):
endInterval = len(train_set_input) - 1
i = train_set_input[j:endInterval]
r = train_set_labels[j:endInterval]
self.train(i, r)
for j in range(0, len(train_set_input), batch_size):
endInterval = j + batch_size
if j + batch_size > len(train_set_input):
endInterval = len(train_set_input) - 1
i = train_set_input[j:endInterval]
r = train_set_labels[j:endInterval]
overallError += self.activate(i, r)
posItems = 0.0
failedItems = 0.0
for valid_in, given_label in zip(valid_set_input, valid_set_labels):
result_label = self.activatwe([valid_in])
ok = compare(result_label, given_label, reference_labels)
if ok:
posItems += 1.0
else:
failedItems += 1.0
precision = posItems / (posItems + failedItems)
print(
"[{epoch}] error: {error} precision: {precision}".format(epoch=ep, error=overallError,
precision=precision))
# running tests
self.snapshotWeigths(experimentId)
if test_set_input and test_set_labels:
print("=================== TESTS ==================")
posItems = 0.0
failedItems = 0.0
for valid_in, given_label in zip(test_set_input, test_set_labels):
result_label = self.activatwe([valid_in])
ok = compare(result_label, given_label, reference_labels)
if ok:
posItems += 1.0
else:
failedItems += 1.0
precision = posItems / (posItems + failedItems)
print("Accuracy on {nrOfTests} tests is {precision}".format(nrOfTests=str(len(test_set_input)),
precision=str(precision)))
print("============================================")
class Layer():
def __init__(self, size):
self.size = size
class SigmoidLayer(Layer):
def __init__(self, size):
self.size = size
class StandardOutputWithSigmoid(Layer):
def __init__(self, size):
self.size = size
class InverseOutputLayerWithSigmoid(Layer):
def __init__(self, size):
self.size = size
def transformInput(inputList):
res = []
for input in inputList:
res.append(np.array(input, dtype="float32"))
return res
def transformOutput(outputList, size):
res = []
for out in outputList:
reference_out = [0.1 for x in range(0, size)]
reference_out[out] = 0.88
res.append(np.array(reference_out, dtype="float32"))
return res
def retrieve_training_set():
all_collections = []
df = pd.read_csv("/Users/alexs/work_phd/otto_group_challenge/train.csv")
X = df.values.copy()
np.random.shuffle(X)
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
print labels
encoder = LabelEncoder()
encoded_labels = encoder.fit_transform(labels).astype(np.int32)
all_labels = []
scaler = StandardScaler()
    Z = scaler.fit_transform(X)
jawilson/home-assistant | tests/components/wled/test_config_flow.py | Python | apache-2.0 | 7,451 | 0.000403 | """Tests for the WLED config flow."""
from unittest.mock import MagicMock
from wled import WLEDConnectionError
from homeassistant.components import zeroconf
from homeassistant.components.wled.const import CONF_KEEP_MASTER_LIGHT, DOMAIN
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
async def test_full_user_flow_implementation(
hass: HomeAssistant, mock_wled_config_flow: MagicMock, mock_setup_entry: None
) -> None:
"""Test the full manual user flow from start to finish."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result.get("step_id") == "user"
assert result.get("type") == RESULT_TYPE_FORM
assert "flow_id" in result
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "192.168.1.123"}
)
assert result.get("title") == "192.168.1.123"
assert result.get("type") == RESULT_TYPE_CREATE_ENTRY
assert "data" in result
assert result["data"][CONF_HOST] == "192.168.1.123"
assert result["data"][CONF_MAC] == "aabbccddeeff"
async def test_full_zeroconf_flow_implementation(
hass: HomeAssistant, mock_wled_config_flow: MagicMock, mock_setup_entry: None
) -> None:
"""Test the full manual user flow from start to finish."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={},
type="mock_type",
),
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert result.get("description_placeholders") == {CONF_NAME: "example"}
assert result.get("step_id") == "zeroconf_confirm"
assert result.get("type") == RESULT_TYPE_FORM
assert "flow_id" in result
flow = flows[0]
assert "context" in flow
assert flow["context"][CONF_HOST] == "192.168.1.123"
assert flow["context"][CONF_NAME] == "example"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2.get("title") == "example"
assert result2.get("type") == RESULT_TYPE_CREATE_ENTRY
assert "data" in result2
assert result2["data"][CONF_HOST] == "192.168.1.123"
assert result2["data"][CONF_MAC] == "aabbccddeeff"
async def test_connection_error(
hass: HomeAssistant, mock_wled_config_flow: MagicMock
) -> None:
"""Test we show user form on WLED connection error."""
mock_wled_config_flow.update.side_effect = WLEDConnectionError
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "example.com"},
)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == "user"
assert result.get("errors") == {"base": "cannot_connect"}
async def test_zeroconf_connection_error(
hass: HomeAssistant, mock_wled_config_flow: MagicMock
) -> None:
"""Test we abort zeroconf flow on WLED connection error."""
mock_wled_config_flow.update.side_effect = WLEDConnectionError
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={},
type="mock_type",
),
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "cannot_connect"
async def test_zeroconf_confirm_connection_error(
hass: HomeAssistant, mock_wled_config_flow: MagicMock
) -> None:
"""Test we abort zeroconf flow on WLED connection error."""
mock_wled_config_flow.update.side_effect = WLEDConnectionError
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_ZEROCONF,
CONF_HOST: "example.com",
CONF_NAME: "test",
},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.com.",
name="mock_name",
port=None,
properties={},
type="mock_type",
),
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "cannot_connect"
async def test_user_device_exists_abort(
    hass: HomeAssistant,
init_integration: MagicMock,
mock_wled_config_flow: MagicMock,
) -> None:
"""Test we abort zeroconf flow if WLED device already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "192.168.1.123"},
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "already_co | nfigured"
async def test_zeroconf_device_exists_abort(
hass: HomeAssistant,
init_integration: MagicMock,
mock_wled_config_flow: MagicMock,
) -> None:
"""Test we abort zeroconf flow if WLED device already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={},
type="mock_type",
),
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "already_configured"
async def test_zeroconf_with_mac_device_exists_abort(
hass: HomeAssistant,
init_integration: MockConfigEntry,
mock_wled_config_flow: MagicMock,
) -> None:
"""Test we abort zeroconf flow if WLED device already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data=zeroconf.ZeroconfServiceInfo(
host="192.168.1.123",
hostname="example.local.",
name="mock_name",
port=None,
properties={CONF_MAC: "aabbccddeeff"},
type="mock_type",
),
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "already_configured"
async def test_options_flow(
hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
"""Test options config flow."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(mock_config_entry.entry_id)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == "init"
assert "flow_id" in result
result2 = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_KEEP_MASTER_LIGHT: True},
)
assert result2.get("type") == RESULT_TYPE_CREATE_ENTRY
assert result2.get("data") == {
CONF_KEEP_MASTER_LIGHT: True,
}
TravisCG/SI_scripts | canpyY.py | Python | gpl-3.0 | 803 | 0.028643 | #!/usr/bin/python
"""
Create Y matrix for Canopy. This is the connection between the SNAs and CNVs
"""
import sys
Wm = open(sys.argv[1])
Wm.readline()
cnvs = list()
header = list()
header.append("non-cna_region")
for i in Wm:
region = i.split()[0]
(chrx, start, end) = region.split("_")
cnvs.append((chrx, int(start), int(end)))
header.append(region)
Wm.close()
X = open(sys.argv[2])
X.readline()
print "\t".join(heade | r)
for i in X:
sna = i.split()[0]
chrx, pos, alt = sna.split("_")
pos = int(pos)
out = list()
out.append(sna)
out.append("0") # non-cna_region
ok = False
for j in cnvs:
if chrx == j[0] and j[1] < pos and j[2] > pos:
out.append("1")
ok = True
else:
out.append("0")
if not ok:
out[1] = "1" # set the n | on-cna_region to 1
print "\t".join(out)
X.close()
edoburu/django-parler | parler/utils/views.py | Python | apache-2.0 | 2,807 | 0.001069 | """
Internal DRY functions.
"""
from django.conf import settings
from parler import appsettings
from parler.utils import normalize_language_code, is_multilingual_project, get_language_title
def get_language_parameter(request, query_language_key='language', object=None, default=None):
"""
Get the language parameter from the current request.
"""
# This is the same logic as the django-admin uses.
# The only difference is the origin of the request parameter.
if not is_multilingual_project():
# By default, the objects are stored in a single static language.
# This makes the transition to multilingual easier as well.
# The default language can operate as fallback language too.
return default or appsettings.PARLER_LANGUAGES.get_default_language()
else:
# In multilingual mode, take the provided language of the request.
code = request.GET.get(query_language_key)
if not code:
# forms: show first tab by default
code = default or appsettings.PARLER_LANGUAGES.get_first_language()
return normalize_language_code(code)
def get_language_tabs(request, current_language, available_languages, css_class=None):
"""
Determine the language tabs to show.
"""
tabs = TabsList(css_class=css_class)
get = request.GET.copy() # QueryDict object
tab_languages = []
site_id = getattr(settings, 'SITE_ID', None)
for lang_dict in appsettings.PARLER_LANGUAGES.get(site_id, ()):
code = lang_dict['code']
title = get_language_title(code)
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
elif code in available_languages:
status = 'available'
else:
status = 'empty'
tabs.append((url, title, code, status))
tab_languages.append(code)
# Additional stale translations in the database?
if appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS:
for code in available_languages:
if code not in tab_languages:
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
else:
status = 'available'
tabs.append((url, get_language_title(code), code, status))
    tabs.current_is_translated = current_language in available_languages
tabs.allow_deletion = len(available_languages) > 1
return tabs
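# Each tab yielded above is a 4-tuple (url, title, code, status), where
# status is one of 'current', 'available' or 'empty'.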
class TabsList(list):
    def __init__(self, seq=(), css_class=None):
self.css_class = css_class
self.current_is_translated = False
self.allow_deletion = False
super().__init__(seq)
Nitaco/ansible | contrib/inventory/scaleway.py | Python | gpl-3.0 | 7,193 | 0.000695 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
External inventory script for Scaleway
====================================
Shamelessly copied from an existing inventory script.
This script generates an inventory that Ansible can understand by making API requests to Scaleway API
Requires some python libraries, ensure to have them installed when using this script. (pip install requests https://pypi.org/project/requests/)
Before using this script you may want to modify scaleway.ini config file.
This script generates an Ansible hosts file with these host groups:
<hostname>: Defines host itself with Scaleway's hostname as group name.
<tag>: Contains all hosts which has "<tag>" as tag.
<region>: Contains all hosts which are in the "<region>" region.
all: Contains all hosts defined in Scaleway.
'''
# (c) 2017, Paul B. <paul@bonaud.fr>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import os
import requests
import six
from six.moves import configparser
import sys
import time
import traceback
try:
import json
except ImportError:
import simplejson as json
EMPTY_GROUP = {
'children': [],
'hosts': []
}
class ScalewayAPI:
REGIONS = ['par1', 'ams1']
def __init__(self, auth_token, region):
self.session = requests.session()
self.session.headers.update({
'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0])
})
self.session.headers.update({
'X-Auth-Token': auth_token.encode('latin1')
})
self.base_url = 'https://cp-%s.scaleway.com' % (region)
def servers(self):
raw = self.session.get('/'.join([self.base_url, 'servers']))
try:
response = raw.json()
return self.get_resource('servers', response, raw)
except ValueError:
return []
def get_resource(self, resource, response, raw):
raw.raise_for_status()
if resource in response:
return response[resource]
else:
raise ValueError(
"Resource %s not found in Scaleway API response" % (resource))
def env_or_param(env_key, param=None, fallback=None):
env_value = os.environ.get(env_key)
if (param, env_value) == (None, None):
return fallback
elif env_value is not None:
return env_value
else:
return param
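# Precedence implemented above: an environment variable wins when set,
# otherwise the explicit parameter, otherwise the fallback value.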
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat(
'/'.join([dpath, 'scaleway_ansible_inventory.json']))
except OSError:
            return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
else:
maxage = 60
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
def generate_inv_from_api(config):
try:
inventory['all'] = copy.deepcopy(EMPTY_GROUP)
if config.has_option('auth', 'api_token'):
auth_token = config.get('auth', 'api_token')
auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token)
if auth_token is None:
sys.stderr.write('ERROR: missing authentication token for Scaleway API')
sys.exit(1)
if config.has_option('compute', 'regions'):
regions = config.get('compute', 'regions')
if regions == 'all':
regions = ScalewayAPI.REGIONS
else:
regions = map(str.strip, regions.split(','))
else:
regions = [
env_or_param('SCALEWAY_REGION', fallback='par1')
]
for region in regions:
api = ScalewayAPI(auth_token, region)
for server in api.servers():
hostname = server['hostname']
if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'):
ip = server['public_ip']['address']
else:
ip = server['private_ip']
for server_tag in server['tags']:
if server_tag not in inventory:
inventory[server_tag] = copy.deepcopy(EMPTY_GROUP)
inventory[server_tag]['children'].append(hostname)
if region not in inventory:
inventory[region] = copy.deepcopy(EMPTY_GROUP)
inventory[region]['children'].append(hostname)
inventory['all']['children'].append(hostname)
inventory[hostname] = []
inventory[hostname].append(ip)
return inventory
except Exception:
# Return empty hosts output
traceback.print_exc()
return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
def get_inventory(config):
''' Reads the inventory from cache or Scaleway api '''
if cache_available(config):
inv = get_cache('scaleway_ansible_inventory.json', config)
else:
inv = generate_inv_from_api(config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
# Read config
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
if cache_available(config):
inventory = get_cache('scaleway_ansible_inventory.json', config)
else:
inventory = get_inventory(config)
# return to ansible
sys.stdout.write(str(inventory))
sys.stdout.flush()
rcgee/oq-hazardlib | openquake/hazardlib/mfd/youngs_coppersmith_1985.py | Python | agpl-3.0 | 15,232 | 0 | # coding: utf-8
# The Hazard Library
# Copyright (C) 2013-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.youngs_coppersmith_1985` defines the
Youngs and Coppersmith 1985 MFD.
"""
import numpy
from openquake.baselib.python3compat import range, round
from openquake.hazardlib.mfd.base import BaseMFD
# width of the boxcar function representing the characteristic
# distribution
DELTA_CHAR = 0.5
class YoungsCoppersmith1985MFD(BaseMFD):
"""
Class implementing the MFD for the 'Characteristic Earthquake Model' as
described in: "Implications of fault slip rates and earthquake recurrence
models to probabilistic seismic hazard estimates", by Robert R. Youngs and
Kevin J. Coppersmith and published in Bulletin of the Seismological
Society of America, Vol. 75, No. 4, pages 939-964, 1985.
The class implements the MFD under the following assumptions as reported
at page 954:
1) Δ_mc (width of the boxcar distribution representing characteristic
rate) is equal to 0.5 magnitude unit
2) m' (maximum magnitude value for the Gutenberg-Richeter part of the
distribution) is equal to the absolute maximum magnitude minus Δ_mc
(that is there is no gap between the Gutenberg-Richter distribution and
the boxcar distribution)
3) the rate of events at the characteristic magnitude is equal to the
rate of events for magnitude equal to m' - 1
:param min_mag:
The lowest possible magnitude for the MFD. The first bin in the
:meth:`result histogram <get_annual_occurrence_rates>` is aligned
to make its left border match this value.
:param a_val:
The Gutenberg-Richter ``a`` value -- the intercept of the loglinear
cumulative G-R relationship.
:param b_val:
The Gutenberg-Richter ``b`` value -- the gradient of the loglinear
G-R relationship.
:param char_mag:
The characteristic magnitude defining the middle point of the
characteristic distribution. That is the boxcar function representing
the characteristic distribution is defined in the range
[char_mag - 0.25, char_mag + 0.25].
:param char_rate:
The characteristic rate associated to the characteristic magnitude,
to be distributed over the domain of the boxcar function representing
the characteristic distribution (that is λ_char = char_rate / 0.5)
:param bin_width:
A positive float value -- the width of a single histogram bin.
Values for ``min_mag`` and the maximum magnitude (char_mag + 0.25) don't
have to be aligned with respect to ``bin_width``. They get rounded
accordingly anyway so that both are divisible by ``bin_width`` just before
converting a function to a histogram.
See :meth:`_get_min_mag_and_num_bins`.
"""
MODIFICATIONS = set()
def __init__(self, min_mag, a_val, b_val, char_mag, char_rate, bin_width):
self.min_mag = min_mag
self.a_val = a_val
self.b_val = b_val
self.char_mag = char_mag
self.char_rate = char_rate
self.bin_width = bin_width
self.check_constraints()
def get_min_max_mag(self):
"Return the minimum and maximum magnitudes"
mag, num_bins = self._get_min_mag_and_num_bins()
        return mag, mag + self.bin_width * (num_bins - 1)
def check_constraints(self):
"""
Checks the following constraints:
* minimum magnitude is positive.
* ``b`` value is positive.
* characteristic magnitude is positive
* characteristic rate is positive
* bin width is in the range (0, 0.5] to allow for at least one bin
representing the characteristic distribution
* characteristic magnitude minus 0.25 (that is the maximum magnitude
of the G-R distribution) is greater than the minimum magnitude by at
least one magnitude bin.
* rate of events at the characteristic magnitude is equal to the
rate of events for magnitude equal to m_prime - 1. This is done
by asserting the equality (up to 7 digit precision) ::
10 ** (a_incr - b * (m' - 1)) == char_rate / 0.5
where ``a_incr`` is the incremental a value obtained from the
cumulative a value using the following formula ::
a_incr = a_val + log10(b_val * ln(10))
and ``m' - 1 = char_mag - 1.25``
"""
if not self.min_mag > 0:
raise ValueError('minimum magnitude must be positive')
if not self.b_val > 0:
raise ValueError('b value must be positive')
if not self.char_mag > 0:
raise ValueError('characteristic magnitude must be positive')
if not self.char_rate > 0:
raise ValueError('characteristic rate must be positive')
if not 0 < self.bin_width <= DELTA_CHAR:
err_msg = 'bin width must be in the range (0, %s] to allow for ' \
'at least one magnitude bin representing the ' \
'characteristic distribution' % DELTA_CHAR
raise ValueError(err_msg)
if not self.char_mag - DELTA_CHAR / 2 >= self.min_mag + self.bin_width:
err_msg = 'Maximum magnitude of the G-R distribution (char_mag ' \
'- 0.25) must be greater than the minimum magnitude ' \
'by at least one magnitude bin.'
raise ValueError(err_msg)
a_incr = self.a_val + numpy.log10(self.b_val * numpy.log(10))
actual = 10 ** (a_incr - self.b_val * (self.char_mag - 1.25))
desired = self.char_rate / DELTA_CHAR
if not numpy.allclose(actual, desired, rtol=0.0, atol=1e-07):
err_msg = 'Rate of events at the characteristic magnitude is ' \
'not equal to the rate of events for magnitude equal ' \
'to char_mag - 1.25'
raise ValueError(err_msg)
@classmethod
def from_total_moment_rate(cls, min_mag, b_val, char_mag,
total_moment_rate, bin_width):
"""
        Define Youngs and Coppersmith 1985 MFD by constraining cumulative a
value and characteristic rate from total moment rate.
The cumulative a value and characteristic rate are obtained by
solving equations (16) and (17), page 954, for the cumulative rate of
events with magnitude greater than the minimum magnitude - N(min_mag)
- and the cumulative rate of characteristic earthquakes - N(char_mag).
        The difference ``N(min_mag) - N(char_mag)`` represents the rate of
noncharacteristic, exponentially distributed earthquakes and is used
to derive the cumulative a value by solving the following equation ::
            10 ** (a_val - b_val * min_mag) -
            10 ** (a_val - b_val * (char_mag - 0.25))
            = N(min_mag) - N(char_mag)
which can be written as ::
            a_val = log10(
                (N(min_mag) - N(char_mag)) /
                (10 ** (- b_val * min_mag) - 10 ** (- b_val * (char_mag - 0.25)))
            )
In the calculation of N(min_mag) and N(char_mag), the Hanks and
Kanamori (1979) formula ::
M0 = 10 ** (1.5 * Mw + 9.05)
is used to convert moment magnitude (Mw) to seismic moment (M0,
Newton × m)
:param min_mag:
The lowest magnitude for the MFD. The first bin in the
:meth:`result histogram <get_annual_occurrence |
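# Added note (not part of the dataset row above): a minimal numerical sketch
# of the rate-consistency constraint enforced by check_constraints. The
# parameter values below are illustrative assumptions; the identity checked
# is 10 ** (a_incr - b * (char_mag - 1.25)) == char_rate / 0.5.
import numpy
b_val, char_mag, char_rate = 1.0, 7.0, 1e-3
a_incr = numpy.log10(char_rate / 0.5) + b_val * (char_mag - 1.25)
a_val = a_incr - numpy.log10(b_val * numpy.log(10))  # back out the cumulative a
assert numpy.allclose(10 ** (a_incr - b_val * (char_mag - 1.25)),
                      char_rate / 0.5, rtol=0.0, atol=1e-07)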
mrramazani/mysql-replicant-python-1 | lib/tests/deployment/simple.py | Python | bsd-3-clause | 2,759 | 0.001812 | # Copyright (c) 2010, Mats Kindahl, Charles Bell, and Lars Thalmann
# All rights reserved.
#
# Use of this source code is governed by a BSD licence that can be
# found in the LICENCE file.
from mysql.replicant.server import Server
from mysql.replicant.common import User
from mysql.replicant.machine import Linux
from mysql.replicant.roles import Master, Final
import time, os.path
class MultiLinux(Linux):
"""Class to handle the case where there are multiple servers
running at the same box, all managed by mysqld_multi."""
def __init__(self, number):
self.__number = number
def stop_server(self, server):
server.ssh(["mysqld_multi", "stop", str(self.__number)])
        pidfile = ''.join(["/var/run/mysqld", server.name, ".pid"])
while os.path.exists(pidfile):
time.sleep(1)
def start_server(self, server):
import time
print "Starting server...",
server.ssh(["mysqld_multi", "start", str(self.__number)])
time.sleep(1) # Need some time for server to start
print "done"
_replicant_user = User("mysql_replicant")
_repl_user = User("repl_user", "xyzzy")
def _cnf(name):
test_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(test_dir, '..', name + ".cnf")
master = Server(server_id=1, name="mysqld1",
sql_user=_replicant_user,
ssh_user=User("mysql"),
                machine=Linux(), role=Master(_repl_user),
port=3307,
                socket='/var/run/mysqld/mysqld1.sock',
defaults_file=_cnf("mysqld1"),
config_section="mysqld1")
slaves = [Server(server_id=2, name="mysqld2",
sql_user=_replicant_user,
ssh_user=User("mysql"),
machine=Linux(), role=Final(master),
port=3308,
socket='/var/run/mysqld/mysqld2.sock',
defaults_file=_cnf("mysqld2"),
config_section="mysqld2"),
Server(server_id=3, name="mysqld3",
sql_user=_replicant_user,
ssh_user=User("mysql"),
machine=Linux(), role=Final(master),
port=3309,
socket='/var/run/mysqld/mysqld3.sock',
defaults_file=_cnf("mysqld3"),
config_section="mysqld3"),
Server(server_id=4, name="mysqld4",
sql_user=_replicant_user,
ssh_user=User("mysql"),
machine=Linux(), role=Final(master),
port=3310,
socket='/var/run/mysqld/mysqld4.sock',
defaults_file=_cnf("mysqld4"),
config_section="mysqld4")]
servers = [master] + slaves
|
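# Hypothetical usage sketch (not part of the original file): iterate over the
# deployment defined above and print each server's endpoints, in the module's
# Python 2 print style.
for _srv in servers:
    print "%s: port=%d socket=%s" % (_srv.name, _srv.port, _srv.socket)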
Kasai-Code/Kinesis | settings/config.py | Python | mit | 741 | 0.002699 | model_image_path = "model.png"
generate_model_image = False
dataset_training_path = "data/datasets/training.csv"
dataset_testing_path = "data/datasets/testing.csv"
supported_characters = [" ", "c", "e", "t", "a", "r", "i", "o", "s", "n", "c", "l", "d", "_", "u", "m", " ", "p", "f", ".", "g", "h", ",", "\"", "/", "0", "-", "b", ")", "(" | , "1", "E", "*", "=", ":", ";", ">", "T", "S", "y", "2", "R", "A", "v", "w", "L", "C", "<", "N", "x", "I", "k", "'", "D", "O", "P", "3", "#", "M", "F", "U", "4", "5", "9", "V", "{", "}", "B", "8", "H", "G", "6", "+", "7", "z", "W", "$", "[", "]", "@", "&", "j", "q", "\\", "Y", "K", "X", "!", "%", "|", "5", "`", "Q", "J", "Z", "?", "^", "~"]
supported_characters_size = len(supported_characters) + 1
|
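# Illustrative sketch (an assumption, not from the original repo): a character
# list like this is typically turned into a char -> integer-id lookup, with the
# extra slot implied by supported_characters_size reserved for unknown
# characters. Note that duplicate entries (" ", "c", "5") collapse onto the
# index of their last occurrence.
char_to_id = {c: i for i, c in enumerate(supported_characters)}
unknown_id = len(supported_characters)  # the "+ 1" slot
def encode(text):
    return [char_to_id.get(c, unknown_id) for c in text]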
home-assistant/home-assistant | homeassistant/components/fritz/common.py | Python | apache-2.0 | 19,263 | 0.000779 | """Support for AVM FRITZ!Box classes."""
from __future__ import annotations
from collections.abc import Callable, ValuesView
from dataclasses import dataclass, field
from datetime import datetime, timedelta
import logging
from types import MappingProxyType
from typing import Any, TypedDict, cast
from fritzconnection import FritzConnection
from fritzconnection.core.exceptions import (
FritzActionError,
FritzConnectionException,
FritzSecurityError,
FritzServiceError,
)
from fritzconnection.lib.fritzhosts import FritzHosts
from fritzconnection.lib.fritzstatus import FritzStatus
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.device_tracker.const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
)
from homeassistant.components.switch import DOMAIN as DEVICE_SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import update_coordinator
from homeassistant.helpers.device_registry import (
CONNECTION_NETWORK_MAC,
async_entries_for_config_entry,
async_get,
)
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_registry import (
EntityRegistry,
RegistryEntry,
async_entries_for_device,
)
from homeassistant.util import dt as dt_util
from .const import (
DEFAULT_DEVICE_NAME,
DEFAULT_HOST,
DEFAULT_PORT,
DEFAULT_USERNAME,
DOMAIN,
SERVICE_CLEANUP,
SERVICE_REBOOT,
SERVICE_RECONNECT,
)
_LOGGER = logging.getLogger(__name__)
def _is_tracked(mac: str, current_devices: ValuesView) -> bool:
"""Check if device is already tracked."""
for tracked in current_devices:
if mac in tracked:
return True
return False
def device_filter_out_from_trackers(
mac: str,
device: FritzDevice,
current_devices: ValuesView,
) -> bool:
"""Check if device should be filtered out from trackers."""
reason: str | None = None
if device.ip_address == "":
reason = "Missing IP"
elif _is_tracked(mac, current_devices):
reason = "Already tracked"
if reason:
_LOGGER.debug(
"Skip adding device %s [%s], reason: %s", device.hostname, mac, reason
)
return bool(reason)
def _cleanup_entity_filter(device: RegistryEntry) -> bool:
"""Filter only relevant entities."""
return device.domain == DEVICE_TRACKER_DOMAIN or (
device.domain == DEVICE_SWITCH_DOMAIN and "_internet_access" in device.entity_id
)
class ClassSetupMissing(Exception):
"""Raised when a Class func is called before setup."""
def __init__(self) -> None:
"""Init custom exception."""
super().__init__("Function called before Class setup")
@dataclass
class Device:
"""FRITZ!Box device class."""
mac: str
ip_address: str
name: str
wan_access: bool
class HostInfo(TypedDict):
"""FRITZ!Box host info class."""
mac: str
name: str
ip: str
status: bool
class FritzBoxTools(update_coordinator.DataUpdateCoordinator):
"""FrtizBoxTools class."""
def __init__(
self,
hass: HomeAssistant,
password: str,
username: str = DEFAULT_USERNAME,
host: str = DEFAULT_HOST,
port: int = DEFAULT_PORT,
) -> None:
"""Initialize FritzboxTools class."""
super().__init__(
hass=hass,
logger=_LOGGER,
name=f"{DOMAIN}-{host}-coordinator",
update_interval=timedelta(seconds=30),
)
self._devices: dict[str, FritzDevice] = {}
self._options: MappingProxyType[str, Any] | None = None
self._unique_id: str | None = None
self.connection: FritzConnection = None
self.fritz_hosts: FritzHosts = None
self.fritz_status: FritzStatus = None
self.hass = hass
self.host = host
self.password = password
self.port = port
self.username = username
self._mac: str | None = None
self._model: str | None = None
self._current_firmware: str | None = None
self._latest_firmware: str | None = None
self._update_available: bool = False
async def async_setup(
self, options: MappingProxyType[str, Any] | None = None
) -> None:
"""Wrap up FritzboxTools class setup."""
self._options = options
await self.hass.async_add_executor_job(self.setup)
def setup(self) -> None:
"""Set up FritzboxTools class."""
self.connection = FritzConnection(
address=self.host,
port=self.port,
user=self.username,
password=self.password,
timeout=60.0,
pool_maxsize=30,
)
if not self.connection:
_LOGGER.error("Unable to establish a connection with %s", self.host)
return
self.fritz_status = FritzStatus(fc=self.connection)
info = self.connection.call_action("DeviceInfo:1", "GetInfo")
if not self._unique_id:
self._unique_id = info["NewSerialNumber"]
self._model = info.get("NewModelName")
self._current_firmware = info.get("NewSoftwareVersion")
self._update_available, self._latest_firmware = self._update_device_info()
@callback
async def _async_update_data(self) -> None:
"""Update FritzboxTools data."""
try:
self.fritz_hosts = FritzHosts(fc=self.connection)
await self.async_scan_devices()
except (FritzSecurityError, FritzConnectionException) as ex:
raise update_coordinator.UpdateFailed from ex
@property
def unique_id(self) -> str:
"""Return unique id."""
if not self._unique_id:
raise ClassSetupMissing()
return self._unique_id
@property
    def model(self) -> str:
"""Return device model."""
if not self._model:
raise ClassSetupMissing()
return self._model
@property
def current_firmware(self) -> str:
"""Return current SW version."""
if not self._current_firmware:
raise ClassSetupMissing()
return self._current_firmware
@property
def latest_firmware(self) -> str | None:
| """Return latest SW version."""
return self._latest_firmware
@property
def update_available(self) -> bool:
"""Return if new SW version is available."""
return self._update_available
@property
def mac(self) -> str:
"""Return device Mac address."""
if not self._unique_id:
raise ClassSetupMissing()
return self._unique_id
@property
def devices(self) -> dict[str, FritzDevice]:
"""Return devices."""
return self._devices
@property
def signal_device_new(self) -> str:
"""Event specific per FRITZ!Box entry to signal new device."""
return f"{DOMAIN}-device-new-{self._unique_id}"
@property
def signal_device_update(self) -> str:
"""Event specific per FRITZ!Box entry to signal updates in devices."""
return f"{DOMAIN}-device-update-{self._unique_id}"
def _update_hosts_info(self) -> list[HostInfo]:
"""Retrieve latest hosts information from the FRITZ!Box."""
try:
return self.fritz_hosts.get_hosts_info() # type: ignore [no-any-return]
except Exception as ex: # pylint: disable=[broad-except]
if not self.hass.is_stopping:
raise HomeAssistantError("Error refreshing hosts info") from ex
return []
def _update_device_info(self) -> tuple[bool, str | None]:
"""Retrieve latest device information from the FRITZ!Box."""
version = self.connection.call_action("UserInterface1", "GetInfo").get(
"NewX_AVM-DE_Version"
)
return bool(version), version
async def async_scan_devices(self, now: datetime | None = None) -> None:
|
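# Hypothetical usage sketch (not part of the original file): the coordinator
# above is normally created during config-entry setup, inside Home Assistant's
# event loop, roughly like this:
#
#     avm_wrapper = FritzBoxTools(hass, password="secret", host="192.168.178.1")
#     await avm_wrapper.async_setup(entry.options)
#
# async_setup() defers the blocking FritzConnection login to an executor job
# (hass.async_add_executor_job), as shown in setup() above.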
mozilla/github-org-scripts | check_CoC.py | Python | mpl-2.0 | 195 | 0 | ################################################
#
# file moved to own repository:
# https://github.com/mozilla/Mozilla-GitHub-Standards
#
################################################
|
plotly/python-api | packages/python/plotly/plotly/validators/splom/dimension/_valuessrc.py | Python | mit | 466 | 0 | import _plotly_utils.basevalidators
class ValuessrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(
self, plotly_name="valuessrc", parent_name="splom.dimension" | , **kwargs
):
super(ValuessrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
tddv/readthedocs.org | readthedocs/donate/apps.py | Python | mit | 202 | 0 | from django.apps import AppConfig
class DonateAppConfig(AppConfig):
    name = 'readthedocs.donate'
    verbose_name = 'Donate'
def ready(self):
import readthedocs.donate.signals # noqa
|
gltn/stdm | stdm/third_party/sqlalchemy/testing/plugin/plugin_base.py | Python | gpl-2.0 | 20,361 | 0 | # plugin/plugin_base.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Testing extensions.
this module is designed to work as a testing-framework-agnostic library,
created so that multiple test frameworks can be supported at once
(mostly so that we can migrate to new ones). The current target
is pytest.
"""
from __future__ import absolute_import
import abc
import re
import sys
py3k = sys.version_info >= (3, 0)
if py3k:
import configparser
ABC = abc.ABC
else:
import ConfigParser as configparser
import collections as collections_abc # noqa
class ABC(object):
__metaclass__ = abc.ABCMeta
# late imports
fixtures = None
engines = None
exclusions = None
warnings = None
profiling = None
assertions = None
requirements = None
config = None
testing = None
util = None
file_config = None
logging = None
include_tags = set()
exclude_tags = set()
options = None
def setup_options(make_option):
make_option(
"--log-info",
action="callback",
type="string",
callback=_log,
help="turn on info logging for <LOG> (multiple OK)",
)
make_option(
"--log-debug",
action="callback",
type="string",
callback=_log,
help="turn on debug logging for <LOG> (multiple OK)",
)
make_option(
"--db",
action="append",
type="string",
dest="db",
help="Use prefab database uri. Multiple OK, "
"first one is run by default.",
)
make_option(
"--dbs",
action="callback",
zeroarg_callback=_list_dbs,
help="List available prefab dbs",
)
make_option(
"--dburi",
action="append",
type="string",
dest="dburi",
help="Database uri. Multiple OK, " "first one is run by default.",
)
make_option(
"--dropfirst",
action="store_true",
dest="dropfirst",
help="Drop all tables in the target database first",
)
make_option(
"--backend-only",
action="store_true",
dest="backend_only",
help="Run only tests marked with __backend__ or __sparse_backend__",
)
make_option(
"--nomemory",
action="store_true",
dest="nomemory",
help="Don't run memory profiling tests",
)
make_option(
"--postgresql-templatedb",
type="string",
help="name of template database to use for PostgreSQL "
"CREATE DATABASE (defaults to current database)",
)
make_option(
"--low-connections",
action="store_true",
dest="low_connections",
help="Use a low number of distinct connections - "
"i.e. for Oracle TNS",
)
make_option(
"--write-idents",
type="string",
dest="write_idents",
help="write out generated follower idents to <file>, "
"when -n<num> is used",
)
make_option(
"--reversetop",
action="store_true",
dest="reversetop",
default=False,
help="Use a random-ordering set implementation in the ORM "
"(helps reveal dependency issues)",
)
make_option(
"--requirements",
action="callback",
type="string",
callback=_requirements_opt,
help="requirements class for testing, overrides setup.cfg",
)
make_option(
"--with-cdecimal",
action="store_true",
dest="cdecimal",
default=False,
help="Monkeypatch the cdecimal library into Python 'decimal' "
"for all tests",
)
make_option(
"--include-tag",
action="callback",
callback=_include_tag,
type="string",
help="Include tests with tag <tag>",
)
make_option(
"--exclude-tag",
action="callback",
callback=_exclude_tag,
type="string",
help="Exclude tests with tag <tag>",
)
make_option(
"--write-profiles",
action="store_true",
dest="write_profiles",
default=False,
help="Write/update failing profiling data.",
)
make_option(
"--force-write-profiles",
action="store_true",
dest="force_write_profiles",
default=False,
help="Unconditionally write/update profiling data.",
)
def configure_follower(follower_ident):
"""Configure required state for a follower.
This invokes in the parent process and typically includes
database creation.
"""
from sqlalchemy.testing import provision
provision.FOLLOWER_IDENT = follower_ident
def memoize_important_follower_config(dict_):
"""Store important configuration we will need to send to a follower.
This invokes in the parent process after normal config is set up.
This is necessary as pytest seems to not be using forking, so we
start with nothing in memory, *but* it isn't running our argparse
callables, so we have to just copy all of that over.
"""
dict_["memoized_config"] = {
"include_tags": include_tags,
"exclude_tags": exclude_tags,
}
def restore_important_follower_config(dict_):
"""Restore important configuration needed by a follower.
This invokes in the follower process.
"""
global include_tags, exclude_tags
include_tags.update(dict_["memoized_config"]["include_tags"])
exclude_tags.update(dict_["memoized_config"]["exclude_tags"])
def read_config():
global file_config
file_config = configparser.ConfigParser()
file_config.read(["setup.cfg", "test.cfg"])
def pre_begin(opt):
"""things to set up early, before coverage might be setup."""
global options
options = opt
for fn in pre_configure:
fn(options, file_config)
def set_coverage_flag(value):
options.has_coverage = value
def post_begin():
"""things to set up later, once we know coverage is running."""
# Lazy setup of other options (post coverage)
for fn in post_configure:
fn(options, file_config)
# late imports, has to happen after config.
global util, fixtures, engines, exclusions, assertions
global warnings, profiling, config, testing
from sqlalchemy import testing # noqa
from sqlalchemy.testing import fixtures, engines, exclusions # noqa
from sqlalchemy.testing import assertions, warnings, profiling # noqa
from sqlalchemy.testing import config # noqa
from sqlalchemy import util # noqa
warnings.setup_filters()
def _log(opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith("-info"):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith("-debug"):
logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
print("Available --db options (use --dburi to override)")
for macro in sorted(file_config.options("db")):
        print("%20s\t%s" % (macro, file_config.get("db", macro)))
sys.exit(0)
def _requirements_opt(opt_str, value, parser):
_setup_requirements(value)
def _exclude_tag(opt_str, value, parser):
exclude_tags.add(value.replace("-", "_"))
def _include_tag(opt_str, value, parser):
include_tags.add(value.replace("-", "_"))
pre_configure = []
post_configure = []
def pre(fn):
    pre_configure.append(fn)
return fn
def post(fn):
post_configure.append(fn)
return fn
@pre
def _setup_options(opt, file_config):
global options
options = opt
@pre
def _set_nomemory(opt, file_config):
if opt.nomemory:
exclude_tags.add("memory_intensive")
@pre
def _monkeypatch_cdecimal(options, file_config):
if options.cdecimal:
import cdecimal
sys.modules["decimal"] = cdecimal
@post
def _init_symbols(options, file_config):
from sqlalchemy.testing import config
config._fixture_functions = _fixture_fn_class()
@post
def _engine_uri(options, file_config):
from sqlal |
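# Hypothetical usage sketch (not part of the original file): the @pre and
# @post decorators defined above register configuration hooks; pre hooks run
# in pre_begin() before coverage is set up, post hooks in post_begin() after:
#
#     @post
#     def _my_plugin_setup(options, file_config):
#         ...  # runs once the full test config is available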
makyo/honeycomb | usermgmt/templatetags/profile_extras.py | Python | mit | 914 | 0 | from django import template
from django.template.defaultfilters import stringfilter
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from usermgmt import utils
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def render_attributes(value, autoescape=True):
"""A filter for changing a list of user attributes into a list of links,
    data, etc.
"""
# TODO
# @makyo 2016-11-06 #63
if value == '':
return 'No attributes'
to_return = '<dl>'
for attribute in value.split('\n'):
k, v = attribute.split('=', 1)
if k in utils.ATTRIBUTES:
to_return += '<dt>{}</dt>'.format(utils.ATTRIBUTES[k]['dt'])
to_return += '<dd>{}</dd>'.format(
                utils.ATTRIBUTES[k]['dd'].format(value=conditional_escape(v)))
to_return += '</dl>'
return mark_safe(to_return)
|
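# Illustrative input for render_attributes above (format inferred from the
# parsing code: newline-separated "key=value" pairs, rendered only for keys
# present in utils.ATTRIBUTES; "website" is an assumed example key):
#
#     html = render_attributes("website=https://example.com\nage=30")
#     # -> '<dl><dt>...</dt><dd>...</dd>...</dl>' as safe HTML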
sagiss/sardana | scripts/upgrade/upgradeCLI.py | Python | lgpl-3.0 | 1,360 | 0.013235 | #!/usr/bin/env python
import sys, time
import PyTango
from upgrade_utils import *
def upgrade(serv, old_vers, new_vers):
u_kcls = get_suitable_upgrade(old_vers, new_vers)
u_obj = u_kcls()
db = PyTango.Database()
print
for msg, perc in u_obj.upgrade(db, serv, old_vers, new_vers):
msg = "\033[1F\033[2K%-70s" % msg
msg = "%s [ %03d%s ]" % (msg, perc, '%')
print msg
time.sleep(0.1)
def main():
serv = ""
vers = "0.0.0"
if len(sys.argv) > 1: serv = sys.argv[1]
    if len(sys.argv) > 2: vers = sys.argv[2]
pool_serv_list = get_server_list()
while not serv in pool_serv_list:
print_list(pool_serv_list)
serv = raw_input("Which instance you want to upgrade? ")
old_vers = get_pool_server_version(serv)
print "Current version of %s is %s" % (serv, old_vers)
possible_upgrades = get_possible_upgrades(serv)
if not possible_upgrades:
print "Could not find a | suitable upgrade plugin for version %s.\nUpgrade FAILED" % old_vers
return
while not get_suitable_upgrade(old_vers, vers):
print_list(possible_upgrades)
vers = raw_input("To which version you want to upgrade %s (bigger than %s)? " % (serv, old_vers))
upgrade(serv, old_vers, vers)
if __name__ == "__main__":
main() |
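# Usage sketch (inferred from the sys.argv handling above); both arguments are
# optional and are prompted for interactively when missing or invalid:
#
#     python upgradeCLI.py <pool-server-instance> <target-version>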
catapult-project/catapult | third_party/google-endpoints/google/api/config/service_config.py | Python | bsd-3-clause | 4,729 | 0.006344 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a method for fetching Service Configuration from Google Service
Management API."""
import logging
import json
import os
import urllib3
from apitools.base.py import encoding
import google.api.gen.servicecontrol_v1_messages as messages
from oauth2client import client
from urllib3.contrib import appengine
logger = logging.getLogger(__name__)
_GOOGLE_API_SCOPE = "https://www.googleapis.com/auth/cloud-platform"
_SERVICE_MGMT_URL_TEMPLATE = ("https://servicemanagement.googleapis.com"
"/v1/services/{}/configs/{}")
_SERVICE_NAME_ENV_KEY = "ENDPOINTS_SERVICE_NAME"
_SERVICE_VERSION_ENV_KEY = "ENDPOINTS_SERVICE_VERSION"
def fetch_service_config(service_name=None, service_version=None):
"""Fetches the service config from Google Serivce Management API.
Args:
service_name: the service name. When this argument is unspecified, this
method uses the value of the "SERVICE_NAME" environment variable as the
service name, and raises ValueError if the environment variable is unset.
service_version: the service version. When this argument is unspecified,
this method uses the value of the "SERVICE_VERSION" environment variable
as the service version, and raises ValueError if the environment variable
is unset.
Returns: the fetched service config JSON object.
Raises:
ValueError: when the service name/version is neither provided as an
argument or set as an environment variable; or when the fetched service
config fails validation.
Exception: when the Google Service Management API returns non-200 response.
"""
if not service_name:
service_name = _get_env_var_or_raise(_SERVICE_NAME_ENV_KEY)
if not service_version:
service_version = _get_env_var_or_raise(_SERVICE_VERSION_ENV_KEY)
service_mgmt_url = _SERVICE_MGMT_URL_TEMPLATE.format(service_name,
service_version)
access_token = _get_access_token()
headers = {"Authorization": "Bearer {}".format(access_token)}
http_client = _get_http_client()
response = http_client.request("GET", service_mgmt_url, headers=headers)
status_code = response.status
if status_code != 200:
message_template = "Fetching service config failed (status code {})"
_log_and_raise(Exception, message_template.format(status_code))
logger.debug('obtained service json from the management api:\n%s', response.data)
service = encoding.JsonToMessage(messages.Service, response.data)
_validate_service_config(service, service_name, service_version)
return service
def _get_access_token():
credentials = client.GoogleCredentials.get_application_default()
if credentials.create_scoped_required():
credentials = credentials.create_scoped(_GOOGLE_API_SCOPE)
return credentials.get_access_token().access_token
def _get_http_client():
if appengine.is_appengine_sandbox():
return appengine.AppEngineManager()
else:
return urllib3.PoolManager()
def _get_env_var_or_raise(env_variable_name):
if env_variable_name not in os.environ:
message_template = 'The "{}" environment variable is not set'
_log_and_raise(ValueError, message_template.format(env_variable_name))
return os.environ[env_variable_name]
def _validate_service_config(service, expected_service_name,
expected_service_version):
service_name = service.name
if not service_name:
_log_and_raise(ValueError, "No service name in the service config")
if service_name != expected_service_name:
message_template = "Unexpected service name in service config: {}"
_log_and_raise(ValueError, message_template.format(service_name))
service_version = service.id
if not service_version:
_log_and_raise(ValueError, "No service version in the service config")
if service_version != expected_service_version:
message_template = "Unexpected service version in service config: {}"
_log_and_raise(ValueError, message_template.format(service_version))
def _log_and_raise(exception_class, message):
logger.error(message)
raise exception_class(message)
|
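# Hypothetical usage sketch (not part of the original file): the service name
# and version may be passed explicitly or via the environment variables checked
# above; fetching also requires Google application default credentials.
import os
os.environ['ENDPOINTS_SERVICE_NAME'] = 'my-service.example.com'
os.environ['ENDPOINTS_SERVICE_VERSION'] = '2016-08-01r0'
# service = fetch_service_config()  # performs a network call when run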
csvoss/onelinerizer | tests/scope_comprehension.py | Python | mit | 1,199 | 0.000834 | x = 4
a = lambda: x
# Perhaps it once made sense to someone why c and d see the inner x
# but a and b do not.
print(''.join(
# generator expression
'{}{}{} {}{}{}{}\n'.format(i, j, k, a(), b(), c(), d())
for i, b in enumerate([a, lambda: x, a])
for j, c in enumerate([b, lambda: x, b])
for k, d in enumerate([c, lambda: x, c])
for x in [5]))
print(''.join(sorted({
# set comprehension
    '{}{}{} {}{}{}{}\n'.format(i, j, k, a(), b(), c(), d())
for i, b in enumerate([a, lambda: x, a])
for j, c in enumerate([b, lambda: x, b])
    for k, d in enumerate([c, lambda: x, c])
for x in [6]})))
print(''.join(sorted({
# dict comprehension
'{}{}{} {}{}{}{}\n'.format(i, j, k, a(), b(), c(), d()): 1
for i, b in enumerate([a, lambda: x, a])
for j, c in enumerate([b, lambda: x, b])
for k, d in enumerate([c, lambda: x, c])
for x in [7]}.keys())))
# Except in list comprehensions.
print(''.join([
# list comprehension
'{}{}{} {}{}{}{}\n'.format(i, j, k, a(), b(), c(), d())
for i, b in enumerate([a, lambda: x, a])
for j, c in enumerate([b, lambda: x, b])
for k, d in enumerate([c, lambda: x, c])
for x in [8]]))
|
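# Added illustration (not part of the original test): the scoping rule being
# exercised. Under Python 2 the list-comprehension loop variable leaks into
# the enclosing scope, while generator, set and dict comprehensions each get
# their own scope.
x = 1
_ = [x for x in (2,)]
print(x)  # 2 under Python 2, 1 under Python 3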
Davidrjx/mongoengine | mongoengine/document.py | Python | mit | 43,855 | 0.000638 | import re
import warnings
from bson.dbref import DBRef
import pymongo
from pymongo.read_preferences import ReadPreference
import six
from mongoengine import signals
from mongoengine.base import (BaseDict, BaseDocument, BaseList,
DocumentMetaclass, EmbeddedDocumentList,
TopLevelDocumentMetaclass, get_document)
from mongoengine.common import _import_class
from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
from mongoengine.context_managers import switch_collection, switch_db
from mongoengine.errors import (InvalidDocumentError, InvalidQueryError,
SaveConditionError)
from mongoengine.python_support import IS_PYMONGO_3
from mongoengine.queryset import (NotUniqueError, OperationError,
QuerySet, transform)
__all__ = ('Document', 'EmbeddedDocument', 'DynamicDocument',
'DynamicEmbeddedDocument', 'OperationError',
'InvalidCollectionError', 'NotUniqueError', 'MapReduceDocument')
def includes_cls(fields):
"""Helper function used for ensuring and comparing indexes."""
first_field = None
if len(fields):
if isinstance(fields[0], six.string_types):
first_field = fields[0]
elif isinstance(fields[0], (list, tuple)) and len(fields[0]):
first_field = fields[0][0]
return first_field == '_cls'
class InvalidCollectionError(Exception):
pass
class EmbeddedDocument(BaseDocument):
"""A :class:`~mongoengine.Document` that isn't stored in its own
collection. :class:`~mongoengine.EmbeddedDocument`\ s should be used as
fields on :class:`~mongoengine.Document`\ s through the
:class:`~mongoengine.EmbeddedDocumentField` field type.
A :class:`~mongoengine.EmbeddedDocument` subclass may be itself subclassed,
to create a specialised version of the embedded document that will be
stored in the same collection. To facilitate this behaviour a `_cls`
field is added to documents (hidden though the MongoEngine interface).
To enable this behaviour set :attr:`allow_inheritance` to ``True`` in the
:attr:`meta` dictionary.
"""
__slots__ = ('_instance', )
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = DocumentMetaclass
__metaclass__ = DocumentMetaclass
# A generic embedded document doesn't have any immutable properties
# that describe it uniquely, hence it shouldn't be hashable. You can
# define your own __hash__ method on a subclass if you need your
# embedded documents to be hashable.
__hash__ = None
def __init__(self, *args, **kwargs):
super(EmbeddedDocument, self).__init__(*args, **kwargs)
self._instance = None
self._changed_fields = []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._data == other._data
return False
def __ne__(self, other):
return not self.__eq__(other)
def to_mongo(self, *args, **kwargs):
data = super(EmbeddedDocument, self).to_mongo(*args, **kwargs)
# remove _id from the SON if it's in it and it's None
if '_id' in data and data['_id'] is None:
del data['_id']
return data
def save(self, *args, **kwargs):
self._instance.save(*args, **kwargs)
def reload(self, *args, **kwargs):
self._instance.reload(*args, **kwargs)
class Document(BaseDocument):
"""The base class used for defining the structure and properties of
collections of documents stored in MongoDB. Inherit from this class, and
add fields as class attributes to define a document's structure.
Individual documents may then be created by making instances of the
:class:`~mongoengine.Document` subclass.
By default, the MongoDB collection used to store documents created using a
:class:`~mongoengine.Document` subclass will be the name of the subclass
converted to lowercase. A different collection may be specified by
providing :attr:`collection` to the :attr:`meta` dictionary in the class
definition.
A :class:`~mongoengine.Document` subclass may be itself subclassed, to
create a specialised version of the document that will be stored in the
same collection. To facilitate this behaviour a `_cls`
field is added to documents (hidden though the MongoEngine interface).
    To enable this behaviour set :attr:`allow_inheritance` to ``True`` in the
:attr:`meta` dictionary.
A :class:`~mongoengine.Document` may use a **Capped Collection** by
specifying :attr:`max_documents` and :attr:`max_size` in the :attr:`meta`
dictionary. :attr:`max_documents` is the maximum number of documents that
is allowed to be stored in the collection, and :attr:`max_size` is the
maximum size of the collection in bytes. :attr:`max_size` is rounded up
to the next multiple of 256 by MongoDB internally and mongoengine before.
Use also a multiple of 256 to avoid confusions. If :attr:`max_size` is not
specified and :attr:`max_documents` is, :attr:`max_size` defaults to
10485760 bytes (10MB).
Indexes may be created by specifying :attr:`indexes` in the :attr:`meta`
dictionary. The value should be a list of field names or tuples of field
names. Index direction may be specified by prefixing the field names with
a **+** or **-** sign.
Automatic index creation can be disabled by specifying
:attr:`auto_create_index` in the :attr:`meta` dictionary. If this is set to
False then indexes will not be created by MongoEngine. This is useful in
production systems where index creation is performed as part of a
deployment system.
By default, _cls will be added to the start of every index (that
doesn't contain a list) if allow_inheritance is True. This can be
disabled by either setting cls to False on the specific index or
by setting index_cls to False on the meta dictionary for the document.
By default, any extra attribute existing in stored data but not declared
in your model will raise a :class:`~mongoengine.FieldDoesNotExist` error.
This can be disabled by setting :attr:`strict` to ``False``
in the :attr:`meta` dictionary.
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass
__metaclass__ = TopLevelDocumentMetaclass
__slots__ = ('__objects',)
@property
def pk(self):
"""Get the primary key."""
if 'id_field' not in self._meta:
return None
return getattr(self, self._meta['id_field'])
@pk.setter
def pk(self, value):
"""Set the primary key."""
return setattr(self, self._meta['id_field'], value)
def __hash__(self):
"""Return the hash based on the PK of this document. If it's new
and doesn't have a PK yet, return the default object hash instead.
"""
if self.pk is None:
return super(BaseDocument, self).__hash__()
else:
return hash(self.pk)
@classmethod
def _get_db(cls):
"""Some Model using other db_alias"""
return get_db(cls._meta.get('db_alias', DEFAULT_CONNECTION_NAME))
@classmethod
def _get_collection(cls):
"""Return a PyMongo collection for the document."""
if not hasattr(cls, '_collection') or cls._collection is None:
# Get the collection, either capped or regular.
if cls._meta.get('max_size') or cls._meta.get('max_documents'):
                cls._collection = cls._get_capped_collection()
else:
db = cls._get_db()
collection_name = cls._get_collection_name()
                cls._collection = db[collection_name]
# Ensure indexes on the collection unless auto_create_index was
# set to False.
if cls._meta.get('auto_create_index', T |
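# Hypothetical usage sketch (not part of the original file): a minimal
# Document subclass using the meta options described in the docstring above.
#
#     from mongoengine import Document, StringField
#
#     class Page(Document):
#         title = StringField(required=True)
#         meta = {'collection': 'pages', 'indexes': ['title']}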
a25kk/newe | src/newe.sitecontent/newe/sitecontent/browser/showroom.py | Python | mit | 7,487 | 0 | # -*- coding: utf-8 -*-
"""Module providing views for the folderish content page type"""
import json
import urllib
from Acquisition import aq_inner
from Products.Five.browser import BrowserView
from plone import api
from plone.i18n.normalizer.interfaces import IIDNormalizer
from zope.component import getUtility
from newe.sitecontent import utils
from newe.sitecontent.showroom import IShowRoom
from newe.sitecontent.project import IProject
class ShowRoomView(BrowserView):
""" Show room default view """
def render(self):
return self.index()
def __call__(self):
self.has_showrooms = len(self.showrooms()) > 0
self.has_subitems = len(self.subitems()) > 0
return self.render()
def showroom_content(self):
context = aq_inner(self.context)
template = context.restrictedTraverse('@@showroom-content')()
return template
def showrooms(self):
context = aq_inner(self.context)
return context.restrictedTraverse('@@folderListing')(
portal_type='newe.sitecontent.showroom',
review_state='published')
def projects(self):
context = aq_inner(self.context)
return context.restrictedTraverse('@@folderListing')(
portal_type='newe.sitecontent.project',
review_state='published')
def subitems(self):
""" A showroom containing other showrooms
should not list contained projects
"""
if self.has_showrooms:
return self.showrooms()
return self.projects()
def _project_assets(self, uuid):
project = api.content.get(UID=uuid)
data = getattr(project, 'assets')
if data is None:
data = dict()
return data
def _assets(self, uuid):
return json.loads(self._project_assets(uuid))
def has_preview_image(self, uuid):
""" Test if we have an available preview image """
if len(self._project_assets(uuid)):
assets = self._assets(uuid)
return len(assets['items']) > 0
return False
def get_preview_container(self, uuid):
data = self._assets(uuid)
items = data['items']
return items[0]
def rendered_preview_image(self, uuid):
item = api.content.get(UID=uuid)
return item.restrictedTraverse('@@stack-preview')()
def normalize_subject(self, subject):
""" Normalizer for project filter categories
This function is called by the isotope filter navigation
"""
normalizer = getUtility(IIDNormalizer)
return normalizer.normalize(subject)
def url_encode_subject_query(self, subject):
""" Quote subject query string """
return urllib.quote(subject)
def computed_class(self, uuid):
item = api.content.get(UID=uuid)
klass = 'app-card-{0}'.format(uuid)
subjects = item.Subject()
for subject in subjects:
pretty_subject = self.normalize_subject(subject)
klass = '{0} {1}'.format(klass, pretty_subject)
return klass
def available_filter(self):
context = aq_inner(self.context)
context_subjects = utils.keywords_filtered_by_context(context)
return context_subjects
def filter_map(self):
idx = 0
mapping = {}
for subject in self.available_filter():
idx += 1
mapping[subject] = idx
return mapping
def filter_map_keys(self):
return self.filter_map().keys()
def item_filter_category(self, uuid):
item = api.content.get(UID=uuid)
subjects = item.Subject()
filter_map = self.filter_map()
if len(subjects) > 1:
item_categories = list()
for subject in subjects:
item_categories.append(filter_map[subject])
            return ', '.join(str(c) for c in item_categories)
else:
return filter_map[subjects[0]]
class ShowRoomContentView(BrowserView):
""" Embeddable content card listing """
def __call__(self):
self.has_showrooms = len(self.showrooms()) > 0
return self.render()
def render(self):
return self.index()
@property
def traverse_subpath(self):
return self.subpath
    def publishTraverse(self, request, name):
if not hasattr(self, 'subpath'):
            self.subpath = []
self.subpath.append(name)
return self
def active_filter_category(self):
try:
active_category = self.traverse_subpath[0]
return active_category
except AttributeError:
return None
def contained_items(self, type_interface):
context = aq_inner(self.context)
query = dict(
context=context,
depth=1,
object_provides=type_interface,
portal_state='published',
sort_on='getObjPositionInParent'
)
if self.active_filter_category():
active_filter = self.active_filter_category()
for key, value in self.filter_map().items():
if str(value) == active_filter:
query['Subject'] = key
items = api.content.find(**query)
return items
def showrooms(self):
return self.contained_items(IShowRoom)
def projects(self):
return self.contained_items(IProject)
def subitems(self):
""" A showroom containing other showrooms
should not list contained projects
"""
if self.has_showrooms:
return self.showrooms()
return self.projects()
def _project_assets(self, uuid):
project = api.content.get(UID=uuid)
data = getattr(project, 'assets', None)
if not data:
data = dict()
return data
def _assets(self, uuid):
return json.loads(self._project_assets(uuid))
def has_preview_image(self, uuid):
""" Test if we have an available preview image """
if len(self._project_assets(uuid)):
assets = self._assets(uuid)
return len(assets['items']) > 0
return False
def get_preview_container(self, uuid):
data = self._assets(uuid)
items = data['items']
return items[0]
def rendered_preview_image(self, uuid):
item = api.content.get(UID=uuid)
return item.restrictedTraverse('@@stack-preview')()
def available_filter(self):
context = aq_inner(self.context)
context_subjects = utils.keywords_filtered_by_context(context)
return context_subjects
def filter_map(self):
idx = 0
mapping = {}
for subject in self.available_filter():
idx += 1
mapping[subject] = idx
return mapping
def filter_map_keys(self):
return self.filter_map().keys()
def normalize_subject(self, subject):
""" Normalizer for project filter categories
This function is called by the isotope filter navigation
"""
normalizer = getUtility(IIDNormalizer)
return normalizer.normalize(subject)
def computed_class(self, uuid):
item = api.content.get(UID=uuid)
klass = 'app-card-{0}'.format(uuid)
subjects = item.Subject()
for subject in subjects:
pretty_subject = self.normalize_subject(subject)
klass = '{0} {1}'.format(klass, pretty_subject)
return klass
|
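# Illustrative sketch of the mapping built by filter_map() above (assumed
# subjects): each subject is numbered from 1 in iteration order, and
# item_filter_category() joins those numbers for multi-subject items.
#
#     subjects = ['architecture', 'interior', 'landscape']
#     mapping = {s: i + 1 for i, s in enumerate(subjects)}
#     # -> {'architecture': 1, 'interior': 2, 'landscape': 3}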
JoaoRodrigues/pdb-tools | tests/test_pdb_intersect.py | Python | apache-2.0 | 5,216 | 0.000384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 João Pedro Rodrigues
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit Tests for `pdb_intersect`.
"""
import os
import sys
import unittest
from config import data_dir
from utils import OutputCapture
class TestTool(unittest.TestCase):
"""
Generic class for testing tools.
"""
def setUp(self):
# Dynamically import the module
name = 'pdbtools.pdb_intersect'
self.module = __import__(name, fromlist=[''])
def exec_module(self):
"""
Execs module.
"""
with OutputCapture() as output:
try:
self.module.main()
except SystemExit as e:
self.retcode = e.code
self.stdout = output.stdout
self.stderr = output.stderr
return
def test_default(self):
"""$ pdb_intersect data/dummy.pdb data/dummy.pdb"""
# Simulate input
sys.argv = ['', os.path.join(data_dir, 'dummy.pdb'),
os.path.join(data_dir, 'dummy.pdb')]
# Execute the script
self.exec_module()
# Validate results
self.assertEqual(self.retcode, 0) # ensure the program exited OK.
self.assertEqual(len(self.stdout), 188) # no lines deleted. Same file.
self.assertEqual(len(self.stderr), 0) # no errors
atom_names = [l[12:16] for l in self.stdout]
# Test content
atoms_list = [' N ', ' H ', ' H2 ', ' H3 ', ' CA ', ' HA ', ' CB ',
' HB2', ' HB3', ' CG ', ' HG2', ' HG3', ' CD ', ' HD2',
' HD3', ' NE ', ' HE ', ' CZ ', ' NH1', 'HH11', 'HH12',
' NH2', 'HH21', 'HH22', ' C ', ' O ', ' N ', ' H ',
' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ', ' HG2',
' HG3', ' CD ', ' OE1', ' OE2', ' C ', ' O ', ' N ',
' H ', ' CA ', ' HA ', ' CB ', ' HB1', ' HB2', ' HB3',
' C ', ' O ', ' ', ' N ', ' H ', ' H2 ', ' H3 ',
' CA ', ' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ',
' OD1', ' ND2', 'HD21', 'HD22', ' C ', ' O ', ' N ',
' H ', ' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ',
' HG2', ' HG3', ' CD ', ' HD2', ' HD3', ' NE ', ' HE ',
' CZ ', ' NH1', 'HH11', 'HH12', ' NH2', 'HH21', 'HH22',
' C ', ' O ', ' N ', ' H ', ' CA ', ' HA ', ' CB ',
' HB2', ' HB3', ' CG ', ' HG2', ' HG3', ' CD ', ' OE1',
' OE2', ' C ', ' O ', ' ', ' N ', ' H ', ' H2 ',
' H3 ', ' CA ', ' HA ', ' CB ', ' HB2', ' HB3', ' CG ',
' HG2', ' HG3', ' CD ', ' HD2', ' HD3', ' NE ', ' HE ',
' CZ ', ' NH1', 'HH11', 'HH12', ' NH2', 'HH21', 'HH22',
' C ', ' O ', ' N ', ' H ', ' CA ', ' HA ', ' CB ',
' HB2', ' HB3', ' CG ', ' HG2', ' HG3', ' CD ', ' OE1',
' OE2', ' C ', ' O ', ' N ', ' CA ', ' C ', ' O ',
' CB ', ' CG ', ' SD ', ' CE ', ' ', ' P ', ' OP1',
' OP2', " O5'", " C5'", " C4'", " O4'", " C3'", " O3'",
" C2'", " C1'", ' N1 ', ' C2 ', ' O2 ', ' N3 ', ' C4 ',
' O4 ', ' C5 ', ' C7 ', ' C6 ', 'CA ', ' O ', ' O ',
' O ', ' O ', ' O ', ' O ', ' O ', ' O ']
self.assertEqual(atoms_list, atom_names)
def test_file_not_found(self):
"""$ pdb_intersect not_existing.pdb"""
# Error (file not found)
afile = os.path.join(data_dir, 'not_existing.pdb')
sys.argv = ['', afile]
# Execute the script
self.exec_module()
self.assertEqual(self.retcode, 1) # exit code is 1 (error)
self.assertEqual(len(self.stdout), 0) # nothing written to stdout
self.assertEqual(self.stderr[0][:22],
"ERROR!! File not found") # proper error message
    def test_helptext(self):
"""$ pdb_intersect"""
sys.argv = ['']
# Execute the script
self.exec_module()
self.assertEqual(self.retcode, 1) # ensure the program exited gracefully.
        self.assertEqual(len(self.stdout), 0)  # no output
self.assertEqual(self.stderr, self.module.__doc__.split("\n")[:-1])
if __name__ == '__main__':
from config import test_dir
mpath = os.path.abspath(os.path.join(test_dir, '..'))
sys.path.insert(0, mpath) # so we load dev files before any installation
unittest.main()
|
liberorbis/libernext | env/lib/python2.7/site-packages/celery/utils/log.py | Python | gpl-2.0 | 9,145 | 0.000437 | # -*- coding: utf-8 -*-
"""
celery.utils.log
~~~~~~~~~~~~~~~~
Logging utilities.
"""
from __future__ import absolute_import, print_function
import logging
import numbers
import os
import sys
import threading
import traceback
from contextlib import contextmanager
from billiard import current_process, util as mputil
from kombu.five import values
from kombu.log import get_logger as _get_logger, LOG_LEVELS
from kombu.utils.encoding import safe_str
from celery.five import string_t, text_t
from .term import colored
__all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger',
           'set_in_sighandler', 'in_sighandler', 'get_logger',
'get_task_logger', 'mlevel', 'ensure_process_aware_logger',
'get_multiprocessing_logger', 'reset_multiprocessing_logger']
_process_aware = False
PY3 = sys.version_info[0] == 3
MP_LOG = os.environ.get('MP_LOG', False)
# Sets up our logging hierarchy.
#
# Every logger in the celery package inherits from the "celery"
# logger, and every task logger inherits from the "celery.task"
# logger.
base_logger = logger = _get_logger('celery')
mp_logger = _get_logger('multiprocessing')
_in_sighandler = False
def set_in_sighandler(value):
global _in_sighandler
_in_sighandler = value
def iter_open_logger_fds():
seen = set()
loggers = (list(values(logging.Logger.manager.loggerDict)) +
[logging.getLogger(None)])
for logger in loggers:
try:
for handler in logger.handlers:
try:
if handler not in seen:
yield handler.stream
seen.add(handler)
except AttributeError:
pass
except AttributeError: # PlaceHolder does not have handlers
pass
@contextmanager
def in_sighandler():
set_in_sighandler(True)
try:
yield
finally:
set_in_sighandler(False)
def logger_isa(l, p):
this, seen = l, set()
while this:
if this == p:
return True
else:
if this in seen:
raise RuntimeError(
'Logger {0!r} parents recursive'.format(l),
)
seen.add(this)
this = this.parent
return False
def get_logger(name):
l = _get_logger(name)
if logging.root not in (l, l.parent) and l is not base_logger:
if not logger_isa(l, base_logger):
l.parent = base_logger
return l
task_logger = get_logger('celery.task')
worker_logger = get_logger('celery.worker')
def get_task_logger(name):
logger = get_logger(name)
if not logger_isa(logger, task_logger):
logger.parent = task_logger
return logger
def mlevel(level):
if level and not isinstance(level, numbers.Integral):
return LOG_LEVELS[level.upper()]
return level
class ColorFormatter(logging.Formatter):
#: Loglevel -> Color mapping.
COLORS = colored().names
colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'],
'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']}
def __init__(self, fmt=None, use_color=True):
logging.Formatter.__init__(self, fmt)
self.use_color = use_color
def formatException(self, ei):
if ei and not isinstance(ei, tuple):
ei = sys.exc_info()
r = logging.Formatter.formatException(self, ei)
if isinstance(r, str) and not PY3:
return safe_str(r)
return r
def format(self, record):
msg = logging.Formatter.format(self, record)
color = self.colors.get(record.levelname)
# reset exception info later for other handlers...
einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info
if color and self.use_color:
try:
# safe_str will repr the color object
# and color will break on non-string objects
# so need to reorder calls based on type.
# Issue #427
try:
if isinstance(msg, string_t):
return text_t(color(safe_str(msg)))
return safe_str(color(msg))
except UnicodeDecodeError:
return safe_str(msg) # skip colors
except Exception as exc:
prev_msg, record.exc_info, record.msg = (
record.msg, 1, '<Unrepresentable {0!r}: {1!r}>'.format(
type(msg), exc
),
)
try:
return logging.Formatter.format(self, record)
finally:
record.msg, record.exc_info = prev_msg, einfo
else:
return safe_str(msg)
class LoggingProxy(object):
"""Forward file object to :class:`logging.Logger` instance.
:param logger: The :class:`logging.Logger` instance to forward to.
:param loglevel: Loglevel to use when writing messages.
"""
mode = 'w'
name = None
closed = False
loglevel = logging.ERROR
_thread = threading.local()
def __init__(self, logger, loglevel=None):
self.logger = logger
self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
self._safewrap_handlers()
def _safewrap_handlers(self):
"""Make the logger handlers dump internal errors to
`sys.__stderr__` instead of `sys.stderr` to circumvent
infinite loops."""
def wrap_handler(handler): # pragma: no cover
class WithSafeHandleError(logging.Handler):
def handleError(self, record):
exc_info = sys.exc_info()
try:
try:
traceback.print_exception(exc_info[0],
exc_info[1],
exc_info[2],
None, sys.__stderr__)
except IOError:
pass # see python issue 5971
finally:
del(exc_info)
handler.handleError = WithSafeHandleError().handleError
return [wrap_handler(h) for h in self.logger.handlers]
def write(self, data):
"""Write message to logging object."""
if _in_sighandler:
return print(safe_str(data), file=sys.__stderr__)
if getattr(self._thread, 'recurse_protection', False):
# Logger is logging back to this file, so stop recursing.
return
data = data.strip()
if data and not self.closed:
self._thread.recurse_protection = True
try:
self.logger.log(self.loglevel, safe_str(data))
finally:
self._thread.recurse_protection = False
def writelines(self, sequence):
"""`writelines(sequence_of_strings) -> None`.
Write the strings to the file.
The sequence can be any iterable object producing strings.
This is equivalent to calling :meth:`write` for each string.
"""
for part in sequence:
self.write(part)
def flush(self):
"""This object is not buffered so any :meth:`flush` requests
are ignored."""
pass
def close(self):
"""When the object is closed, no write requests are forwarded to
the logging object anymore."""
self.closed = True
def isatty(self):
"""Always return :const:`False`. Just here for file support."""
return False
def ensure_process_aware_logger(force=False):
"""Make sure process name is recorded when loggers are used."""
global _process_aware
if force or not _process_aware:
logging._acquireLock()
try:
_process_aware = True
Logger = logging.getLoggerClass()
if getattr(Logger, '_process_aware', False): # pragma: no cover
return
class ProcessAwareLogger(Logger):
_signal_saf |
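# Hypothetical usage sketch (not part of the original file): library code
# obtains loggers through the helpers above so they hang off the "celery" and
# "celery.task" hierarchy:
#
#     logger = get_logger(__name__)
#     task_logger = get_task_logger(__name__)
#     task_logger.info('propagates through the celery.task logger')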
dsorokin/aivika-modeler | simulation/aivika/modeler/stream_random.py | Python | bsd-3-clause | 7,405 | 0.006212 | # Copyright (c) 2017 David Sorokin <david.sorokin@gmail.com>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.
from simulation.aivika.modeler.model import *
from simulation.aivika.modeler.port import *
from simulation.aivika.modeler.stream import *
from simulation.aivika.modeler.data_type import *
from simulation.aivika.modeler.pdf import *
def uniform_random_stream(transact_type, min_delay, max_delay):
"""Return a new stream of transacts with random delays distributed uniformly."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomUniformStream ' + str(min_delay) + ' ' + str(max_delay)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def uniform_int_random_stream(transact_type, min_delay, max_delay):
"""Return a new stream of transacts with integer random delays distributed uniformly."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomUniformIntStream ' + str(min_delay) + ' ' + str(max_delay)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def triangular_random_stream(transact_type, min_delay, median_delay, max_delay):
"""Return a new stream of transacts with random delays having the triangular distribution."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomTriangularStream ' + str(min_delay) + ' ' + str(median_delay) + ' ' + str(max_delay)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def normal_random_stream(transact_type, mean_delay, delay_deviation):
"""Return a new stream of transacts with random delays having the normal distribution."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomNormalStream ' + str(mean_delay) + ' ' + str(delay_deviation)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def lognormal_random_stream(transact_type, normal_mean_delay, normal_delay_deviation):
"""Return a new stream of transacts with random delays having the lognormal distribution.
The numerical parameters are related to the normal distribution that
this distribution is derived from.
"""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomLogNormalStream ' + str(normal_mean_delay) + ' ' + str(normal_delay_deviation)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def exponential_random_stream(transact_type, mean_delay):
"""Return a new stream of transacts with random delays having the exponential distribution with the specified mean (a reciprocal of the rate)."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomExponentialStream ' + str(mean_delay)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def erlang_random_stream(transact_type, scale, shape):
"""Return a new stream of transacts with random delays having the Erlang distribution with the specified scale (a reciprocal of the rate) and shape parameters."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
    code += 'randomErlangStream ' + str(scale) + ' ' + str(shape)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def poisson_random_stream(transact_type, mean_delay):
"""Return a new stream of transacts with rand | om delays having the Poisson distribution with the specified mean."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomPoissonStream ' + str(mean_delay)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def binomial_random_stream(transact_type, probability, trials):
"""Return a new stream of transacts with random delays having the binomial distribution with the specified probability and trials."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomBinomialStream ' + str(probability) + ' ' + str(trials)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def gamma_random_stream(transact_type, shape, scale):
"""Return a new stream of transacts with random delays having the Gamma distribution by the specified shape and scale."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomGammaStream ' + str(shape) + ' ' + str(scale)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def beta_random_stream(transact_type, alpha, beta):
"""Return a new stream of transacts with random delays having the Beta distribution by the specified shape parameters (alpha and beta)."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomBetaStream ' + str(alpha) + ' ' + str(beta)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def weibull_random_stream(transact_type, shape, scale):
"""Return a new stream of transacts with random delays having the Weibull distribution by the specified shape and scale."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomWeibullStream ' + str(shape) + ' ' + str(scale)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
def discrete_random_stream(transact_type, pdf):
"""Return a new stream of transacts with random delays having the discrete distribution by the specified probability density function."""
expect_transact_type(transact_type)
model = transact_type.get_model()
code = 'return $ mapStream (\\a -> ' + transact_type.coerce_arrival('a') + ') $ '
code += 'randomDiscreteStream ' + encode_pdf(pdf)
y = StreamPort(model, transact_type.get_data_type())
y.bind_to_input()
y.write(code)
return y
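# A minimal usage sketch (added; not part of the original module). It assumes
# a `transact_type` object obtained from this DSL's model API. Each
# constructor emits Haskell code into the generated simulation, e.g.:
#
#     arrivals = exponential_random_stream(transact_type, 0.5)
#     # writes: return $ mapStream (\a -> ...) $ randomExponentialStream 0.5
#
# and returns a StreamPort that can be wired to downstream blocks.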
|
timothyparez/PyBitmessage | src/class_receiveDataThread.py | Python | mit | 46,068 | 0.006556 | doTimingAttackMitigation = False
import base64
import errno
import math
import time
import threading
import shared
import hashlib
import os
import select
import socket
import random
import ssl
from struct import unpack, pack
import sys
import traceback
from binascii import hexlify
#import string
#from subprocess import call # used when the API must execute an outside program
#from pyelliptic.openssl import OpenSSL
#import highlevelcrypto
from addresses import *
from class_objectHashHolder import objectHashHolder
from helper_generic import addDataPadding, isHostInPrivateIPRange
from helper_sql import sqlQuery
from debug import logger
# This thread is created either by the synSenderThread(for outgoing
# connections) or the singleListenerThread(for incoming connections).
class receiveDataThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self, name="receiveData")
self.data = ''
self.verackSent = False
self.verackReceived = False
def setup(
self,
sock,
HOST,
port,
streamNumber,
someObjectsOfWhichThisRemoteNodeIsAlreadyAware,
selfInitiatedConnections,
sendDataThreadQueue,
objectHashHolderInstance):
self.sock = sock
self.peer = shared.Peer(HOST, port)
self.name = "receiveData-" + self.peer.host.replace(":", ".") # ":" log parser field separator
self.streamNumber = streamNumber
self.objectsThatWeHaveYetToGetFromThisPeer = {}
self.selfInitiatedConnections = selfInitiatedConnections
self.sendDataThreadQueue = sendDataThreadQueue # used to send commands and data to the sendDataThread
shared.connectedHostsList[
self.peer.host] = 0 # The very fact that this receiveData thread exists shows that we are connected to the remote host. Let's add it to this list so that an outgoingSynSender thread doesn't try to connect to it.
self.connectionIsOrWasFullyEstablished = False # set to true after the remote node and I accept each other's version messages. This is needed to allow the user interface to accurately reflect the current number of connections.
self.services = 0
if self.streamNumber == -1: # This was an incoming connection. Send out a version message if we accept the other node's version message.
self.initiatedConnection = False
else:
self.initiatedConnection = True
self.selfInitiatedConnections[streamNumber][self] = 0
self.someObjectsOfWhichThisRemoteNodeIsAlreadyAware = someObjectsOfWhichThisRemoteNodeIsAlreadyAware
self.objectHashHolderInstance = objectHashHolderInstance
self.startTime = time.time()
def run(self):
logger.debug('receiveDataThread starting. ID ' + str(id(self)) + '. The size of the shared.connectedHostsList is now ' + str(len(shared.connectedHostsList)))
while True:
if shared.config.getint('bitmessagesettings', 'maxdownloadrate') == 0:
downloadRateLimitBytes = float("inf")
else:
downloadRateLimitBytes = shared.config.getint('bitmessagesettings', 'maxdownloadrate') * 1000
with shared.receiveDataLock:
while shared.numberOfBytesReceivedLastSecond >= downloadRateLimitBytes:
if int(time.time()) == shared.lastTimeWeResetBytesReceived:
# If it's still the same second that it was last time then sleep.
time.sleep(0.3)
else:
# It's a new second. Let us clear the shared.numberOfBytesReceivedLastSecond.
shared.lastTimeWeResetBytesReceived = int(time.time())
shared.numberOfBytesReceivedLastSecond = 0
dataLen = len(self.data)
try:
if ((self.services & shared.NODE_SSL == shared.NODE_SSL) and
self.connectionIsOrWasFullyEstablished and
shared.haveSSL(not self.initiatedConnection)):
dataRecv = self.sslSock.recv(1024)
else:
dataRecv = self.sock.recv(1024)
self.data += dataRecv
shared.numberOfBytesReceived += len(dataRecv) # for the 'network status' UI tab. The UI clears this value whenever it updates.
shared.numberOfBytesReceivedLastSecond += len(dataRecv) # for the download rate limit
except socket.timeout:
logger.error ('Timeout occurred waiting for data from ' + str(self.peer) + '. Closing receiveData thread. (ID: ' + str(id(self)) + ')')
break
except Exception as err:
if (sys.platform == 'win32' and err.errno in ([2, 10035])) or (sys.platform != 'win32' and err.errno == errno.EWOULDBLOCK):
select.select([self.sslSock], [], [])
continue
logger.error('sock.recv error. Closing receiveData thread (' + str(self.peer) + ', Thread ID: ' + str(id(self)) + ').' + str(err.errno) + "/" + str(err))
break
# print 'Received', repr(self.data)
if len(self.data) == dataLen: # If self.sock.recv returned no data:
logger.debug('Connection to ' + str(self.peer) + ' closed. Closing receiveData thread. (ID: ' + str(id(self)) + ')')
break
else:
self.processData()
try:
del self.selfInitiatedConnections[self.streamNumber][self]
logger.debug('removed self (a receiveDataThread) from selfInitiatedConnections')
except:
pass
self.sendDataThreadQueue.put((0, 'shutdown','no data')) # commands the corresponding sendDataThread to shut itself down.
try:
del shared.connectedHostsList[self.peer.host]
except Exception as err:
logger.error('Could not delete ' + str(self.peer.host) + ' from shared.connectedHostsList.' + str(err))
try:
del shared.numberOfObjectsThatWeHaveYetToGetPerPeer[
self.peer]
except:
pass
shared.UISignalQueue.put(('updateNetworkStatusTab', 'no data'))
logger.debug('receiveDataThread ending. ID ' + str(id(self)) + '. The size of the shared.connectedHostsList is now ' + str(len(shared.connectedHostsList)))
def antiIntersectionDelay(self, initial = False):
# estimated time for a small object to propagate across the whole network
delay = math.ceil(math.log(len(shared.knownNodes[self.streamNumber]) + 2, 20)) * (0.2 + objectHashHolder.size/2)
# +2 is to avoid problems with log(0) and log(1)
# 20 is avg connected nodes count
# 0.2 is avg message transmission time
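        # Illustrative numbers (added comment; values assumed): with 1000
        # known nodes, math.log(1002, 20) ~= 2.31, so the ceiling is 3; if
        # objectHashHolder.size were 10, delay = 3 * (0.2 + 10/2) = 15.6 s.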
now = time.time()
if initial and now - delay < self.startTime:
logger.debug("Initial sleeping for %.2fs", delay - (now - self.startTime))
time.sleep(delay - (now - self.startTime))
elif not initial:
logger.debug("Sleeping due to missing object for %.2fs", delay)
time.sleep(delay)
def processData(self):
        if len(self.data) < shared.Header.size: # if so little of the data has arrived that we can't even read the checksum then wait for more data.
return
magic,command,payloadLength,checksum = shared.Header.unpack(self.data[:shared.Header.size])
        if magic != 0xE9BEB4D9:
self.data = ""
return
if payloadLength > 1600100: # ~1.6 MB which is the maximum possible size of an inv message.
            logger.info('The incoming message, which we have not yet downloaded, is too large. Ignoring it. (Unfortunately there is no way to tell the other node to stop sending it except to disconnect.) Message size: %s' % payloadLength)
self.data = self.data[payloadLength + shared.Header.size:]
del magic,command,payloadLength,checksum # we don't need these anymore and better to clean them now before the recursive call rather than after
self.processData( |
fengzishiren/pysugar | src/pysugar.py | Python | mit | 5,899 | 0.01242 | # -*- coding: utf-8 -*-
'''
Created on July 23, 2014
@author: fengzishiren
@mail: xiaoyaozi106@163.com
'''
import re
import MySQLdb
import os
import sys
from settings import *
__version__ = '0.1'
__date__ = '2014-07-23'
__updated__ = '2014-07-23'
FORMAT_ARGS = {'author': 'pysugar', 'date': __date__, 'version': __version__}
PATTERN_NAME = re.compile(r'\s(\w+)\(')
PATTERN_ARG = re.compile(r'\((\w+)|(Map<\w+,\w+>)\s(\w+)\)')
class Connection(object):
def connect(self):
host = DATABASE.get('host', '127.0.0.1')
port = DATABASE.get('port', 3306) # oracle default port:10033 mysql:3306
username = DATABASE['username']
password = DATABASE['password']
database = DATABASE['database']
# self.con = MySQLdb.connect(username, password, host + ':' + str(port) + '/' + database)
self.con = MySQLdb.connect(host=host, user=username, passwd=password, db=database, port=port)
self.cursor = self.con.cursor()
return self.cursor
def close(self):
self.con.commit()
self.cursor.close()
self.con.close()
class Token(object):
def __init__(self, name, _type, args):
self.name = name
self.type = _type # insert update delete select
self.args = args # map'or obj'
def say(x):
print x
return x
def col2prop(BIG_ID):
'''
input eg. SEC_PASS
return secPass
'''
bigA = lambda x:''.join((x[0].upper(), x[1:]))
mid = ''.join([bigA(part) for part in BIG_ID.lower().split('_')])
return ''.join((mid[0].lower(), mid[1:]))
def table2bean(table_name, ignore_preffix='tb_'):
'''
input table_name eg. tb_person_relation
return PersonRelation
Note: 'tb_' will be ignored
'''
if table_name.startswith(ignore_preffix):
table_name = table_name[len(ignore_preffix):]
bigA = lambda x:''.join((x[0].upper(), x[1:]))
return ''.join([bigA(c) for c in table_name.split('_')])
def set_format_args(**kwargs):
global FORMAT_ARGS
FORMAT_ARGS.update(kwargs)
def get_tokens(daosrc):
'''
    Parse the formatted DAO source code.
    Return a list of Tokens.
'''
dao_code = daosrc % FORMAT_ARGS
lines = dao_code.split('\n')
lines = [x.strip() for x in lines if x.strip().startswith('public')]
return map(get_token, lines[1:])
def get_token(line):
line = line[len('public'):].strip()
match = PATTERN_NAME.search(line)
assert match, 'Syntax error!'
name = match.group(1)
    _type = OP_TYPE_DICT.get(name[0:3])
    if _type is None:
return None
match = PATTERN_ARG.search(line)
assert match, 'Illegal Parameters'
return Token(name, _type, match.group(1).lower())
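# Hypothetical example (added; OP_TYPE_DICT lives in settings and is not
# shown here). Assuming it maps the prefix 'sel' to a select operation, the
# line
#     public UserAccount selUserById(int id);
# yields a Token with name='selUserById', that select type, and args='int'.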
def gen_bean_name(bean):
'''
eg. UserAccount, userAccount
'''
set_format_args(bean_name = bean, var_name = ''.join((bean[0].lower(), bean[1:])))
def gen_sqls(table_name, tokens):
gs = lambda table_name, token: SQL_CODE[token.type] % \
dict({'method_name': token.name, 'arg_type':token.args, 'table_name':table_name}, **FORMAT_ARGS)
sqls = [gs(table_name, tok) for tok in tokens if tok] # Note: Ignore tok if tok == None
set_format_args(sqls = '\n'.join(sqls))
def get_bean_content(fieldtypes):
fields = ['private %s %s;' % (v, k) for k, v in fieldtypes.items()]
GET_AND_SET = \
"""
public %(type)s get%(Name)s() {
return %(name)s;
}
public void set%(Name)s(%(type)s %(name)s) {
this.%(name)s = %(name)s;
}"""
bigA = lambda x:''.join((x[0].upper(), x[1:]))
content = '\n'.join([GET_AND_SET % {'name': f, 'Name':bigA(f), 'type': t} for f, t in fieldtypes.items()])
return '\n'.join(('\n\t'.join(fields), content))
def gen_map_and_fields(table_name, obtain_type=lambda x: x):
'''
    obtain_type must be a function that converts database column types to Java types.
    By default obtain_type does nothing, which is suitable for MySQL types.
'''
sql = 'select * from %s' % table_name
con = Connection()
try:
cursor = con.connect()
cursor.execute(sql)
descs = cursor.description
finally:
con.close()
fieldtypes = {col2prop(e[0]): DO_TYPE_DICT.get(obtain_type(e[1]), 'Object') for e in descs}
set_format_args(bean_content = get_bean_content(fieldtypes))
procols = {col2prop(key): key for key in [e[0] for e in descs]}
resultmap = ['<result property="%s" column="%s" />' % (k, v) for k, v in procols.items()]
set_format_args(map = '\n\t'.join(resultmap))
cols = '(%s)' % (','.join(procols.values()))
vals = '(%s)' % (','.join(['#{%s}' % k for k in procols.keys()]))
# cache it
set_format_args(insert_suffix = ' values '.join((cols, vals)))
us = [' = '.join((v, '#{%s}' % k)) for k, v in procols.items()]
# cache it
set_format_args(update_set = ',\n\t'.join(us))
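# Illustrative output (added comment; hypothetical single-column table): for
# a column USER_NAME this caches
#   insert_suffix: "(USER_NAME) values (#{userName})"
#   update_set:    "USER_NAME = #{userName}"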
def write_file((filename, content), _dir=OUTPUT_DIR):
if not os.path.exists(_dir):
os.mkdir(_dir)
with open(os.path.join(_dir, filename), 'w') as f:
return f.write(content) # Note: return None
def get_tpl(name):
with open(name) as f:
return f.read()
def main(table_name):
bean = table2bean(table_name)
daosrc = get_tpl(os.path.join(TEMPLATES_DIR, 'dao.tpl'))
gen_bean_name(bean)
gen_map_and_fields(table_name)
gen_sqls(table_name, get_tokens(daosrc))
formatted = {outfile % FORMAT_ARGS : (get_tpl(os.path.join(TEMPLATES_DIR, tpl)) if tpl != 'dao.tpl' else daosrc)\
% FORMAT_ARGS for tpl, outfile in IO_FILE_LIST.items()}
map(write_file, formatted.items())
if __name__ == '__main__':
if sys.argv.__len__() == 3:
main(sys.argv[1])
else:
main('auth_user')
say('bye-bye')
|
CognizantOneDevOps/Insights | PlatformAgents/com/cognizant/devops/platformagents/agents/alm/hp/HpAlmAgent3.py | Python | apache-2.0 | 11,216 | 0.005171 | #-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Jun 22, 2016
@author: 463188
'''
from ....core.BaseAgent3 import BaseAgent
import xml.etree.ElementTree as ET
import json
class HpAlmAgent(BaseAgent):
@BaseAgent.timed
def getHpAlmSSOHeader(self, baseEndPoint):
self.userid = self.getCredential("userid")
self.passwd = self.getCredential("passwd")
authEndPoint = baseEndPoint + '/qcbin/authentication-point/authenticate'
responseTupple = {}
reqHeaders = {
"Content-Type" : "application/xml",
"Accept" : "application/xml"
}
        self.getResponse(authEndPoint, 'POST', self.userid, self.passwd, None, reqHeaders=reqHeaders, responseTupple=responseTupple)
ssoCookie = responseTupple['cookies']['LWSSO_COOKIE_KEY']
cookieHeader = {
"Cookie" : 'LWSSO_COOKIE_KEY='+ssoCookie+';'
}
siteSessionEndPoint = baseEndPoint + '/qcbin/rest/site-session'
self.getResponse(siteSessionEndPoint, 'POST', None, None, None, reqHeaders=cookieHeader, responseTupple=responseTupple)
cookieHeader = {
"Cookie" : 'LWSSO_COOKIE_KEY='+ssoCookie+';QCSession='+responseTupple['cookies']['QCSession']
}
return cookieHeader
def extendSession(self, baseEndPoint, cookieHeader):
extendSessionUrl = baseEndPoint + '/qcbin/rest/site-session'
extendSessionResponse = self.getResponse(extendSessionUrl, 'GET', None, None, None, reqHeaders=cookieHeader)
def signOut(self, baseEndPoint):
signOutEndPoint = baseEndPoint + '/qcbin/authentication-point/logout'
        self.getResponse(signOutEndPoint, 'GET', None, None, None, reqHeaders={})
def getDomains(self, baseEndPoint, cookieHeader):
projectsEndPoint = baseEndPoint + '/qcbin/rest/domains?include-projects-info=y&alt=application/'+self.responseType.lower()
        projectResponse = self.getResponse(projectsEndPoint, 'GET', None, None, None, reqHeaders=cookieHeader)
return projectResponse
@BaseAgent.timed
def getProjectDetails(self, baseEndPoint, reqHeaders, domain, project, entityName, fields, startFrom):
domainTracking = self.tracking.get(domain, None)
if domainTracking == None:
domainTracking = {}
self.tracking[domain] = domainTracking
projectTracking = domainTracking.get(project, None)
if projectTracking == None:
projectTracking = {}
domainTracking[project] = projectTracking
entityTracking = projectTracking.get(entityName, None)
projectEndPoint = baseEndPoint + '/qcbin/rest/domains/' + domain + '/projects/' + project + '/' + entityName + '?alt=application/'+self.responseType.lower()+'&'+fields
trackingFieldName = 'last-modified'
if entityName == 'releases':
trackingFieldName = 'start-date'
if entityTracking == None:
projectEndPoint += '&query={'+trackingFieldName+'[>"'+startFrom+'"]}&order-by={'+trackingFieldName+'[ASC]}'
else:
projectEndPoint += '&query={'+trackingFieldName+'[>"'+entityTracking+'"]}&order-by={'+trackingFieldName+'[ASC]}'
entityMetaDetails = self.config.get('dynamicTemplate', {}).get("almEntities").get(entityName)
dataList = []
startIndex = 1
totalResults = 1
loadNextPageResult = True
try:
if self.responseType == 'XML':
while loadNextPageResult:
restUrl = projectEndPoint + '&page-size='+str(self.dataFetchCount)+'&start-index='+str(startIndex)
projectResponse = self.getResponse(restUrl, 'GET', None, None, None, reqHeaders=reqHeaders)
entities = ET.fromstring(projectResponse)
totalResults = int(entities.attrib['TotalResults'])
if totalResults > 0:
entities = list(entities.iter('Entity'))
for entity in entities:
data = {}
data['almDomain'] = domain
data['almProject'] = project
data['almType'] = entity.attrib['Type']
fields = list(entity.iter('Field'))
for field in fields:
fieldName = field.attrib['Name']
propertyName = entityMetaDetails.get(fieldName, None)
if propertyName:
fieldTag = field.find('Value')
if fieldTag is not None:
fieldValue = self.extractValueWithType(fieldTag.text)
data[propertyName] = fieldValue
dataList.append(data)
startIndex += self.dataFetchCount
if totalResults < startIndex:
loadNextPageResult = False
if len(dataList) > 0:
latestRecord = dataList[len(dataList) - 1]
projectTracking[entityName] = latestRecord[entityMetaDetails[trackingFieldName]]
else:
while loadNextPageResult:
restUrl = projectEndPoint + '&page-size='+str(self.dataFetchCount)+'&start-index='+str(startIndex)
projectResponse = self.getResponse(restUrl, 'GET', None, None, None, reqHeaders=reqHeaders)
totalResults = projectResponse.get("TotalResults",0)
if totalResults > 0:
entities = projectResponse.get("entities", [])
for entity in entities:
data = {}
data['domain'] = domain
data['project'] = project
data['type'] = entity['Type']
fields = entity['Fields']
for field in fields:
values = field['values']
for value in values:
propertyName = entityMetaDetails.get(field['Name'], None)
if propertyName:
fieldValue = value.get('value', '')
data[entityMetaDetails[propertyName]] = fieldValue
dataList.append(data)
startIndex += self.dataFetchCount
if totalResults < startIndex:
loadNextPageResult = False
if len(dataList) > 0:
latestRecord = dataList[len(dataList) - 1]
projectTracking[entityName] = latestRecord[entityMetaDetails[trackingFieldName]]
except Exception as ex:
self.baseLogger.error(ex)
return dataList
def extractValueWithType(self, value):
if value is None:
return ''
elif value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
try:
return int(value)
except ValueError:
return value
@BaseAgent.ti |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/drawing/tests/test_layout.py | Python | gpl-3.0 | 15,534 | 0.000515 | """Unit tests for layout functions."""
import networkx as nx
from networkx.testing import almost_equal
import pytest
numpy = pytest.importorskip("numpy")
scipy = pytest.importorskip("scipy")
class TestLayout:
@classmethod
def setup_class(cls):
cls.Gi = nx.grid_2d_graph(5, 5)
cls.Gs = nx.Graph()
nx.add_path(cls.Gs, "abcdef")
cls.bigG = nx.grid_2d_graph(25, 25) # > 500 nodes for sparse
@staticmethod
def collect_node_distances(positions):
distances = []
prev_val = None
for k in positions:
if prev_val is not None:
diff = positions[k] - prev_val
distances.append(numpy.dot(diff, diff) ** 0.5)
prev_val = positions[k]
return distances
def test_spring_fixed_without_pos(self):
G = nx.path_graph(4)
pytest.raises(ValueError, nx.spring_layout, G, fixed=[0])
pos = {0: (1, 1), 2: (0, 0)}
pytest.raises(ValueError, nx.spring_layout, G, fixed=[0, 1], pos=pos)
nx.spring_layout(G, fixed=[0, 2], pos=pos) # No ValueError
def test_spring_init_pos(self):
# Tests GH #2448
import math
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (2, 3)])
init_pos = {0: (0.0, 0.0)}
fixed_pos = [0]
pos = nx.fruchterman_reingold_layout(G, pos=init_pos, fixed=fixed_pos)
has_nan = any(math.isnan(c) for coords in pos.values() for c in coords)
assert not has_nan, "values should not be nan"
def test_smoke_empty_graph(self):
G = []
nx.random_layout(G)
nx.circular_layout(G)
nx.planar_layout(G)
nx.spring_layout(G)
nx.fruchterman_reingold_layout(G)
nx.spectral_layout(G)
nx.shell_layout(G)
nx.bipartite_layout(G, G)
nx.spiral_layout(G)
nx.multipartite_layout(G)
nx.kamada_kawai_layout(G)
def test_smoke_int(self):
G = self.Gi
nx.random_layout(G)
nx.circular_layout(G)
nx.planar_layout(G)
nx.spring_layout(G)
nx.fruchterman_reingold_layout(G)
nx.fruchterman_reingold_layout(self.bigG)
nx.spectral_layout(G)
nx.spectral_layout(G.to_directed())
nx.spectral_layout(self.bigG)
nx.spectral_layout(self.bigG.to_directed())
nx.shell_layout(G)
nx.spiral_layout(G)
nx.kamada_kawai_layout(G)
nx.kamada_kawai_layout(G, dim=1)
nx.kamada_kawai_layout(G, dim=3)
def test_smoke_string(self):
G = self.Gs
nx.random_layout(G)
nx.circular_layout(G)
nx.planar_layout(G)
nx.spring_layout(G)
nx.fruchterman_reingold_layout(G)
nx.spectral_layout(G)
nx.shell_layout(G)
nx.spiral_layout(G)
nx.kamada_kawai_layout(G)
nx.kamada_kawai_layout(G, dim=1)
nx.kamada_kawai_layout(G, dim=3)
def check_scale_and_center(self, pos, scale, center):
center = numpy.array(center)
low = center - scale
hi = center + scale
vpos = numpy.array(list(pos.values()))
length = vpos.max(0) - vpos.min(0)
assert (length <= 2 * scale).all()
assert (vpos >= low).all()
assert (vpos <= hi).all()
def test_scale_and_center_arg(self):
        sc = self.check_scale_and_center
c = (4, 5)
G = nx.complete_graph(9)
G.add_node(9)
sc(nx.random_layout(G, center=c), scale=0.5, center=(4.5, 5.5))
# rest can have 2*scale length: [-scale, scale]
sc(nx.spring_layout(G, scale=2, center=c), scale=2, center=c)
        sc(nx.spectral_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.circular_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.shell_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.spiral_layout(G, scale=2, center=c), scale=2, center=c)
sc(nx.kamada_kawai_layout(G, scale=2, center=c), scale=2, center=c)
c = (2, 3, 5)
sc(nx.kamada_kawai_layout(G, dim=3, scale=2, center=c), scale=2, center=c)
def test_planar_layout_non_planar_input(self):
G = nx.complete_graph(9)
pytest.raises(nx.NetworkXException, nx.planar_layout, G)
def test_smoke_planar_layout_embedding_input(self):
embedding = nx.PlanarEmbedding()
embedding.set_data({0: [1, 2], 1: [0, 2], 2: [0, 1]})
nx.planar_layout(embedding)
def test_default_scale_and_center(self):
sc = self.check_scale_and_center
c = (0, 0)
G = nx.complete_graph(9)
G.add_node(9)
sc(nx.random_layout(G), scale=0.5, center=(0.5, 0.5))
sc(nx.spring_layout(G), scale=1, center=c)
sc(nx.spectral_layout(G), scale=1, center=c)
sc(nx.circular_layout(G), scale=1, center=c)
sc(nx.shell_layout(G), scale=1, center=c)
sc(nx.spiral_layout(G), scale=1, center=c)
sc(nx.kamada_kawai_layout(G), scale=1, center=c)
c = (0, 0, 0)
sc(nx.kamada_kawai_layout(G, dim=3), scale=1, center=c)
def test_circular_planar_and_shell_dim_error(self):
G = nx.path_graph(4)
pytest.raises(ValueError, nx.circular_layout, G, dim=1)
pytest.raises(ValueError, nx.shell_layout, G, dim=1)
pytest.raises(ValueError, nx.shell_layout, G, dim=3)
pytest.raises(ValueError, nx.planar_layout, G, dim=1)
pytest.raises(ValueError, nx.planar_layout, G, dim=3)
def test_adjacency_interface_numpy(self):
A = nx.to_numpy_array(self.Gs)
pos = nx.drawing.layout._fruchterman_reingold(A)
assert pos.shape == (6, 2)
pos = nx.drawing.layout._fruchterman_reingold(A, dim=3)
assert pos.shape == (6, 3)
pos = nx.drawing.layout._sparse_fruchterman_reingold(A)
assert pos.shape == (6, 2)
def test_adjacency_interface_scipy(self):
A = nx.to_scipy_sparse_matrix(self.Gs, dtype="d")
pos = nx.drawing.layout._sparse_fruchterman_reingold(A)
assert pos.shape == (6, 2)
pos = nx.drawing.layout._sparse_spectral(A)
assert pos.shape == (6, 2)
pos = nx.drawing.layout._sparse_fruchterman_reingold(A, dim=3)
assert pos.shape == (6, 3)
def test_single_nodes(self):
G = nx.path_graph(1)
vpos = nx.shell_layout(G)
assert not vpos[0].any()
G = nx.path_graph(4)
vpos = nx.shell_layout(G, [[0], [1, 2], [3]])
assert not vpos[0].any()
assert vpos[3].any() # ensure node 3 not at origin (#3188)
assert numpy.linalg.norm(vpos[3]) <= 1 # ensure node 3 fits (#3753)
vpos = nx.shell_layout(G, [[0], [1, 2], [3]], rotate=0)
assert numpy.linalg.norm(vpos[3]) <= 1 # ensure node 3 fits (#3753)
def test_smoke_initial_pos_fruchterman_reingold(self):
pos = nx.circular_layout(self.Gi)
npos = nx.fruchterman_reingold_layout(self.Gi, pos=pos)
def test_fixed_node_fruchterman_reingold(self):
# Dense version (numpy based)
pos = nx.circular_layout(self.Gi)
npos = nx.spring_layout(self.Gi, pos=pos, fixed=[(0, 0)])
assert tuple(pos[(0, 0)]) == tuple(npos[(0, 0)])
# Sparse version (scipy based)
pos = nx.circular_layout(self.bigG)
npos = nx.spring_layout(self.bigG, pos=pos, fixed=[(0, 0)])
for axis in range(2):
assert almost_equal(pos[(0, 0)][axis], npos[(0, 0)][axis])
def test_center_parameter(self):
G = nx.path_graph(1)
nx.random_layout(G, center=(1, 1))
vpos = nx.circular_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.planar_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.spring_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.fruchterman_reingold_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.spectral_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.shell_layout(G, center=(1, 1))
assert tuple(vpos[0]) == (1, 1)
vpos = nx.spiral_layou |
fae92/TSML | lexer_parser/lexer.py | Python | cc0-1.0 | 1,403 | 0.018532 | # -----------------------------------------------------------------------------
# lexer.py
#
# A simple calculator with variables -- all in one file.
# -----------------------------------------------------------------------------
reserved = {
'class':'CLASS',
'block':'BLOCK',
'event':'EVENT',
'transition':'TRANSITION',
'state':'STATE',
'end':'END',
'synchronization':'SYNCHRONIZATION',
'observer':'OBSERVER'
# 'and':'AND',
# 'or':'OR'
}
tokens = [ 'POINT', 'DEUXPOINTS', 'FLECHE', 'ANDCOMMERCIAL', 'VIRGULE',
'IDENTIFIER' ] + list(reserved.values())
# Tokens
t_VIRGULE = r'\,'
t_DEUXPOINTS = r'\:'
t_ANDCOMMERCIAL = r'\&'
t_POINT = r'\.'
#t_IDENTIFIER = r'[A-Z][a-zA-Z0-9_]*'
t_FLECHE = r'\->'
# Ignored characters
t_ignore = " \t"
def t_newline(t):
r'\n+'
    t.lexer.lineno += t.value.count("\n")
def t_IDENTIFIER(t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = reserved.get(t.value,'IDENTIFIER') # Check for reserved words
t.value = t.value
return t
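# Example (added comment): with the reserved-word lookup above, the input
# "class Foo" tokenizes as CLASS('class') followed by IDENTIFIER('Foo').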
def t_error(t):
print("lexer : Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
import ply.lex as lex
lex.lex()
file = open("../data/automate.txt")
data=''
while 1:
line = file.readline()
if not line:
break
data += line
#print(data)
lex.input(data)
#while True:
# tok = lex.token()
# print(tok)
# if not tok: break |
gems-uff/noworkflow | npm/watch.py | Python | mit | 2,623 | 0.000762 | import os
import subprocess
from pathlib import Path
from time import sleep
PACKAGES = Path('packages')
class Module:
def __init__(self, name, path=None, files=None, dependencies=None):
self.name = name
if path is None:
path = PACKAGES / name
self.path = path
self.files = files or ["src/", "style/"]
self.dependencies = dependencies or []
self.old_sum = 0
#self.check_dir()
def check_dir(self):
"""Check if a file has changed in the package"""
time_list = []
for file in self.files:
file_list = []
file_path = self.path / Path(file)
if not file.endswith("/"):
file_list = [file_path]
else:
for root, _, files in os.walk(file_path):
root = Path(root)
file_list = [root / f for f in files]
time_list += [os.stat(f).st_mtime for f in file_list]
new_sum = sum(time_list)
result = new_sum != self.old_sum
self.old_sum = new_sum
return result
def run(self):
print("Building", self.name)
process = subprocess.Popen(
"npm run build",
shell=True,
cwd=self.path,
)
status = process.wait()
if status:
raise Exception("NPM run failed")
def check(self, run=True, visited={}):
"""Check if the module or its dependencies has changed"""
if self in visited:
return visited[self]
visited[self] = True
invalid = False
for dependency in self.dependencies:
if not dependency.check(run, visited):
invalid = True
invalid |= self.check_dir()
if run and invalid:
visited[self] = False
self.run()
return not invalid
def __hash__(self):
return hash(self.path)
def __repr__(self):
return "Module({})".format(self.name)
class NoFileModule(Module):
def check_dir(self):
return False
def run(self):
pass
utils = Module("utils")
history = Module("history", dependencies=[utils])
trial = Module("trial", | dependencies | =[utils])
nowvis = Module("nowvis", dependencies=[history, trial])
nbextension = Module("nbextension", dependencies=[history, trial])
ALL = NoFileModule("ALL", dependencies=[nowvis, nbextension])
print("Monitoring packages...")
while True:
visited = {}
try:
ALL.check(visited=visited)
except Exception as e:
print("Failed: {}".format(e))
sleep(1.0)
|
hadim/pygraphml | pygraphml/graph.py | Python | bsd-3-clause | 6,171 | 0.003889 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from . import Node
from . import Edge
from collections import deque
class Graph:
"""
    Main class which represents a Graph
:param name: name of the graph
"""
def __init__(self, name=""):
"""
"""
self.name = name
self._nodes = []
self._edges = []
self._root = None
self.directed = True
self.i = 0
def DFS_prefix(self, root=None):
"""
Depth-first search.
.. seealso::
        `Wikipedia DFS description <http://en.wikipedia.org/wiki/Depth-first_search>`_
:param root: first to start the search
:return: list of nodes
"""
if not root:
root = self._root
return self._DFS_prefix(root)
def _DFS_prefix(self, n, parent=None):
"""
"""
nodes = [n]
n['depth'] = self.i
for c in n.children():
nodes += self._DFS_prefix(c, n)
self.i += 1
return nodes
def BFS(self, root=None):
"""
Breadth-first search.
.. seealso::
        `Wikipedia BFS description <http://en.wikipedia.org/wiki/Breadth-first_search>`_
:param root: first to start the search
:return: list of nodes
"""
if not root:
root = self.root()
queue = deque()
queue.append(root)
nodes = []
while len(queue) > 0:
x = queue.popleft()
nodes.append(x)
for child in x.children():
queue.append(child)
return nodes
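    # Minimal usage sketch (added comment; assumes Node.children() resolves
    # neighbours through the stored edges):
    #   g = Graph()
    #   a, b = g.add_node('a'), g.add_node('b')
    #   g.add_edge(a, b, directed=True)
    #   g.set_root(a)
    #   order = g.BFS()   # -> [a, b]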
def get_depth(self, node):
"""
"""
depth = 0
while node.parent() and node != self.root():
node = node.parent()[0]
depth += 1
return depth
def nodes(self, ):
"""
"""
return self._nodes
def edges(self, ):
"""
"""
return self._edges
def children(self, node):
"""
"""
return node.children()
def add_node(self, label="", id=None):
"""
"""
n = Node(id)
n['label'] = label
self._nodes.append(n)
return n
def add_edge(self, n1, n2, directed=False):
"""
| """
        if n1 not in self._nodes:
            raise ValueError('Node n1 is not part of this graph')
        if n2 not in self._nodes:
            raise ValueError('Node n2 is not part of this graph')
e = Edge(n1, n2, directed)
self._edges.append(e)
return e
def add_edge_by_id(self, id1, id2):
try:
n1 = next(n for n in self._nodes if n.id == id1)
except StopIteration:
            raise ValueError('Graph has no node with ID {}'.format(id1))
try:
n2 = next(n for n in self._nodes if n.id == id2)
except StopIteration:
raise ValueError('Graph has no node with ID {}'.format(id2))
return self.add_edge(n1, n2)
def add_edge_by_label(self, label1, label2):
"""
"""
n1 = None
n2 = None
for n in self._nodes:
if n['label'] == label1:
n1 = n
if n['label'] == label2:
n2 = n
if n1 and n2:
return self.add_edge(n1, n2)
else:
return
def set_root(self, node):
"""
"""
self._root = node
def root(self):
"""
"""
return self._root
def set_root_by_attribute(self, value, attribute='label'):
"""
"""
for n in self.nodes():
if n[attribute] in value:
self.set_root(n)
return n
def get_attributs(self):
"""
"""
attr = []
attr_obj = []
for n in self.nodes():
for a in n.attr:
if a not in attr:
attr.append(a)
attr_obj.append(n.attr[a])
for e in self.edges():
for a in e.attr:
if a not in attr:
attr.append(a)
attr_obj.append(e.attr[a])
return attr_obj
def show(self, show_label=False):
"""
"""
import matplotlib
import matplotlib.pyplot as plt
import networkx as nx
G = nx.Graph()
for n in self._nodes:
if show_label:
n_label = n['label']
else:
n_label = n.id
G.add_node(n_label)
for e in self._edges:
if show_label:
n1_label = e.node1['label']
n2_label = e.node2['label']
else:
n1_label = e.node1.id
n2_label = e.node2.id
G.add_edge(n1_label, n2_label)
nx.draw(G)
if show_label:
nx.draw_networkx_labels(G, pos=nx.spring_layout(G))
plt.show()
class NoDupesGraph(Graph):
'''Add nodes without worrying if it is a duplicate.
Add edges without worrying if nodes exist '''
def __init__(self,*args,**kwargs):
Graph.__init__(self,*args,**kwargs)
self._nodes = {}
def nodes(self):
return self._nodes.values()
def add_node(self,label):
'''Return a node with label. Create node if label is new'''
try:
n = self._nodes[label]
except KeyError:
n = Node()
n['label'] = label
self._nodes[label]=n
return n
def add_edge(self, n1_label, n2_label,directed=False):
"""
Get or create edges using get_or_create_node
"""
n1 = self.add_node(n1_label)
n2 = self.add_node(n2_label)
e = Edge(n1, n2, directed)
self._edges.append(e)
return e
def flush_empty_nodes(self):
'''not implemented'''
pass
def condense_edges(self):
'''if a node connects to only two edges, combine those
edges and delete the node.
not implemented
'''
pass
|
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py | Python | bsd-3-clause | 3,594 | 0.001948 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vmkernel_ip_config
short_description: Configure the VMkernel IP Address
description:
- Configure the VMkernel IP Address
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
vmk_name:
description:
- VMkernel interface name
required: True
ip_address:
description:
- IP address to assign to VMkernel interface
required: True
subnet_mask:
description:
- Subnet Mask to assign to VMkernel interface
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure IP address on ESX host
local_action:
module: vmware_vmkernel_ip_config
hostname: esxi_hostname
username: esxi_username
password: esxi_password
vmk_name: vmk0
ip_address: 10.0.0.10
subnet_mask: 255.255.255.0
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec
def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
host_config_manager = host_system.configManager
host_network_system = host_config_manager.networkSystem
for vnic in host_network_system.networkConfig.vnic:
        if vnic.device == vmk_name:
spec = vnic.spec
if spec.ip.ipAddress != ip_address:
spec.ip.dhcp = False
                spec.ip.ipAddress = ip_address
spec.ip.subnetMask = subnet_mask
host_network_system.UpdateVirtualNic(vmk_name, spec)
return True
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmk_name = module.params['vmk_name']
ip_address = module.params['ip_address']
subnet_mask = module.params['subnet_mask']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
module.exit_json(changed=changed)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
dennissv/forestvision | Python/ReadCompute.py | Python | mit | 8,907 | 0.03166 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 00:16:31 2016
@author: Dennis
"""
import numpy as np
import transformations
import utm
import gmplot
from geopy.distance import vincenty
def readINS(filename):
results = []
f = open(filename, 'r')
c = 0
for line in f:
if c < 31:
c += 1
else:
current = []
for i in line.split(' '):
if not i == '':
try:
current.append(float(i.strip('\n')))
except:
None
if len(current)>0:
results.append(current)
f.close()
return np.float64(results)
def readCamera(filename):
results = []
f = open(filename, 'r')
first = True
for line in f:
if not first:
s = [float(i.strip('\n')) for i in line.split('\t')]
current = []
for i in s[3:10]:
current.append(i)
current.append(s[12])
            current.append(s[13])
current.append(s[14])
results.append(current)
first = False
f.close()
    return np.float64(results)
def usefulINS(data):
'''
Time (s) | North (m) | East | Altitude | Roll (radians) | Pitch | Yaw
'''
results,raw = [],[]
n0 = data[:,10][0]
e0 = data[:,9][0]
for i in data:
raw.append([i[18]%86400, i[10], i[9], i[1], i[0], \
i[2], i[4], i[13],])
results.append([i[18]%86400, i[10]-n0, i[9]-e0, i[11], i[0]*(np.pi/180), \
i[2]*(np.pi/180), i[4]*(np.pi/180),i[13], i[1], i[3], i[5]])
return np.float64(results),np.float64(raw)
def fixCameraTime(cam):
'''Attempts to fix camera time'''
for i in range(len(cam[:,0])):
cam[:,0][i] += 19
return cam
def setStartAltitudeCamera(cam,ins):
for i in range(len(cam[:,0])):
cam[:,3][i] += ins[:,3][i]
return cam
def syncINStoCamera(cam,ins):
'''Makes the camera and INS data the same length'''
newINS = []
for i in cam[:,0]:
values = [999,998]
c = 0
while values[1] < values[0]:
values[0] = values[1]
values[1] = abs(i-ins[:,0][c])
c += 1
newINS.append(ins[c-2])
try:
ins = ins[c-2:]
except:
None
return np.float64(newINS)
def rotateVector(dvec, eA):
'''Rotates a camera vector according to world orientation'''
dvec = np.float64(dvec)
r = transformations.euler_matrix(eA[0]+np.pi,0,0,axes='sxzy')
rot = np.vstack((np.vstack((r[0][:3],r[1][:3])),r[2][:3]))
dvec = np.dot(dvec,rot)
r = transformations.euler_matrix(0,eA[1],0,axes='sxzy')
rot = np.vstack((np.vstack((r[0][:3],r[1][:3])),r[2][:3]))
dvec = np.dot(dvec,rot)
r = transformations.euler_matrix(0,0,eA[2]-np.pi/2,axes='sxzy')
rot = np.vstack((np.vstack((r[0][:3],r[1][:3])),r[2][:3]))
return np.dot(dvec, rot)
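# Added note: rotateVector applies the three attitude angles as separate
# right-multiplications with axes='sxzy', i.e. v' = ((v * Rx(roll + pi)) *
# Rz(pitch)) * Ry(yaw - pi/2); the pi and pi/2 offsets presumably compensate
# for the camera/INS frame convention.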
def rotateCamera(cam, ins):
'''Attempts to fix the camera path by rotating its vectors'''
walk = []
result = []
for i in range(len(cam[:,0])):
vec = np.float64([cam[:,7][i],cam[:,8][i],cam[:,9][i]])
eulerAngles = [ins[:,4][i],-0.45-ins[:,5][i],ins[:,6][i]]
walk.append(rotateVector(vec,eulerAngles))
for i in range(len(walk)):
if i == 0:
result.append([cam[:,0][i],walk[i][0],walk[i][1]+cam[:,3][0],walk[i][2],cam[:,4][i], \
cam[:,5][i], cam[:,6][i]])
else:
result.append([cam[:,0][i],result[i-1][1]+walk[i][0],result[i-1][2]+walk[i][1]+0.00535, \
result[i-1][3]+walk[i][2],cam[:,4][i],cam[:,5][i],cam[:,6][i]])
return np.float64(result)
def convertCameratoUTM(cam, rins):
'''Converts camera coordinates to UTM format'''
conv = []
for i in range(len(cam[:,0])):
conv.append([cam[:,0][i],cam[:,1][i]+rins[:,1][0],cam[:,2][i],\
cam[:,3][i]+rins[:,2][0],cam[:,4][i],cam[:,5][i],\
cam[:,6][i]])
return np.float64(conv)
def exportToKalman(cam, ins, filename):
'''Writes camera and INS data to a .csv file to be read by the kalman filter'''
cv = [[0,0,0]]
dv = [[0,0,0]]
final = []
for i in range(1,len(cam[:,0])):
dt = cam[:,0][i]-cam[:,0][i-1]
cv.append([(cam[:,1][i]-cam[:,1][i-1])/dt,(cam[:,3][i]-cam[:,3][i-1])/dt,\
(cam[:,2][i]-cam[:,2][i-1])/dt])
dv.append([(cam[:,4][i]-cam[:,4][i-1])/dt,(cam[:,5][i]-cam[:,5][i-1])/dt,\
(cam[:,6][i]-cam[:,6][i-1])/dt])
for i in range(len(cv)):
final.append([cam[:,0][i],ins[:,1][i],ins[:,2][i],ins[:,3][i],ins[:,7][i],\
cv[i][0],cv[i][1],cv[i][2],ins[:,4][i],ins[:,5][i],ins[:,6][i],\
ins[:,8][i],ins[:,9][i],ins[:,10][i],dv[i][0],dv[i][1],dv[i][2]])
final = np.array(final)
np.savetxt(filename,final,fmt='%.15e',delimiter=',',\
header='Time(s),INS_Northing(m),INS_Easting(m),INS_Altitude(m),INS_pos_Stdev (m),Cam_Vn(m/s),Cam_Ve(m/s),Cam_Va(m/s),INS_Roll (radians),INS_Pitch (radians),INS_Yaw (radians),INS_Roll_sd (radians),INS_Pitch_sd (radians),INS_Yaw_sd (radians),Cam_Vroll (radians),Cam_Vpitch (radians),Cam_Vyaw (radians)')
def convertToCoordinates(cam, ins):
cins, ccam = [], []
for i in range(len(cam[:,0])):
cordins = utm.to_latlon(ins[:,2][i],ins[:,1][i],zone_number=34,\
zone_letter='V')
cordcam = utm.to_latlon(cam[:,3][i],cam[:,1][i],zone_number=34,\
zone_letter='V')
cins.append([cam[:,0][i],cordins[0],cordins[1]])
ccam.append([cam[:,0][i],cordcam[0],cordcam[1]])
return np.array(ccam),np.array(cins)
def readTimeStamps(filename):
results = []
f = open(filename, 'r')
for line in f:
s = [float(i.strip('\n')) for i in line.split('\t')]
s = (s[3]-1)*60*60+s[4]*60+s[5]+17
results.append(s)
f.close()
return np.float64(results)
def evaluateWithTimestamps(cam, ins, timestamps, wps):
camrmse, insrmse = 0, 0
positions = []
for c in range(len(timestamps)):
dif = []
for i in range(len(cam[:,0])):
dif.append(abs(cam[:,0][i]-(timestamps[c]+4)))
ind = dif.index(min(dif))
camrmse += np.sqrt((vincenty((wps[c][0],wps[c][1]),(wps[c][0],cam[:,2][ind])).m)**2+\
(vincenty((wps[c][0],wps[c][1]),(cam[:,1][ind],wps[c][1])).m)**2)
insrmse += np.sqrt((vincenty((wps[c][0],wps[c][1]),(wps[c][0],ins[:,2][ind])).m)**2+\
(vincenty((wps[c][0],wps[c][1]),(ins[:,1][ind],wps[c][1])).m)**2)
positions.append(ins[ind])
return camrmse, insrmse, np.float64(positions)
def makePlot(data1,data2,wps,tspos):
gmap = gmplot.GoogleMapPlotter(63.821482, 20.311794, 16)
gmap.plot(data1[:,1], data1[:,2], 'cornflowerblue', edge_width=3)
gmap.plot(data2[:,1], data2[:,2], 'gold', edge_width=3)
gmap.scatter(wps[:,0],wps[:,1], 'crimson', size=0.3, marker=False)
gmap.scatter(tspos[:,1],tspos[:,2],'black', size=0.3, marker=False)
gmap.plot
gmap.draw("newtest1130.html")
def exportUnityPathINS(data, filename):
f = open(filename,'w')
for i in range(len(data[:,0])):
f.write(str(data[:,2][i])+','+str(data[:,3][i])+','+str(data[:,1][i])+'\n')
f.close()
def exportUnityPathCam(data, filename):
f = open(filename,'w')
for i in range(len(data[:,0])):
f.write(str(data[:,3][i])+','+str(data[:,2][i])+','+str(data[:,1][i])+'\n')
f.close()
#Variable declerations
wps = np.array([[63.820783,20.312628],[63.821467,20.313170],[63.821569,20.313185],\
[63.821678,20.313206],[63.821811,20.313260],[63.822064,20.313428],
[63.822056,20.313716], |
Zardinality/TF_Deformable_Net | lib/__init__.py | Python | mit | 18 | 0.055556 | #import fast_rcnn | ||
pikuli-project/pikuli | test/test_region.py | Python | mit | 12,465 | 0.000481 | # -*- coding: utf-8 -*-
import pytest
from hamcrest import assert_that, equal_to, all_of, \
instance_of, has_property, calling, raises
from pikuli import Region, Location
from pikuli.common_exceptions import FailExit
X = 311
Y = 128
ID = 3
WIDTH = 303
HEIGHT = 201
FIND_TIMEOUT = 3.1
TITLE = 'New test region'
class TestRegion(object):
test_region = Region(X, Y, WIDTH, HEIGHT, title=TITLE, id=ID)
def test_str(self):
assert_that(str(self.test_region),
equal_to('Region "%s" (%i, %i, %i, %i)' %
(TITLE, X, Y, WIDTH, HEIGHT)))
@pytest.mark.parametrize("id,region,expected", [
("Default id", Region(X, Y, WIDTH, HEIGHT), 0),
("Custom id", test_region, 3)
])
def test_id(self, id, region, expected):
        assert_that(region.get_id(), equal_to(expected))
@pytest.mark.parametrize("title,region,expected", [
("Default title", Region(X, Y, WIDTH, HEIGHT), 'New Region'),
("Custom title", test_region, 'New test region')
])
def test_title(self, title, region, expected):
assert_that(region.title, equal_to(expected))
@pytest.mark.parametrize("dimension,value,expected", [
("x value", test_region.x, X),
("y value", test_region.y, Y),
("width", test_region.w, WIDTH),
("height", test_region.h, HEIGHT)
])
def test_get_dimensions(self, dimension, value, expected):
assert_that(value, equal_to(expected))
@pytest.mark.parametrize("x_offset,y_offset", [
(0, 0), (15, 11),
])
def test_get_top_left(self, x_offset, y_offset):
location = self.test_region.get_top_left(x_offset, y_offset)
assert_that(
location, all_of(
has_property('x', X + x_offset),
has_property('y', Y + y_offset),
has_property('title', 'Top left corner of {}'.format(TITLE)))
)
@pytest.mark.parametrize("x_offset,y_offset", [
(0, 0), (15, 11),
])
def test_get_top_right(self, x_offset, y_offset):
location = self.test_region.get_top_right(x_offset, y_offset)
assert_that(
location, all_of(
has_property('x', X + x_offset + WIDTH),
has_property('y', Y + y_offset),
has_property('title', 'Top right corner of {}'.format(TITLE)))
)
@pytest.mark.parametrize("x_offset,y_offset", [
(0, 0), (15, 11),
])
def test_get_bottom_left(self, x_offset, y_offset):
location = self.test_region.get_bottom_left(x_offset, y_offset)
assert_that(
location, all_of(
has_property('x', X + x_offset),
has_property('y', Y + y_offset + HEIGHT),
has_property('title', 'Bottom left corner of {}'.format(TITLE)))
)
@pytest.mark.parametrize("x_offset,y_offset", [
(0, 0), (15, 11),
])
def test_get_bottom_right(self, x_offset, y_offset):
location = self.test_region.get_bottom_right(x_offset, y_offset)
assert_that(
location, all_of(
has_property('x', X + x_offset + WIDTH),
has_property('y', Y + y_offset + HEIGHT),
has_property('title', 'Bottom right corner of {}'.format(TITLE)))
)
@pytest.mark.parametrize("x_offset,y_offset", [
(0, 0), (15, 11),
])
def test_get_center(self, x_offset, y_offset):
location = self.test_region.get_center(x_offset, y_offset)
assert_that(
location, all_of(
has_property('x', X + x_offset + int(WIDTH / 2)),
has_property('y', Y + y_offset + int(HEIGHT / 2)),
has_property('title', 'Center of {}'.format(TITLE)))
)
def test_get_find_timeout(self):
assert_that(
self.test_region.get_find_timeout(), equal_to(FIND_TIMEOUT)
)
def test_set_find_timeout(self):
self.test_region.set_find_timeout(FIND_TIMEOUT + 5)
assert_that(
self.test_region.get_find_timeout(), equal_to(FIND_TIMEOUT + 5)
)
@pytest.mark.parametrize("relation,value,expected", [
("top-left", X + 10, X + 10),
("center", X + 10, X + 10 - int(test_region.w / 2))
])
def test_set_x(self, relation, value, expected):
self.test_region.set_x(value, relation=relation)
assert_that(self.test_region.x, equal_to(expected))
@pytest.mark.parametrize("relation,value", [
("some_relation", X + 10),
("center", '10'),
("center", 'ten')
])
def test_fail_set_x(self, relation, value):
assert_that(calling(self.test_region.set_x).with_args(value, relation=relation),
raises(FailExit))
@pytest.mark.parametrize("relation,value,expected", [
("top-left", Y + 10, Y + 10),
("center", Y + 10, Y + 10 - int(test_region.h / 2))
])
def test_set_y(self, relation, value, expected):
self.test_region.set_y(value, relation=relation)
assert_that(self.test_region.y, equal_to(expected))
@pytest.mark.parametrize("relation,value", [
("some_relation", Y + 10),
("center", '10'),
("center", 'ten')
])
def test_fail_set_y(self, relation, value):
assert_that(calling(self.test_region.set_y).with_args(value, relation=relation),
raises(FailExit))
@pytest.mark.parametrize("relation,value,expected_x", [
("top-left", WIDTH + 10, X),
("center", WIDTH + 10, X - 5)
])
def test_set_width(self, relation, value, expected_x):
reg = Region(X, Y, WIDTH, HEIGHT, title=TITLE, id=ID)
reg.set_w(value, relation=relation)
assert_that(all_of(
reg, has_property('w', value),
reg, has_property('x', expected_x))
)
@pytest.mark.parametrize("relation,value", [
("some_relation", WIDTH + 10),
("center", '10'),
("center", 'ten')
])
def test_fail_set_width(self, relation, value):
assert_that(calling(self.test_region.set_w).with_args(value, relation=relation),
raises(FailExit))
@pytest.mark.parametrize("relation,value,expected_y", [
("top-left", HEIGHT + 10, Y),
("center", HEIGHT + 10, Y - 5)
])
def test_set_height(self, relation, value, expected_y):
reg = Region(X, Y, WIDTH, HEIGHT, title=TITLE, id=ID)
reg.set_h(value, relation=relation)
assert_that(all_of(
reg, has_property('h', value),
reg, has_property('y', expected_y))
)
@pytest.mark.parametrize("relation,value", [
("some_relation", WIDTH + 10),
("center", '10'),
("center", 'ten')
])
def test_fail_set_height(self, relation, value):
assert_that(calling(self.test_region.set_h).with_args(value, relation=relation),
raises(FailExit))
@pytest.mark.parametrize("relation,args,expected", [
('top-left',
[Region(X + 10, Y + 10, WIDTH + 10, HEIGHT + 10)],
[X + 10, Y + 10, WIDTH + 10, HEIGHT + 10]),
('top-left',
[X + 10, Y + 10, WIDTH + 10, HEIGHT + 10],
[X + 10, Y + 10, WIDTH + 10, HEIGHT + 10]),
('center',
[X + 10, Y + 10, WIDTH + 10, HEIGHT + 10],
[X + 10 - int((WIDTH + 10) / 2),
Y + 10 - int((HEIGHT + 10) / 2),
WIDTH + 10,
HEIGHT + 10])
])
def test_set_rect(self, relation, args, expected):
self.test_region.set_rect(*args, relation=relation)
assert_that(
self.test_region, all_of(
has_property('x', expected[0]),
has_property('y', expected[1]),
has_property('w', expected[2]),
has_property('h', expected[3]))
)
@pytest.mark.parametrize("region,args,expected", [
(Region(X, Y, WIDTH, HEIGHT, title=TITLE, id=ID),
[Location(13, 14)],
[test_region.x + 13, test_region.y + 14, WIDTH, HEIGHT]),
(Region(X, Y, WIDTH, HEIGHT, title=TITLE, id=ID),
[13, 14],
|
bdelliott/wordgame | web/django/templatetags/i18n.py | Python | mit | 13,319 | 0.003003 | import re
from django.template import Node, Variable, VariableNode
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.template.base import _render_value_in_context
from django.utils import translation
from django.utils.encoding import force_unicode
from django.template.defaulttags import token_kwargs
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = Variable(lang_code)
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = Variable(languages)
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop):
self.noop = noop
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, basestring):
self.filter_expression.var = Variable(u"'%s'" % self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
output = self.filter_expression.resolve(context)
return _render_value_in_context(output, context)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
def render(self, context):
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
            context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
result = translation.ugettext(singular)
        # Escape all isolated '%' before substituting in the context.
result = re.sub(u'%(?!\()', u'%%', result)
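        # Worked example of the escape above (added comment):
        #   re.sub(u'%(?!\()', u'%%', u'Save 10% on %(item)s')
        #   -> u'Save 10%% on %(item)s'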
data = dict([(v, _render_value_in_context(context[v], context)) for v in vars])
context.pop()
return result % data
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(args[2], args[4])
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(args[2], args[4])
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
|
orlenko/FBBot | src/FBBot/fbposter/management/commands/populate.py | Python | apache-2.0 | 412 | 0.002427 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from fbposter.models import FacebookStatus
import facebook
import datetime
class Command(BaseCommand):
args = ''
help = 'Gets fresh messages from Reddit'
def handle(self, *args, **options):
if len(args) < 1:
self.stdout.write('Usage: ./manage.py facebook_post ')
return
| |
255BITS/HyperGAN | tests/losses/test_least_squares_loss.py | Python | mit | 966 | 0.005176 | import tensorflow as tf
import hyperchamber as hc
import hypergan as hg
import numpy as np
from hypergan.losses.least_squares_loss import LeastSquaresLoss
from hypergan.ops import TensorflowOps
from unittest.mock import MagicMock
from tests.mocks import mock_gan
loss_config = {'test': True, 'reduce':'reduce_mean', 'labels': [0,1,0]}
class LeastSquaresLossTest(tf.test.TestCase):
def test_config(self):
with self.test_session():
loss = LeastSquaresLoss(mock_gan(), loss_config)
self.assertTrue(loss.config.test)
def test_create(self):
with self.test_session():
gan = mock_gan()
loss = LeastSquaresLoss(gan, loss_config)
d_loss, g_loss = loss.create()
d_shape = gan.ops.shape(d_loss)
g_shape = gan.ops.shape(g_loss)
self.assertEqual(sum(d_shape), 0)
self.assertEqual(sum(g_shape), 0)
if __name__ == "__main__":
tf.test.main()
|
cmmorrow/sci-analysis | sci_analysis/analysis/base.py | Python | mit | 5,281 | 0.002083 | """Module: base.py
Classes:
Analysis - Generic analysis root class.
Test - Generic statistical test class.
GroupTest - Perform a test on multiple vectors that are passed as a tuple of arbitrary length.
Comparison - Perform a test on two independent vectors of equal length.
NormTest - Tests for whether data is normally distributed or not.
GroupNormTest - Tests a group of data to see if they are normally distributed or not.
TTest - Performs a T-Test on the two provided vectors.
LinearRegression - Performs a linear regression between two vectors.
Correlation - Performs a pearson or spearman correlation between two vectors.
Anova - Performs a one-way ANOVA on a group of vectors.
Kruskal - Performs a non-parametric Kruskal-Wallis test on a group of vectors.
EqualVariance - Checks a group of vectors for equal variance.
VectorStatistics - Reports basic summary stats for a provided vector.
GroupStatistics - Reports basic summary stats for a group of vectors.
Functions:
analyze - Magic method for performing quick data analysis.
"""
# Numpy imports
from numpy import float_, int_
class Analysis(object):
"""Generic analysis root class.
Members:
_data - the data used for analysis.
_display - flag for whether to display the analysis output.
_results - A dict of the results of the test.
Methods:
logic - This method needs to run the analysis, set the results member, and display the output at bare minimum.
run - This method should return the results of the specific analysis.
output - This method shouldn't return a value and only produce a side-effect.
"""
_name = "Analysis"
def __init__(self, data, display=True):
"""Initialize the data and results members.
Override this method to initialize additional members or perform
checks on data.
"""
self._data = data
self._display = display
self._results = {}
@property
def name(self):
"""The name of the test class"""
return self._name
@property
def data(self):
"""The data used for analysis"""
return self._data
@property
def results(self):
"""A dict of the results returned by the run method"""
return self._results
def logic(self):
"""This method needs to run the analysis, set the results member, and
display the output at bare minimum.
Override this method to modify the execution sequence of the analysis.
"""
if self._data is None:
return
self.run()
if self._display:
print(self)
def run(self):
"""This method should perform the specific analysis and set the results dict.
Override this method to perform a specific analysis or calculation.
"""
raise NotImplementedError
def __str__(self):
return std_output(self._name, self._results, tuple(self._results.keys()))
def std_output(name, results, order, precision=4, spacing=14):
"""
Parameters
----------
name : str
The name of the analysis report.
results : dict or list
The input dict or list to print.
order : list or tuple
The list of keys in results to display and the order to display them in.
precision : int
The number of decimal places to show for float values.
spacing : int
The max number of characters for each printed column.
Returns
-------
output_string : str
The report to be printed to stdout.
"""
def format_header(col_names):
line = ""
for n in col_names:
line += '{:{}s}'.format(n, spacing)
return line
def format_row(_row, _order):
line = ""
for column in _order:
value = _row[column]
t = type(value)
if t in [float, float_]:
line += '{:< {}.{}f}'.format(value, spacing, precision)
elif t in [int, int_]:
line += '{:< {}d}'.format(value, spacing)
else:
line += '{:<{}s}'.format(str(value), spacing)
return line
def format_items(label, value):
if type(value) in {float, float_}:
line = '{:{}s}'.format(label, max_length) + ' = ' + '{:< .{}f}'.format(value, precision)
elif type(value) in {int, int_}:
line = '{:{}s}'.format(label, max_length) + ' = ' + '{:< d}'.format(value)
else:
line = '{:{}s}'.format(label, max_length) + ' = ' + str(value)
return line
table = list()
header = ''
if isinstance(results, list):
header = format_header(order)
for row in results:
table.append(format_row(row, order))
elif isinstance(results, dict):
max_length = max([len(label) for label in results.keys()])
for key in order:
table.append(format_items(key, results[key]))
out = [
'',
'',
name,
'-' * len(name),
''
]
if len(header) > 0:
out.extend([
header,
'-' * len(header)
])
out.append('\n'.join(table))
return '\n'.join(out)
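# A minimal usage sketch (hypothetical values, not from the test suite):
# print(std_output('Example Stats', {'Mean': 1.2345678, 'n': 10},
#                  order=('Mean', 'n')))
# renders one "label = value" row per key, with floats shown to 4
# decimal places by default.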
|
google/neural-light-transport | nlt/datasets/nlt.py | Python | apache-2.0 | 7,804 | 0.001281 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=relative-beyond-top-level
from os.path import join, exists
from itertools import product
import re
import numpy as np
from PIL import Image
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import xiuminglib as xm
from util import logging as logutil, io as ioutil
from .base import Dataset as BaseDataset
logger = logutil.Logger(loggee="datasets/nlt")
class Dataset(BaseDataset):
def __init__(self, config, mode, **kwargs):
self.data_root = config.get('DEFAULT', 'data_root')
data_status_path = self.data_root.rstrip('/') + '.json'
if not exists(data_status_path):
raise FileNotFoundError((
"Data status JSON not found at \n\t%s\nRun "
"$REPO/data_gen/postproc.py to generate it") % data_status_path)
self.data_paths = ioutil.read_json(data_status_path)
# Because paths in JSON are relative, prepend data root directory
for _, paths in self.data_paths.items():
for k, v in paths.items():
if k != 'complete':
paths[k] = join(self.data_root, v)
super().__init__(config, mode, **kwargs)
# Trigger init. in a main thread before starting multi-threaded work.
# See http://yaqs/eng/q/6292200559345664 for details
Image.init()
def _glob(self):
# Handle holdouts
holdout_cam = self.config.get('DEFAULT', 'holdout_cam').split(',')
holdout_light = self.config.get('DEFAULT', 'holdout_light').split(',')
holdout = [
'%s_%s' % x for x in product(holdout_cam, holdout_light)]
# Add only if data are complete for this camera
ids = []
for id_, paths in self.data_paths.items():
if id_.startswith('test' if self.mode == 'test' else 'trainvali'):
if paths['complete']:
ids.append(id_)
else:
logger.warn(
"Skipping '%s' because its data are incomplete", id_)
# Shortcircuit if testing
if self.mode == 'test':
logger.info(
"Number of '%s' camera-light combinations: %d", self.mode,
len(ids))
return ids
# Training-validation split
ids_split = []
for id_ in ids:
# ID is {bin_mode}_{i:09d}_{cam}_{light}
cam_light = '_'.join(id_.split('_')[-2:])
if (self.mode == 'vali' and cam_light in holdout) or \
(self.mode != 'vali' and cam_light not in holdout):
ids_split.append(id_)
logger.info(
"Number of '%s' camera-light combinations: %d", self.mode,
len(ids_split))
return ids_split
def _get_nn_id(self, nn):
id_regex = re.compile(
r'trainvali_\d\d\d\d\d\d\d\d\d_{cam}_{light}'.format(**nn))
matched = [
x for x in self.data_paths.keys() if id_regex.search(x) is not None]
n_matches = len(matched)
if not matched:
return None
if n_matches == 1:
return matched[0]
raise ValueError(
"Found {n} matches:\n\t{matches}".format(
n=n_matches, matches=matched))
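# Illustrative only (assumed cam/light names): for
# nn = {'cam': 'Cam00', 'light': 'L013'} the regex matches IDs shaped
# like 'trainvali_000000042_Cam00_L013' (nine digits after the prefix);
# no match returns None and multiple matches raise ValueError.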
def _process_example_precache(self, id_): # pylint: disable=arguments-differ
"""Loads data from paths.
"""
id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id, nn_base, nn_rgb, \
nn_rgb_camspc = tf.py_function(
self._load_data, [id_], (
tf.string, tf.float32, tf.float32, tf.float32, tf.float32,
tf.float32, tf.float32, tf.string, tf.float32, tf.float32,
tf.float32))
return \
id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id, nn_base, \
nn_rgb, nn_rgb_camspc
def _load_data(self, id_):
if isinstance(id_, tf.Tensor):
id_ = id_.numpy().decode()
paths = self.data_paths[id_]
imh = self.config.getint('DEFAULT', 'imh')
imw = self.config.getint('DEFAULT', 'imw')
# Load images
base = xm.io.img.load(paths['diffuse'], as_array=True)[:, :, :3]
cvis = xm.io.img.load(paths['cvis'], as_array=True)
lvis = xm.io.img.load(paths['lvis'], as_array=True)
warp = ioutil.read_npy(paths['uv2cam'])
if self.mode == 'test':
rgb = np.zeros_like(base) # placeholders
rgb_camspc = np.zeros((imh, imw, 3))
else:
rgb = xm.io.img.load(paths['rgb'], as_array=True)[:, :, :3]
rgb_camspc = xm.io.img.load(
paths['rgb_camspc'], as_array=True)[:, :, :3]
# Normalize to [0, 1]
base = xm.img.normalize_uint(base)
cvis = xm.img.normalize_uint(cvis)
lvis = xm.img.normalize_uint(lvis)
if self.mode != 'test':
rgb = xm.img.normalize_uint(rgb)
rgb_camspc = xm.img.normalize_uint(rgb_camspc)
# Resize images
uvh = self.config.getint('DEFAULT', 'uvh')
base = xm.img.resize(base, new_h=uvh)
cvis = xm.img.resize(cvis, new_h=uvh)
lvis = xm.img.resize(lvis, new_h=uvh)
rgb = xm.img.resize(rgb, new_h=uvh)
rgb_camspc = xm.img.resize(rgb_camspc, new_h=imh, new_w=imw)
# NOTE: We didn't resize warp because this introduces artifacts --
# always warp first and then resize
# Neighbor diffuse base and full
nn = ioutil.read_json(paths['nn'])
nn_id = self._get_nn_id(nn)
if nn_id is None:
nn_id = 'incomplete-data_{cam}_{light}'.format(**nn)
# NOTE: When neighbor is missing, simply return black placeholders
nn_base = np.zeros_like(base)
nn_rgb = np.zeros_like(rgb)
nn_rgb_camspc = np.zeros_like(rgb_camspc)
else:
nn_base = xm.io.img.load(
self.data_paths[nn_id]['diffuse'], as_array=True)[:, :, :3]
nn_rgb = xm.io.img.load(
self.data_paths[nn_id]['rgb'], as_array=True)[:, :, :3]
nn_rgb_camspc = xm.io.img.load(
self.data_paths[nn_id]['rgb_camspc'], as_array=True)[:, :, :3]
nn_rgb_camspc = nn_rgb_camspc[:, :, :3] # discards alpha
nn_base = xm.img.normalize_uint(nn_base)
nn_rgb = xm.img.normalize_uint(nn_rgb)
nn_rgb_camspc = xm.img.normalize_uint(nn_rgb_camspc)
nn_base = xm.img.resize(nn_base, new_h=uvh)
nn_rgb = xm.img.resize(nn_rgb, new_h=uvh)
nn_rgb_camspc = xm.img.resize(nn_rgb_camspc, new_h=imh, new_w=imw)
# Return
base = base.astype(np.float32)
cvis = cvis.astype(np.float32)[:, :, None] # HxWx1
lvis = lvis.astype(np.float32)[:, :, None]
warp = warp.astype(np.float32)
rgb = rgb.astype(np.float32)
rgb_camspc = rgb_camspc.astype(np.float32)
nn_base = nn_base.astype(np.float32)
nn_rgb = nn_rgb.astype(np.float32)
nn_rgb_camspc = nn_rgb_camspc.astype(np.float32)
return \
id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id, nn_base, \
nn_rgb, nn_rgb_camspc
|
dpac-vlsi/SynchroTrace | util/ext/ply/test/yacc_literal.py | Python | bsd-3-clause | 1,566 | 0.014687 | # -----------------------------------------------------------------------------
# yacc_literal.py
#
# Grammar with bad literal characters
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','+','-'),
('left','*','/'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression
| expression '**' expression '''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
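# A minimal usage sketch (assumes the calclex lexer is built first, as in
# the other ply test drivers):
# import calclex
# yacc.parse("x = 2 + 3 * 4")  # binds names['x'] = 14
# yacc.parse("x")              # prints 14 via p_statement_expr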
|
assefay/inasafe | safe_qgis/impact_statistics/test/test_aggregator.py | Python | gpl-3.0 | 16,606 | 0.001144 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **GUI Test Cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Marco Bernasocchi'
__date__ = '10/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
import sys
import os
import logging
import numpy.testing
from os.path import join
# Add PARENT directory to path to make test aware of other modules
pardir = os.path.abspath(join(os.path.dirname(__file__), '..'))
sys.path.append(pardir)
from qgis.core import QgsVectorLayer, QgsCoordinateReferenceSystem
from safe.common.testing import get_qgis_app
from safe_qgis import breakdown_defaults
from safe_qgis.safe_interface import (
UNITDATA,
TESTDATA,
BOUNDDATA,
Raster,
Vector)
from safe_qgis.utilities.utilities_for_testing import (
set_canvas_crs,
set_jakarta_extent,
GEOCRS)
from safe_qgis.widgets.dock import Dock
from safe_qgis.impact_statistics.aggregator import Aggregator
#from safe_qgis.utilities.clipper import clip_layer
from safe_qgis.utilities.keyword_io import KeywordIO
from safe_qgis.utilities.utilities import (
extent_to_geo_array)
from safe_qgis.utilities.utilities_for_testing import (
load_standard_layers,
setup_scenario,
load_layers)
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
DOCK = Dock(IFACE)
LOGGER = logging.getLogger('InaSAFE')
#noinspection PyArgumentList
class AggregatorTest(unittest.TestCase):
"""Test the InaSAFE GUI"""
#noinspection PyPep8Naming
def setUp(self):
"""Fixture run before all tests"""
self.maxDiff = None # show full diff for assert errors
os.environ['LANG'] = 'en'
DOCK.show_only_visible_layers_flag = True
load_standard_layers()
DOCK.cboHazard.setCurrentIndex(0)
DOCK.cboExposure.setCurrentIndex(0)
DOCK.cboFunction.setCurrentIndex(0)
DOCK.run_in_thread_flag = False
DOCK.show_only_visible_layers_flag = False
DOCK.set_layer_from_title_flag = False
DOCK.zoom_to_impact_flag = False
DOCK.hide_exposure_flag = False
DOCK.show_intermediate_layers = False
set_jakarta_extent()
self._keywordIO = KeywordIO()
self._defaults = breakdown_defaults()
# Set extent as Jakarta extent
geo_crs = QgsCoordinateReferenceSystem()
geo_crs.createFromSrid(4326)
self.extent = extent_to_geo_array(CANVAS.extent(), geo_crs)
def test_combo_aggregation_loaded_project(self):
"""Aggregation combo changes properly according loaded layers"""
layer_list = [
DOCK.tr('Entire area'),
DOCK.tr('kabupaten jakarta singlepart')]
current_layers = [DOCK.cboAggregation.itemText(i) for i in range(
DOCK.cboAggregation.count())]
message = (
'The aggregation combobox should have:\n %s \nFound: %s'
% (layer_list, current_layers))
self.assertEquals(current_layers, layer_list, message)
def test_aggregation_attribute_in_keywords(self):
"""Aggregation attribute is chosen correctly when present in keywords.
"""
attribute_key = breakdown_defaults('AGGR_ATTR_KEY')
# with KAB_NAME aggregation attribute defined in .keyword using
# kabupaten_jakarta_singlepart.shp
result, message = setup_scenario(
DOCK,
hazard='A flood in Jakarta like in 2007',
exposure='People',
function='Need evacuation',
function_id='Flood Evacuation Function',
aggregation_layer='kabupaten jakarta singlepart',
aggregation_enabled_flag=True)
assert result, message
# Press RUN
DOCK.accept()
DOCK.runtime_keywords_dialog.accept()
attribute = DOCK.aggregator.attributes[attribute_key]
message = ('The aggregation should be KAB_NAME. Found: %s' % attribute)
self.assertEqual(attribute, 'KAB_NAME', message)
def test_check_aggregation_single_attribute(self):
"""Aggregation attribute is chosen correctly when there is only
one attr available."""
file_list = ['kabupaten_jakarta_singlepart_1_good_attr.shp']
#add additional layers
load_layers(file_list, clear_flag=False)
attribute_key = breakdown_defaults('AGGR_ATTR_KEY')
# with 1 good aggregation attribute using
# kabupaten_jakarta_singlepart_1_good_attr.shp
result, message = setup_scenario(
DOCK,
hazard='A flood in Jakarta like in 2007',
exposure='People',
function='Need evacuation',
function_id='Flood Evacuation Function',
aggregation_layer='kabupaten jakarta singlepart 1 good attr')
assert result, message
# Press RUN
# noinspection PyCallByClass,PyTypeChecker
DOCK.accept()
DOCK.runtime_keywords_dialog.accept()
print attribute_key
print DOCK.aggregator.attributes
attribute = DOCK.aggregator.attributes[attribute_key]
message = (
'The aggregation should be KAB_NAME. Found: %s' % attribute)
self.assertEqual(attribute, 'KAB_NAME', message)
#noinspection PyMethodMayBeStatic
def test_check_aggregation_no_attributes(self):
"""Aggregation attribute chosen correctly when no attr available."""
file_list = ['kabupaten_jakarta_singlepart_0_good_attr.shp']
#add additional layers
load_layers(file_list, clear_flag=False)
attribute_key = breakdown_defaults('AGGR_ATTR_KEY')
# with no good aggregation attribute using
# kabupaten_jakarta_singlepart_0_good_attr.shp
result, message = setup_scenario(
DOCK,
hazard='A flood in Jakarta like in 2007',
exposure='People',
function='Need evacuation',
function_id='Flood Evacuation Function',
aggregation_layer='kabupaten jakarta singlepart 0 good attr')
assert result, message
# Press RUN
DOCK.accept()
DOCK.runtime_keywords_dialog.accept()
attribute = DOCK.aggregator.attributes[attribute_key]
message = (
'The aggregation should be None. Found: %s' % attribute)
assert attribute is None, message
#noinspection PyMethodMayBeStatic
def test_check_aggregation_none_in_keywords(self):
"""Aggregation attribute is chosen correctly when None in keywords."""
file_list = ['kabupaten_jakarta_singlepart_with_None_keyword.shp']
#add additional layers
load_layers(file_list, clear_flag=False)
attribute_key = breakdown_defaults('AGGR_ATTR_KEY')
# with None aggregation attribute defined in .keyword using
# kabupaten_jakarta_singlepart_with_None_keyword.shp
result, message = setup_scenario(
DOCK,
hazard='A flood in Jakarta like in 2007',
exposure='People',
function='Need evacuation',
function_id='Flood Evacuation Function',
aggregation_layer='kabupaten jakarta singlepart with None keyword')
assert result, message
# Press RUN
DOCK.accept()
DOCK.runtime_keywords_dialog.accept()
attribute = DOCK.aggregator.attributes[attribute_key]
message = ('The aggregation should be None. Found: %s' % attribute)
assert attribute is None, message
def test_setup_target_field(self):
"""Test setup up target field is correct
"""
layer = QgsVectorLayer(
os.path.join(BOUNDDATA, 'kabupaten_jakarta.shp'),
'test aggregation',
'ogr')
aggregator = Aggregator(self.extent, None)
self.assertFalse(aggregator._setup_target_field(laye |
rtrwalker/geotecha | examples/speccon/speccon1d_vr_4layers_vert_schiffmanandstein1970_Fig2.py | Python | gpl-3.0 | 2,145 | 0.000932 | # speccon1d_vr example
# Vertical consolidation of four soil layers
# Figure 2 from:
# Schiffman, R. L, and J. R Stein. (1970) 'One-Dimensional Consolidation of
# Layered Systems'. Journal of the Soil Mechanics and Foundations
# Division 96, no. 4 (1970): 1499-1504.
from __future__ import division, print_function
import numpy as np
from geotecha.speccon.speccon1d_vr import Speccon1dVR
import matplotlib.pyplot as plt
# the reader string is a template with {} indicating where parameters will be
# inserted. Use double curly braces {{}} if you need curly braces in your
# string.
reader = """\
# Parameters from Schiffman and Stein(1970)
h = np.array([10, 20, 30, 20]) # feet
cv = np.array([0.0411, 0.1918, 0.0548, 0.0686]) # square feet per day
mv = np.array([3.07e-3, 1.95e-3, 9.74e-4, 1.95e-3]) # square feet per kip
#kv = np.array([7.89e-6, 2.34e-5, 3.33e-6, 8.35e-6]) # feet per day
kv = cv*mv # assume kv values are actually kv/gamw
# speccon1d_vr parameters
drn = 0
neig = 60
H = np.sum(h)
z2 = np.cumsum(h) / H # Normalized Z at bottom of each layer
z1 = (np.cumsum(h) - h) / H # Normalized Z at top of each layer
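# Worked numbers for the thicknesses above: H = 80 ft, giving
# z2 = [0.125, 0.375, 0.75, 1.0] and z1 = [0.0, 0.125, 0.375, 0.75]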
mvref = mv[0] # Choosing 1st layer as reference value
kvref = kv[0] # Choosing 1st layer as reference value
dTv = 1 / H**2 * kvref / mvref
mv = PolyLine(z1, z2, mv/mvref, mv/mvref)
kv = PolyLine(z1, z2, kv/kvref, kv/kvref)
surcharge_vs_time = PolyLine([0,0,30000], [0,1,1])
surcharge_vs_depth = PolyLine([0,1], [1,1]) # Load is uniform with depth
ppress_z = np.linspace(0,1,200)
tvals = [740, 2930, 7195]
show_figures=False
author = "Dr. Rohan Walker"
"""
# Run the speccon analysis
a = Speccon1dVR(reader)
a.make_all()
# Make a custom plot (alternately use "show_figures=True" in the input for
# generic plots)
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot('111')
ax.set_xlabel('Pore pressure, u/q')
ax.set_ylabel('Depth, feet')
ax.invert_yaxis()
ax.grid()
ax.set_title('Schiffman and Stein (1970), Figure 2')
lineObjects = plt.plot(a.por, a.ppress_z * a.H)
leg_title='Time (days)'
leg = ax.legend(lineObjects, a.tvals, title=leg_title, loc=6)
leg.draggable()
plt.tight_layout()
plt.show()
|
saltstack/salt | tests/pytests/unit/states/postgresql/test_user.py | Python | apache-2.0 | 23,174 | 0.00082 | import pytest
import salt.modules.postgres as postgres
import salt.states.postgres_user as postgres_user
from tests.support.mock import create_autospec, patch
class ScramHash:
def __eq__(self, other):
return other.startswith("SCRAM-SHA-256$4096:")
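# ScramHash acts as an equality matcher: any string that starts with the
# SCRAM header compares equal (e.g. ScramHash() == "SCRAM-SHA-256$4096:x"
# is True), letting mock assertions check that some SCRAM hash was passed
# without knowing the salted value.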
@pytest.fixture(name="db_args")
def fixture_db_args():
return {
"runas": None,
"host": None,
"port": None,
"maintenance_db": None,
"user": None,
"password": None,
}
@pytest.fixture(name="md5_pw")
def fixture_md5_pw():
# 'md5' + md5('password' + 'username')
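# (illustratively, the same value as
# 'md5' + hashlib.md5(b'password' + b'username').hexdigest(),
# the scheme PostgreSQL uses for md5 role passwords)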
return "md55a231fcdb710d73268c4f44283487ba2"
@pytest.fixture(name="scram_pw")
def fixture_scram_pw():
# scram_sha_256('password')
return (
"SCRAM-SHA-256$4096:wLr5nqC+3F+r7FdQPnB+nA==$"
"0hn08ZdX8kirGaL4TM0j13digH9Wl365OOzCtAuF2pE=:"
"LzAh/MGUdjYkdbDzcOKpfGwa3WwPUsyGcY+TEnSpcto="
)
@pytest.fixture(name="existing_user")
def fixture_existing_user(md5_pw):
return {
"superuser": False,
"inherits privileges": True,
"can create roles": False,
"can create databases": False,
"can update system catalogs": None,
"can login": True,
"replication": False,
"connections": None,
"expiry time": None,
"defaults variables": "",
"password": md5_pw,
"groups": [],
}
@pytest.fixture(name="test_mode")
def fixture_test_mode():
with patch.dict(postgres_user.__opts__, {"test": True}):
yield
@pytest.fixture(name="mocks")
def fixture_mocks():
return {
"postgres.role_get": create_autospec(postgres.role_get, return_value=None),
"postgres.user_exists": create_autospec(
postgres.user_exists, return_value=False
),
"postgres.user_create": create_autospec(
postgres.user_create, return_value=True
),
"postgres.user_update": create_autospec(
postgres.user_update, return_value=True
),
"postgres.user_remove": create_autospec(
postgres.user_remove, return_value=True
),
}
@pytest.fixture(autouse=True)
def setup_loader(mocks):
setup_loader_modules = {
postgres_user: {"__opts__": {"test": False}, "__salt__": mocks},
postgres: {"__opts__": {"test": False}},
}
with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
yield loader_mock
# ==========
# postgres_user.present
# ==========
def test_present_create_basic(mocks, db_args):
assert postgres_user.present("username") == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="md5",
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=None,
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
@pytest.mark.usefixtures("test_mode")
def test_present_create_basic_test(mocks, db_args):
assert postgres_user.present("username") == {
"name": "username",
"result": None,
"changes": {},
"comment": "User username is set to be created",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_exists_basic(mocks, existing_user, db_args):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username") == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is already present",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_create_basic_error(mocks, db_args):
mocks["postgres.user_create"].return_value = False
assert postgres_user.present("username") == {
"name": "username",
"result": False,
"changes": {},
"comment": "Failed to create user username",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_called_once()
mocks["postgres.user_update"].assert_not_called()
def test_present_change_option(mocks, existing_user, db_args):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username", replication=True) == {
"name": "username",
"result": True,
"changes": {"username": {"replication": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="md5",
superuser=None,
login=None,
inherit=None,
replication=True,
rolepassword=None,
valid_until=None,
groups=None,
**db_args
)
def test_present_create_md5_password(mocks, md5_pw, db_args):
assert postgres_user.present("username", password="password", encrypted=True) == {
"nam | e": "username",
"result": T | rue,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_create_scram_password(mocks, db_args):
assert postgres_user.present(
"username", password="password", encrypted="scram-sha-256"
) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="scram-sha-256",
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=ScramHash(),
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_create_plain_password(mocks, db_args):
assert postgres_user.present("username", password="password", encrypted=False) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=False,
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword="password",
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_create_md5_password_default_plain(mocks, monkeypatch, md5_pw, db_args):
monkeypatch.setattr(postgres, "_DEFAULT_PASSWORDS_ENCRYPTION", False)
test_present_create_md |
markflyhigh/incubator-beam | sdks/python/apache_beam/examples/cookbook/bigtableio_it_test.py | Python | apache-2.0 | 7,002 | 0.008569 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittest for GCP Bigtable testing."""
from __future__ import absolute_import
import datetime
import logging
import random
import string
import unittest
import uuid
import pytz
import apache_beam as beam
from apache_beam.io.gcp.bigtableio import WriteToBigTable
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.test_pipeline import TestPipeline
# Protect against environments where bigtable library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _microseconds_from_datetime
from google.cloud._helpers import UTC
from google.cloud.bigtable import row, column_family, Client
except ImportError:
Client = None
UTC = pytz.utc
_microseconds_from_datetime = lambda label_stamp: label_stamp
_datetime_from_microseconds = lambda micro: micro
EXISTING_INSTANCES = []
LABEL_KEY = u'python-bigtable-beam'
label_stamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
label_stamp_micros = _microseconds_from_datetime(label_stamp)
LABELS = {LABEL_KEY: str(label_stamp_micros)}
class GenerateTestRows(beam.PTransform):
""" A transform test to run write to the Bigtable Table.
A PTransform that generate a list of `DirectRow` to write it in
Bigtable Table.
"""
def __init__(self, number, project_id=None, instance_id=None,
table_id=None):
super(GenerateTestRows, self).__init__()
self.number = number
self.rand = random.choice(string.ascii_letters + string.digits)
self.column_family_id = 'cf1'
self.beam_options = {'project_id': project_id,
'instance_id': instance_id,
'table_id': table_id}
def _generate(self):
value = ''.join(self.rand for i in range(100))
for index in range(self.number):
key = "beam_key%s" % ('{0:07}'.format(index))
direct_row = row.DirectRow(row_key=key)
for column_id in range(10):
direct_row.set_cell(self.column_family_id,
('field%s' % column_id).encode('utf-8'),
value,
datetime.datetime.now())
yield direct_row
def expand(self, pvalue):
beam_options = self.beam_options
return (pvalue
| beam.Create(self._generate())
| WriteToBigTable(beam_options['project_id'],
beam_options['instance_id'],
beam_options['table_id']))
@unittest.skipIf(Client is None, 'GCP Bigtable dependencies are not installed')
class BigtableIOWriteTest(unittest.TestCase):
""" Bigtable Write Connector Test
"""
DEFAULT_TABLE_PREFIX = "python-test"
instance_id = DEFAULT_TABLE_PREFIX + "-" + str(uuid.uuid4())[:8]
cluster_id = DEFAULT_TABLE_PREFIX + "-" + str(uuid.uuid4())[:8]
table_id = DEFAULT_TABLE_PREFIX + "-" + str(uuid.uuid4())[:8]
number = 500
LOCATION_ID = "us-east1-b"
def setUp(self):
try:
from google.cloud.bigtable import enums
self.STORAGE_TYPE = enums.StorageType.HDD
self.INSTANCE_TYPE = enums.Instance.Type.DEVELOPMENT
except ImportError:
self.STORAGE_TYPE = 2
self.INSTANCE_TYPE = 2
self.test_pipeline = TestPipeline(is_integration_test=True)
self.runner_name = type(self.test_pipeline.runner).__name__
self.project = self.test_pipeline.get_option('project')
self.client = Client(project=self.project, admin=True)
self._delete_old_instances()
self.instance = self.client.instance(self.instance_id,
instance_type=self.INSTANCE_TYPE,
labels=LABELS)
if not self.instance.exists():
cluster = self.instance.cluster(self.cluster_id,
self.LOCATION_ID,
default_storage_type=self.STORAGE_TYPE)
self.instance.create(clusters=[cluster])
self.table = self.instance.table(self.table_id)
if not self.table.exists():
max_versions_rule = column_family.MaxVersionsGCRule(2)
column_family_id = 'cf1'
column_families = {column_family_id: max_versions_rule}
self.table.create(column_families=column_families)
def _delete_old_instances(self):
instances = self.client.list_instances()
EXISTING_INSTANCES[:] = instances
def age_in_hours(micros):
return (datetime.datetime.utcnow().replace(tzinfo=UTC) - (
_datetime_from_microseconds(micros))).total_seconds() // 3600
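# Illustrative: an instance labelled 3 hours ago gives
# age_in_hours(label_micros) == 3.0, so only instances at least two
# hours old are swept into CLEAN_INSTANCE below.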
CLEAN_INSTANCE = [i for instance in EXISTING_INSTANCES for i in instance if(
LABEL_KEY in i.labels.keys() and
(age_in_hours(int(i.labels[LABEL_KEY])) >= 2))]
if CLEAN_INSTANCE:
for instance in CLEAN_INSTANCE:
instance.delete()
def tearDown(self):
if self.instance.exists():
self.instance.delete()
def test_bigtable_write(self):
number = self.number
pipeline_args = self.test_pipeline.options_list
pipeline_options = PipelineOptions(pipeline_args)
with beam.Pipeline(options=pipeline_options) as pipeline:
config_data = {'project_id':self.project,
'instance_id':self.instance,
'table_id':self.table}
_ = (
pipeline
| 'Generate Direct Rows' >> GenerateTestRows(number, **config_data))
result = pipeline.run()
result.wait_until_finish()
assert result.state == PipelineState.DONE
read_rows = self.table.read_rows()
assert len([_ for _ in read_rows]) == number
if not hasattr(result, 'has_job') or result.has_job:
read_filter = MetricsFilter().with_name('Written Row')
query_result = result.metrics().query(read_filter)
if query_result['counters']:
read_counter = query_result['counters'][0]
logging.info('Number of Rows: %d', read_counter.committed)
assert read_counter.committed == number
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
DHTC-Tools/parrot-benchmark-tools | Root/parrot-root/get_file_list.py | Python | apache-2.0 | 494 | 0.008097 | #!/usr/bin/env python
import sys, random
if len(sys.argv) != 3:
sys.stderr.write("Must provide file with list of filenames and number of files to pick\n")
sys.exit(1)
file_list = open(sys.argv[1])
file_array = []
for filepath in file_list:
file_array.append(filepath.strip())
try:
choices = int(sys.argv[2])
except:
sys.stderr.write("Can't get the number of files to pick\n")
sys.exit(1)
for i in range(choices):
sys.stdout.write("%s\n" % random.choice(file_array))
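# Example invocation (hypothetical file name):
# ./get_file_list.py filelist.txt 5
# prints 5 paths sampled, with replacement, from filelist.txt.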
| |
psychopy/psychopy | psychopy/experiment/routines/_base.py | Python | gpl-3.0 | 27,908 | 0.001039 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2022 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Describes the Flow of an experiment
"""
from psychopy.constants import FOREVER
from xml.etree.ElementTree import Element
from pathlib import Path
from psychopy.localization import _translate
from psychopy.experiment import Param
class BaseStandaloneRoutine:
categories = ['Custom']
targets = []
iconFile = Path(__file__).parent / "unknown" / "unknown.png"
tooltip = ""
limit = float('inf')
def __init__(self, exp, name='',
stopType='duration (s)', stopVal='',
disabled=False):
self.params = {}
self.name = name
self.exp = exp
self.type = 'StandaloneRoutine'
self.depends = [] # allows params to turn each other off/on
self.order = ['stopVal', 'stopType', 'name']
msg = _translate(
"Name of this routine (alphanumeric or _, no spaces)")
self.params['name'] = Param(name,
valType='code', inputType="single", categ='Basic',
hint=msg,
label=_translate('name'))
self.params['stopVal'] = Param(stopVal,
valType='num', inputType="single", categ='Basic',
updates='constant', allowedUpdates=[], allowedTypes=[],
hint=_translate("When does the routine end? (blank is endless)"),
label=_translate('Stop'))
msg = _translate("How do you want to define your end point?")
self.params['stopType'] = Param(stopType,
valType='str', inputType="choice", categ='Basic',
allowedVals=['duration (s)', 'duration (frames)', 'condition'],
hint=msg, direct=False,
label=_translate('Stop Type...'))
# Testing
msg = _translate("Disable this component")
self.params['disabled'] = Param(disabled,
valType='bool', inputType="bool", categ="Testing",
hint=msg, allowedTypes=[], direct=False,
label=_translate('Disable component'))
def __repr__(self):
_rep = "psychopy.experiment.routines.%s(name='%s', exp=%s)"
return _rep % (self.__class__.__name__, self.name, self.exp)
def __iter__(self):
"""Overloaded iteration behaviour - if iterated through, a standaloneRoutine returns
itself once, so it can be treated like a regular routine"""
self.__iterstop = False
return self
def __next__(self):
"""Overloaded iteration behaviour - if iterated through, a standaloneRoutine returns
itself once, so it can be treated like a regular routine"""
if self.__iterstop:
# Stop after one iteration
self.__iterstop = False
raise StopIteration
else:
self.__iterstop = True
return self
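# Net effect: "for r in standalone_routine" yields the routine exactly
# once, so Flow code can iterate standalone routines and component-list
# Routines with the same loop.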
@property
def _xml(self):
# Make root element
element = Element(self.__class__.__name__)
element.set("name", self.params['name'].val)
# Add an element for each parameter
for key, param in sorted(self.params.items()):
# Create node
paramNode = Element("Param")
paramNode.set("name", key)
# Assign values
if hasattr(param, 'updates'):
paramNode.set('updates', "{}".format(param.updates))
if hasattr(param, 'val'):
paramNode.set('val', u"{}".format(param.val).replace("\n", " "))
if hasattr(param, 'valType'):
paramNode.set('valType', param.valType)
element.append(paramNode)
return element
def writePreCode(self, buff):
return
def writePreCodeJS(self, buff):
return
def writeStartCode(self, buff):
return
def writeStartCodeJS(self, buff):
return
def writeRunOnceInitCode(self, buff):
return
def writeInitCode(self, buff):
return
def writeInitCodeJS(self, buff):
return
def writeMainCode(self, buff):
return
def writeRoutineBeginCodeJS(self, buff, modular):
return
def writeEachFrameCodeJS(self, buff, modular):
return
def writeRoutineEndCode(self, buff):
# reset routineTimer at the *very end* of all non-nonSlip routines
code = ('# the Routine "%s" was not non-slip safe, so reset '
'the non-slip timer\n'
'routineTimer.reset()\n')
buff.writeIndentedLines(code % self.name)
def writeRoutineEndCodeJS(self, buff, modular):
return
def writeExperimentEndCode(self, buff):
return
def writeExperimentEndCodeJS(self, buff):
return
def getType(self):
return self.__class__.__name__
def getComponentFromName(self, name):
return None
def getComponentFromType(self, thisType):
return None
def hasOnlyStaticComp(self):
return False
def getMaxTime(self):
"""If routine has a set duration, will return this along with True (as this routine is nonSlipSafe, i.e. has a fixed duration). Otherwise, will treat max time as 0 and will mark routine as nonSlipSafe (i.e. has a variable duration)..
"""
# Assume max time of 0 and not nonSlipSafe
maxTime = 0
nonSlipSafe = False
# If has a set duration, store set duration and mark as nonSlipSafe
if 'stopVal' in self.params and 'stopType' in self.params:
if self.params['stopType'] in ['duration (s)', 'duration (frames)']:
maxTime = float(self.params['stopVal'].val or 0)
nonSlipSafe = True
return maxTime, nonSlipSafe
def getStatics(self):
return []
@property
def name(self):
if hasattr(self, 'params'):
if 'name' in self.params:
return self.params['name'].val
return self.type
@name.setter
def name(self, value):
if hasattr(self, 'params'):
if 'name' in self.params:
self.params['name'].val = value
@property
def disabled(self):
return bool(self.params['disabled'])
@disabled.setter
def disabled(self, value):
self.params['disabled'].val = value
class Routine(list):
"""
A Routine determines a single sequence of events, such
as the presentation of trial. Multiple Routines might be
used to comprise an Experiment (e.g. one for presenting
instructions, one for trials, one for debriefing subjects).
In practice a Routine is simply a python list of Components,
each of which knows when it starts and stops.
"""
targets = ["PsychoPy", "PsychoJS"]
def __init__(self, name, exp, components=()):
super(Routine, self).__init__()
self.params = {'name': name}
self.name = name
self.exp = exp
self._clockName = None # for scripts e.g. "t = trialClock.GetTime()"
self.type = 'Routine'
list.__init__(self, list(components))
def __repr__(self):
_rep = "psychopy.experiment.Routine(name='%s', exp=%s, components=%s)"
return _rep % (self.name, self.exp, str(list(self)))
@property
def _xml(self):
# Make root element
element = Element("Routine")
element.set("name", self.name)
# Add each component's element
for comp in self:
element.append(comp._xml)
return element
@property
def name(self):
return self.params['name']
@name.setter
def name(self, name):
self.params['name'] = name
def integrityCheck(self):
"""Run tests on self and on all the Components inside"""
for entry in self:
if hasattr(entry, "integrityCheck"):
entry.integrityCheck()
def addComponent(self, component):
"""Add a component to the end of the routine"""
self.append(component)
def inse |
OCA/account-analytic | mrp_analytic_sale_project/models/sale_order.py | Python | agpl-3.0 | 490 | 0.002041 | # Copyright (C) 2021 Open Source Integrators
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models
class SaleOrder(models.Model):
_inherit = "sale.order"
def action_confirm(self):
res = super(SaleOrder, self).action_confirm()
for order in self:
order.procurement_group_id.stock_move_ids.created_production_id.write(
{"analytic_account_id": order.analytic_account_i | d}
)
return res
|
ryanpitts/growlerbot | scrapers/__init__.py | Python | mit | 45 | 0 | from growler_guys import scrape_growler_guys
|
Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob/samples/blob_samples_authentication_async.py | Python | mit | 5,908 | 0.003047 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: blob_samples_authentication_async.py
DESCRIPTION:
These samples demonstrate authenticating a client via a connection string,
shared access key, or by generating a sas token with which the returned signature
can be used with the credential parameter of any BlobServiceClient,
ContainerClient, BlobClient.
USAGE:
python blob_samples_authentication_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
2) OAUTH_STORAGE_ACCOUNT_NAME - the oath storage account name
3) AZURE_STORAGE_ACCOUNT_NAME - the name of the storage account
4) AZURE_STORAGE_ACCESS_KEY - the storage account access key
5) ACTIVE_DIRECTORY_APPLICATION_ID - Azure Active Directory application ID
6) ACTIVE_DIRECTORY_APPLICATION_SECRET - Azure Active Directory application secret
7) ACTIVE_DIRECTORY_TENANT_ID - Azure Active Directory tenant ID
"""
import os
import asyncio
class AuthSamplesAsync(object):
url = "https://{}.blob.core.windows.net".format(
os.getenv("AZURE_STORAGE_ACCOUNT_NAME")
)
oauth_url = "https://{}.blob.core.windows.net".format(
os.getenv("OAUTH_STORAGE_ACCOUNT_NAME")
)
connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
shared_access_key = os.getenv("AZURE_STORAGE_ACCESS_KEY")
active_directory_application_id = os.getenv("ACTIVE_DIRECTORY_APPLICATION_ID")
active_directory_application_secret = os.getenv("ACTIVE_DIRECTORY_APPLICATION_SECRET")
active_directory_tenant_id = os.getenv("ACTIVE_DIRECTORY_TENANT_ID")
async def auth_connection_string_async(self):
# [START auth_from_connection_string]
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# [END auth_from_connection_string]
# [START auth_from_connection_string_container]
from azure.storage.blob.aio import ContainerClient
container_client = ContainerClient.from_connection_string(
self.connection_string, container_name="mycontainer")
# [END auth_from_connection_string_container]
# [START auth_from_connection_string_blob]
from azure.storage.blob.aio import BlobClient
blob_client = BlobClient.from_connection_string(
self.connection_string, container_name="mycontainer", blob_name="blobname.txt")
# [END auth_from_connection_string_blob]
async def auth_shared_key_async(self):
# [START create_blob_service_client]
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient(account_url=self.url, credential=self.shared_access_key)
# [END create_blob_service_client]
async def auth_blob_url_async(self):
# [START create_blob_client]
from azure.storage.blob.aio import BlobClient
blob_client = BlobClient.from_blob_url(blob_url="https://account.blob.core.windows.net/container/blob-name")
# [END create_blob_client]
# [START create_blob_client_sas_url]
from azure.storage.blob.aio import BlobClient
sas_url = "https://account.blob.core.windows.net/container/blob-name?sv=2015-04-05&st=2015-04-29T22%3A18%3A26Z&se=2015-04-30T02%3A23%3A26Z&sr=b&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https&sig=Z%2FRHIX5Xcg0Mq2rqI3OlWTjEg2tYkboXr1P9ZUXDtkk%3D"
blob_client = BlobClient.from_blob_url(sas_url)
# [END create_blob_client_sas_url]
async def auth_active_directory_async(self):
# [START create_blob_service_client_oauth]
# Get a token credential for authentication
from azure.identity.aio import ClientSecretCredential
token_credential = ClientSecretCredential(self.active_directory_tenant_id, self.active_directory_application_id,
self.active_directory_application_secret)
# Instantiate a BlobServiceClient using a token credential
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient(account_url=self.oauth_url, credential=token_credential)
# [END create_blob_service_client_oauth]
async def auth_shared_access_signature_async(self):
# Instantiate a BlobServiceClient using a connection string
from azure.storage.blob.aio import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(self.connection_string)
# [START create_sas_token]
# Create a SAS token to use to authenticate a new client
from datetime import datetime, timedelta
from azure.storage.blob import ResourceTypes, AccountSasPermissions, generate_account_sas
sas_token = generate_account_sas(
blob_service_client.account_name,
account_key=blob_service_client.credential.account_key,
resource_types=ResourceTypes(object=True),
permission=AccountSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
# [END create_sas_token]
async def main():
sample = AuthSamplesAsync()
# Uncomment the methods you want to execute.
await sample.auth_connection_string_async()
# await sample.auth_active_directory()
await sample.auth_shared_access_signature_async()
await sample.auth_blob_url_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
alaeddine10/ggrc-core | src/ggrc/app.py | Python | apache-2.0 | 1,435 | 0.007666 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By:
# Maintained By:
from . import settings
# Initialize Flask app
from flask import Flask
app = Flask('ggrc', instance_relative_config=True)
app.config.from_object(settings)
# Configure Flask-SQLAlchemy for app
from . import db
db.app = app
db.init_app(app)
# Configure Flask-Login
import ggrc.login
ggrc.login.init_app(app)
# Configure webassets for app
from . import assets
app.jinja_env.add_extension('webassets.ext.jinja2.assets')
app.jinja_env.assets_environment = assets.environment
# Configure Jinja2 extensions for app
app.jinja_env.add_extension('jinja2.ext.autoescape')
app.jinja_env.add_extension('jinja2.ext.with_')
app.jinja_env.add_extension('hamlpy.ext.HamlPyExtension')
# Initialize services
import ggrc.services
ggrc.services.init_all_services(app)
# Initialize views
import ggrc.views
ggrc.views.init_all_object_views(app)
# Initialize configured and default extensions
from ggrc.fulltext import get_indexer
ggrc.indexer = get_indexer()
if settings.ENABLE_JASMINE:
# Configure Flask-Jasmine, for dev mode unit testing
from flask.ext.jasmine import Jasmine, Asset
jasmine = Jasmine(app)
jasmine.sources(
Asset("dashboard-js"),
Asset("dashboard-js-spec-helpers"))
jasmine.specs(
Asset("dashboard-js-specs"))
|
ua-snap/downscale | old/old_bin/sort_files_by_rcp.py | Python | mit | 1,564 | 0.064578 | import os, glob, shutil
from pathos import multiprocessing as mp
import pandas as pd
import numpy as np
base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
output_base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
models = [ 'IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
# variables = ['rsds', 'vap' ]
for model in models:
variables = os.listdir( os.path.join( base_path, model ) )
_ = [ os.makedirs( os.path.join( base_path, model, variable ) ) for variable in variables if not os.path.exists( os.path.join( base_path, model, variable ) ) ]
for variable in variables:
print( ' '.join([model, variable]) )
output_path = os.path.join( output_base_path, model, variable, 'downscaled' )
cur_path = os.path.join( base_path, model, variable, 'downscaled' )
l = pd.Series( glob.glob( os.path.join( cur_path, '*.tif' ) ) )
grouper = [ os.path.basename(i).split( '_' )[ 5 ] for i in l ]
rcp_groups = l.groupby( grouper )
name_group = [ group for group in rcp_groups ]
names = [ i[0] for i in name_group ]
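# Illustrative (assumed SNAP-style naming): for a file named
# 'tas_mean_C_ar5_GFDL-CM3_rcp60_01_2006.tif', split('_')[5] is 'rcp60',
# so downscaled files are grouped into one folder per RCP scenario.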
_ = [ os.makedirs( os.path.join( output_path, name ) ) for name in names if not os.path.exists( os.path.join( output_path, name ) ) ]
for count, name in enumerate( names ):
print count
group = name_group[ count ]
out_group = [ os.path.join( output_path, name, os.path.basename( i ) ) for i in group[1] ]
def run( x, y ):
import shutil
return shutil.move( x, y )
pool = mp.Pool( 15 )
out = pool.map( lambda x: run(x[0], x[1]), zip( group[1], out_group ) )
pool.close()
|
grajasumant/ducking-octo-cyril | misctests/mandelbrot_cuda.py | Python | gpl-2.0 | 1,857 | 0.008078 | ####################################################
# Calculate Mandelbrot set and save it as a bmp image
#
# Data parallel version using Pycuda
# Create string with cuda code and let the graphics
# card farm out the work to each warp.
#
####################################################
import bmp
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
import numpy as nm
# maximal number of iterations to compute a pixel
MAX_ITER = 256
# image dimensions
from sizes import nx,ny
from pycuda.elementwise import ElementwiseKernel
complex_gpu = ElementwiseKernel(
"int nx, int ny, int maxiter, int *iteration",
"""
float zr, zi, z2;
float jf = 1.0f*(i%ny);
float rowf = 1.0f*(i/ny);
float nxf = 1.0f*nx;
float nyf = 1.0f*ny;
float qif = 4.0f*rowf/nyf-2.0f;
float qrf = 4.0f*jf/nxf-2.0f;
iteration[i] = maxiter;
zr = 0.0f;
zi = 0.0f;
for(int n=0;n < maxiter;n++) {
float nzr = zr*zr - zi*zi + qrf;
float nzi = 2.0*zr*zi + qif;
zi = nzi;
zr = nzr;
z2 = zr*zr+zi*zi;
if(z2 > 4.0f) {
iteration[i] = n;
break;
}
}
""",
"mandlebrot_gpu",)
# allocate a numpy array
iterations = nm.zeros(nx*ny).astype(nm.int32)
# allocate a gpu array
iterations_gpu = gpuarray.to_gpu(iterations)
# perform the gpu calculation
complex_gpu(nm.int16(nx), nm.int16(ny), nm.int16(MAX_ITER), iterations_gpu)
# copy data from the gpu array to the numpy array
iterations_gpu.get(iterations)
# reshape the array to look the way we want
image = iterations.reshape(nx,ny)
bmp.write_image('image.bmp', nx, ny, image, MAX_ITER)
|
irisliu0616/Short-text-Classification | Model/Ohsumed/ohsumed_LSTM-SVM_CV.py | Python | mit | 10,500 | 0.013905 |
from __future__ import print_function
import os
import sys
import numpy as np
import time
from IPython import get_ipython
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Flatten, Activation
from keras.layers import Convolution1D, MaxPooling1D, Embedding, LSTM
from keras.models import Model, load_model
from keras.layers import Input, Dropout
from keras.optimizers import SGD, Adadelta
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from sklearn import metrics
from sklearn import *
from sklearn.model_selection import train_test_split, KFold
import theano
import csv
import h5py
MAX_SEQUENCE_LENGTH = 200 # pad zero for length longer than Max_sequence length
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.1
DROP_OUT = 0.3
Nb_EPOCH = 30
BATCH_SIZE = 50
Classes = 5
K_FOLD = 5
GLOVE_DIR = './glove.6B/'
FILENAME = 'glove.6B.' + str(EMBEDDING_DIM) + 'd.txt'
TEXT_DATA_DIR = './ohsumed_' + str(Classes)
weights_path = "Ohsumed_LSTM_model_" + str(MAX_SEQUENCE_LENGTH)+ "D_" + str(Classes) +".h5"
def write_csv_result(fname, train_accuracy, valid_accuracy, test_accuracy, time):
global header, items
global MAX_SEQUENCE_LENGTH, MAX_NB_WORDS , EMBEDDING_DIM, VALIDATION_SPLIT, DROP_OUT, Nb_EPOCH, BATCH_SIZE, Classes
header = [['Classes', 'Dropout', 'Iterations', 'Batch Size','Embedding Dimension',
'Training Accuracy', 'Validation Accuracy', 'Test Accuracy', 'Time']]
items = [Classes, DROP_OUT, Nb_EPOCH, BATCH_SIZE, EMBEDDING_DIM,
train_accuracy, valid_accuracy, test_accuracy, time]
header.append(items)
f = open(fname, 'wb')
writer = csv.writer(f)
writer.writerows(header)
header = [] # reset header after each loop
f.close()
def embedding_index(GLOVE_DIR, FILENAME):
global embeddings_index
embeddings_index = {}
fname = os.path.join(GLOVE_DIR, FILENAME)
f = open(fname)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
def embeddingMatrix():
global nb_words, embedding_matrix
print('Preparing embedding matrix.')
nb_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
def load_data(TEXT_DATA_DIR):
print('Processing text dataset')
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = os.path.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
texts.append(f.read())
f.close()
labels.append(label_id)
print('Found %s texts.' % len(texts))
global word_index, tokenizer
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
return (data, labels, labels_index)
# def train_Test_Split(data, labels):
# train_X, test_X, train_Y, test_Y = train_test_split(data, labels, test_size=VALIDATION_SPLIT)
# return (train_X, train_Y, test_X, test_Y)
def create_model():
print('Number of class: ||%d||' % (Classes))
model = Sequential()
model.add(Embedding( # Layer 0, Start
input_dim=nb_words + 1, # Size to dictionary, has to be input + 1
output_dim= EMBEDDING_DIM, # Dimensions to generate
weights=[embedding_matrix], # Initialize word weights
input_length=MAX_SEQUENCE_LENGTH,
name = "embedding_layer",
trainable=False))
model.add(LSTM(128, dropout_W=DROP_OUT, dropout_U=DROP_OUT, name = "lstm_layer"))
model.add(Dense(Classes, activation = 'sigmoid', name = "dense_one"))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def train_and_evaluate_model(model, train_X, train_Y, test_X, test_Y, data):
global best_weight_path
start = time.time()
best_weight_path="weights-improvement.hdf5"
checkpoint = ModelCheckpoint(best_weight_path, monitor='acc', verbose=1, save_best_only=True, mode='max')
print("Saved best weights to disk")
callbacks_list = [checkpoint]
history = model.fit(train_X, train_Y, validation_split=VALIDATION_SPLIT, nb_epoch=Nb_EPOCH, batch_size=BATCH_SIZE, callbacks=callbacks_list)
model.load_weights(best_weight_path)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.save(weights_path)
print("Saved model to disk")
training_loss, training_accuracy = model.evaluate(train_X, train_Y)
print ("Training Loss: ", training_loss)
print ("Training Accuracy: ", training_accuracy)
eval_loss, eval_accuracy = model.evaluate(test_X, test_Y)
print ("Testing Loss: ", eval_loss)
print ("Testing Accuracy: ", eval_accuracy)
model_history = history.history
model.pop()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
intermediate_output_train = model.predict(train_X)
print ("Intermediate training output shape : ", intermediate_output_train.shape)
np.savetxt('ohsumed_' + str(MAX_SEQUENCE_LENGTH) + ' D_' + str(Classes)+ '_train_output.txt', intermediate_output_train, fmt = '%s')
intermediate_output_test = model.predict(test_X)
print ("Intermediate testing output shape : ", intermediate_output_test.shape)
np.savetxt('ohsumed_' + str(MAX_SEQUENCE_LENGTH) + ' D_' + str(Classes)+ '_test_output.txt', intermediate_output_test, fmt = '%s')
trainTime = time.time() - start
print ("Training Time : ", trainTime)
return (model, training_accuracy, eval_accuracy)
def transform_labels(train_Y, test_Y):
trainY = []
testY = []
for label in train_Y:
for idx, bit in enumerate(label):
if bit ==1:
trainY.append(idx)
for label in test_Y:
for idx, bit in enumerate(label):
if bit ==1:
testY.append(idx)
trainY = np.array(trainY)
testY = np.array(testY)
print (trainY.shape , testY.shape)
np.savetxt('ohsumed_' + str(MAX_SEQUENCE_LENGTH) + ' D_' + str(Classes)+ '_train_output_labels.txt', trainY, fmt = '%s')
np.savetxt('ohsumed_' + str(MAX_SEQUENCE_LENGTH) + ' D_' + str(Classes)+ '_test_output_labels.txt', testY, fmt = '%s')
return trainY, testY
def evaluate_with_SVM(data, labels, train_X, train_Y,test_X, test_Y):
print ("Starting SVM")
clf = svm.SVC(kernel='linear')
clf.fit(train_X, train_Y)
predict_Y = clf.predict(test_X)
s=metrics.accuracy_score(test_Y, predict_Y)
print ("SVM Testin |
muddyfish/PYKE | node/ord.py | Python | mit | 408 | 0.002451 | from node.dictionary import Dictionary
from nodes import Node
class Ord(Node):
char = ". | o"
args = 1
results = 1
def ord(self, string: str):
"""ord(string)"""
try:
return ord(string)
except TypeError:
return [[ord(i)for i in string]]
def lookup(self, word_id: int):
Dictionary.setup()
return Dictionary.word_list[word_id] |
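# Added illustration (not from the original repo): the two branches of
# Ord.ord above, shown with plain builtins. A one-character input returns
# its code point; a longer string trips the TypeError branch and yields a
# nested list of code points.
assert ord("a") == 97                               # single-char branch
assert [[ord(c) for c in "hi"]] == [[104, 105]]     # TypeError branch result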
jettify/aiogibson | tests/test_commands.py | Python | mit | 10,025 | 0 | from ._testutil import GibsonTest, run_until_complete
from aiogibson import errors
class CommandsTest(GibsonTest):
"""Gibson high level commands.
:see: http://gibson-db.in/commands/
"""
@run_until_complete
def test_set(self):
key, value = b'test:set', b'bar'
response = yield from self.gibson.set(key, value, expire=3)
self.assertEqual(response, value)
with self.assertRaises(TypeError):
yield from self.gibson.set(key, value, expire='one')
@run_until_complete
def test_get(self):
key, value = b'test:get', b'bar'
resp = yield from self.gibson.set(key, value, expire=3)
self.assertEqual(resp, value)
resp = yield from self.gibson.get(key)
self.assertEqual(resp, value)
@run_until_complete
def test_delete(self):
key, value = b'test:delete', b'zap'
resp = yield from self.gibson.set(key, value, expire=3)
self.assertEqual(resp, value)
resp = yield from self.gibson.delete(key)
self.assertEqual(resp, True)
resp = yield from self.gibson.delete(key)
self.assertEqual(resp, False)
resp = yield from self.gibson.get(key)
self.assertEqual(resp, None)
@run_until_complete
def test_ttl(self):
key, value = b'test:ttl', b'zap'
resp = yield from self.gibson.set(key, value, 3)
self.assertEqual(resp, value)
resp = yield from self.gibson.ttl(key, 10)
self.assertEqual(resp, True)
with self.assertRaises(TypeError):
yield from self.gibson.ttl(key, expire='one')
@run_until_complete
def test_inc(self):
key, value = b'test:inc', 78
resp = yield from self.gibson.set(key, value, expire=3)
resp = yield from self.gibson.get(key)
self.assertEqual(resp, b'78')
resp = yield from self.gibson.inc(key)
self.assertEqual(resp, 79)
resp = yield from self.gibson.get(key)
self.assertEqual(resp, 79)
@run_until_complete
def test_dec(self):
key, value = b'test:dec', 78
resp = yield from self.gibson.set(key, value, expire=3)
resp = yield from self.gibson.get(key)
self.assertEqual(resp, b'78')
resp = yield from self.gibson.dec(key)
self.assertEqual(resp, 77)
resp = yield from self.gibson.get(key)
self.assertEqual(resp, 77)
@run_until_complete
def test_lock(self):
key, value = b'test:lock', b'zap'
resp = yield from self.gibson.set(key, value, 3)
self.assertEqual(resp, value)
resp = yield from self.gibson.lock(key, 10)
self.assertEqual(resp, True)
with self.assertRaises(errors.KeyLockedError):
yield from self.gibson.set(key, value, 3)
yield from self.gibson.unlock(key)
with self.assertRaises(TypeError):
yield from self.gibson.lock(key, expire='one')
@run_until_complete
def test_unlock(self):
key, value = b'test:unlock', b'zap'
resp = yield from self.gibson.set(key, value, 3)
self.assertEqual(resp, value)
resp = yield from self.gibson.lock(key, 10)
self.assertEqual(resp, True)
with self.assertRaises(errors.KeyLockedError):
yield from self.gibson.set(key, value, 3)
resp = yield from self.gibson.unlock(key)
self.assertEqual(resp, True)
resp = yield from self.gibson.set(key, 'foo', 3)
self.assertEqual(resp, b'foo')
@run_until_complete
def test_stats(self):
key, value = b'test:stats', b'zap'
resp = yield from self.gibson.set(key, value, 3)
self.assertEqual(resp, value)
resp = yield from self.gibson.stats()
test_keys = set([k for i, k in enumerate(resp) if not i % 2])
expected_keys = set([b'server_version', b'server_build_datetime',
b'server_allocator', b'server_arch',
b'server_started', b'server_time',
b'first_item_seen', b'last_item_seen',
b'total_items', b'total_compressed_items',
b'total_clients', b'total_cron_done',
b'total_connections', b'total_requests',
b'memory_available', b'memory_usable',
b'memory_used', b'memory_peak',
b'memory_fragmentation',
b'item_size_avg', b'compr_rate_avg',
b'reqs_per_client_avg'])
self.assertTrue(expected_keys.issubset(test_keys))
@run_until_complete
def test_keys(self):
key1, value1 = b'test:keys_1', b'keys:bar'
key2, value2 = b'test:keys_2', b'keys:zap'
yield from self.gibson.set(key1, value1, 3)
yield from self.gibson.set(key2, value2, 3)
resp = yield from self.gibson.keys(b'test:keys')
self.assertEqual(resp, [key1, key2])
@run_until_complete
def test_ping(self):
result = yield from self.gibson.ping()
self.assertTrue(result)
@run_until_complete
def test_meta(self):
key, value = b'test:meta_size', b'bar'
response = yield from self.gibson.set(key, value, expire=10)
self.assertEqual(response, value)
res = yield from self.gibson.meta_size(key)
self.assertEqual(res, 3)
res = yield from self.gibson.meta_encoding(key)
self.assertEqual(res, 0)
res = yield from self.gibson.meta_access(key)
self.assertTrue(1405555555 < res)
res = yield from self.gibson.meta_created(key)
self.assertTrue(1405555555 < res)
res = yield from self.gibson.meta_ttl(key)
self.assertEqual(res, 10)
res = yield from self.gibson.meta_left(key)
self.assertTrue(10 >= res)
res = yield from self.gibson.meta_lock(key)
self.assertEqual(res, 0)
@run_until_complete
def test_end(self):
self.assertTrue(self.gibson.__repr__().startswith("<Gibson"))
yield from self.gibson.end()
self.assertTrue(self.gibson.closed)
@run_until_complete
def test_mset_mget(self):
key1, value1 = b'test:mset:1', 10
key2, value2 = b'test:mset:2', 20
yield from self.gibson.set(key1, value1, 3)
yield from self.gibson.set(key2, value2, 130)
res = yield from self.gibson.mset(b'test:mset', 42)
self.assertEqual(res, 2)
res = yield from self.gibson.mget(b'test:mset')
self.assertEqual(res, [key1, b'42', key2, b'42'])
@run_until_complete
def test_mget_limit(self):
key1, value1 = b'test:mget_limit:1', b'10'
key2, value2 = b'test:mget_limit:2', b'20'
key3, value3 = b'test:mget_limit:3', b'30'
yield from self.gibson.set(key1, value1, 100)
yield from self.gibson.set(key2, value2, 100)
yield from self.gibson.set(key3, value3, 100)
res = yield from self.gibson.mget(b'test:mget_limit', 2)
self.assertEqual(len(res), 2*2)
self.assertEqual(res, [key1, value1, key2, value2])
with self.assertRaises(TypeError):
yield from self.gibson.mget(key1, limit='one')
@run_until_complete
def test_mttl(self):
key1, value1 = b'test:mttl:1', b'mttl:bar'
key2, value2 = b'test:mttl:2', b'mttl:zap'
yield from self.gibson.set(key1, value1, 3)
yield from self.gibson.set(key2, value2, 3)
resp = yield from self.gibson.mttl(b'test:mttl', 10)
self.assertEqual(resp, 2)
with self.assertRaises(TypeError):
yield from self.gibson.mttl(key1, expire='one')
@run_until_complete
def test_minc(self):
key1, value1 = b'test:minc:1', 10
key2, value2 = b'test:minc:2', 20
yield from self.gibson.set(key1, value1, 3)
yield from self.gibson.set(key2, value2, 3)
res = yield from self.gibson.minc(b'test:minc')
self.assertEqual(res, 2)
res = yield from self.gibson.mget(b'test:minc')
self.assertEqual(res, [key1, 11, key2, 21])
@run_until_complete |
omf2097/pyomftools | omftools/cli/af_compile.py | Python | mit | 405 | 0 | import argparse
from omftools.pyshadowdive.af import AFFile
if __name__ == "__mai | n__":
parser = argparse.ArgumentParser(description="Compile AF file from JSON")
parser.add_argument("input_file", help="Input .json file")
parser.add_argument("output_file", help="Output .AF file | ")
args = parser.parse_args()
AFFile.load_json(args.input_file).save_native(args.output_file)
exit(0)
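# Example invocation (added; the file names are illustrative):
#
#     python af_compile.py fighter.json FIGHTER.AF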
|
brefsdal/sherpa | sherpa/image/pyds9_backend.py | Python | gpl-2.0 | 5,145 | 0.007191 | #_PYTHON_INSERT_SAO_COPYRIGHT_HERE_(2007)_
#_PYTHON_INSERT_GPL_LICENSE_HERE_
from itertools import izip
from os import access, R_OK
import numpy
import time
import ds9
from sherpa.utils import get_keyword_defaults, SherpaFloat
from sherpa.utils.err import DS9Err
_target = 'sherpa'
def _get_win():
return ds9.ds9(_target)
def doOpen():
_get_win()
def isOpen():
targets = ds9.ds9_targets()
if targets is None:
return False
if type(targets) in (list,):
for target in targets:
if _target in target:
return True
return False
def close():
if isOpen():
imager = _get_win()
imager.set("quit")
def delete_frames():
if not isOpen():
raise DS9Err('open')
imager = _get_win()
try:
imager.set("frame delete all")
return imager.set("frame new")
except:
raise DS9Err('delframe')
def get_region(coord):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
try:
regionstr = "regions -format saoimage -strip yes"
if (coord != ''):
if (coord != 'image'):
regionstr = "regions -format ciao -strip yes -system " + str(coord)
else:
regionstr = "regions -format saoimage -strip yes -system image"
reg = imager.get(regionstr)
reg = reg.replace(';','')
return reg
except:
raise DS9Err('retreg')
def image(arr, newframe=False, tile=False):
if not isOpen():
doOpen()
imager = _get_win()
if newframe is True:
try:
imager.set("frame new")
imager.set("frame last")
except:
raise DS9Err('newframe')
try:
if tile is True:
imager.set("tile yes")
else:
imager.set("tile no")
except:
raise DS9Err('settile')
time.sleep(1)
try:
# pyds9 expects shape[::-1] compared to DS9.py
# therefore transpose the image before sending
arr = numpy.asarray(arr, dtype=SherpaFloat)
imager.set_np2arr(arr.T)
except:
raise # DS9Err('noimage')
def _set_wcs(keys):
eqpos, sky, name = keys
phys = ''
wcs = "OBJECT = '%s'\n" % name
if eqpos is not None:
wcrpix = eqpos.crpix
wcrval = eqpos.crval
wcdelt = eqpos.cdelt
if sky is not None:
pcrpix = sky.crpix
pcrval = sky.crval
pcdelt = sky.cdelt
# join together all strings with a '\n' between each
phys = '\n'.join(["WCSNAMEP = 'PHYSICAL'",
| "CTYPE1P = 'x '",
'CRVAL1P = %.14E' % pcrval[0],
'CRPIX1P = %.14E' % pcrpix[0],
'CDELT1P = %.14E' % pcdelt[0],
"CTYPE2P = 'y '",
'CRVAL2P = %.14E' % pcrval[1],
'CRPIX2P = %.14E' % pcrpix[1],
'CDELT2P = %.14E' % pcdelt[1]])
if eqpos is not None:
wcdelt = wcdelt * pcdelt
wcrpix = ((wcrpix - pcrval) /
pcdelt + pcrpix )
if eqpos is not None:
# join together all strings with a '\n' between each
wcs = wcs + '\n'.join(["RADECSYS = 'ICRS '",
"CTYPE1 = 'RA---TAN'",
'CRVAL1 = %.14E' % wcrval[0],
'CRPIX1 = %.14E' % wcrpix[0],
'CDELT1 = %.14E' % wcdelt[0],
"CTYPE2 = 'DEC--TAN'",
'CRVAL2 = %.14E' % wcrval[1],
'CRPIX2 = %.14E' % wcrpix[1],
'CDELT2 = %.14E' % wcdelt[1]])
# join the wcs and physical with '\n' between them and at the end
return ('\n'.join([wcs,phys]) + '\n')
def wcs(keys):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
info = _set_wcs( keys )
try:
# use stdin to pass the WCS info
imager.set('wcs replace', info)
except:
raise DS9Err('setwcs')
def open():
doOpen()
def set_region(reg, coord):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
try:
if (access(reg, R_OK) is True):
imager.set("regions load " + "'" + reg + "'")
else:
# Assume region string has to be in CIAO format
regions = reg.split(";")
for region in regions:
if (region != ''):
if (coord != ''):
imager.set("regions", str(coord) + ";" + region)
else:
imager.set("regions", region)
except:
raise DS9Err('badreg', str(reg))
def xpaget(arg):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
return imager.get(arg)
def xpaset(arg, data=None):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
return imager.set(arg, data)
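# Minimal usage sketch (added, not part of the original module; assumes the
# ds9 binary and pyds9 are installed and a display is available):
#
#   import numpy
#   from sherpa.image import pyds9_backend
#   pyds9_backend.image(numpy.arange(100.0).reshape(10, 10), newframe=True)
#   pyds9_backend.xpaset("zoom to fit")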
|
cnewcome/sos | sos/plugins/networking.py | Python | gpl-2.0 | 14,527 | 0.000207 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
import os
import re
class Networking(Plugin):
"""network and device configuration
"""
plugin_name = "networking"
profiles = ('network', 'hardware', 'system')
trace_host = "www.example.com"
option_list = [(
("traceroute", "collect a traceroute to %s" % trace_host, "slow",
False)
)]
# switch to enable netstat "wide" (non-truncated) output mode
ns_wide = "-W"
def get_bridge_name(self, brctl_file):
"""Return a list for which items are bridge name according to the
output of brctl show stored in brctl_file.
"""
out = []
try:
brctl_out = open(brctl_file).read()
except:
return out
for line in brctl_out.splitlines():
if line.startswith("bridge name") \
or line.isspace() \
or line[:1].isspace():
continue
br_name, br_rest = line.split(None, 1)
out.append(br_name)
return out
def get_eth_interfaces(self, ip_link_out):
"""Return a dictionary for which keys are ethernet interface
names taken from the output of "ip -o link".
"""
out = {}
for line in ip_link_out.splitlines():
match = re.match('.*link/ether', line)
if match:
iface = match.string.split(':')[1].lstrip()
out[iface] = True
return out
def get_ip_netns(self, ip_netns_file):
"""Returns a list for which items are namespaces in the output of
ip netns stored in the ip_netns_file.
"""
out = []
try:
ip_netns_out = open(ip_netns_file).read()
except:
return out
for line in ip_netns_out.splitlines():
# If there's no namespaces, no need to continue
if line.startswith("Object \"netns\" is unknown") \
or line.isspace() \
or line[:1].isspace():
return out
out.append(line.partition(' ')[0])
return out
def get_netns_devs(self, namespace):
"""Returns a list for which items are devices that exist within
the provided namespace.
"""
ip_link_result = self.call_ext_prog("ip netns exec " + namespace +
" ip -o link")
dev_list = []
if ip_link_result['status'] == 0:
for eth in self.get_eth_interfaces(ip_link_result['output']):
dev = eth.replace('@NONE', '')
dev_list.append(dev)
return dev_list
def collect_iptable(self, tablename):
""" When running the iptables command, it unfortunately auto-loads
the modules before trying to get output. Some people explicitly
don't want this, so check if the modules are loaded before running
the command. If they aren't loaded, there can't possibly be any
relevant rules in that table """
modname = "iptable_"+tablename
if self.check_ext_prog("grep -q %s /proc/modules" % modname):
cmd = "iptables -t "+tablename+" -nvL"
self.add_cmd_output(cmd)
def collect_ip6table(self, tablename):
""" Same as function above, but for ipv6 """
modname = "ip6table_"+tablename
if self.check_ext_prog("grep -q %s /proc/modules" % modname):
cmd = "ip6tables -t "+tablename+" -nvL"
self.add_cmd_output(cmd)
def collect_nftables(self):
""" Collects nftables rulesets with 'nft' commands if the modules
are present """
if self.check_ext_prog("grep -q nf_tables /proc/modules"):
self.add_cmd_output("nft list ruleset")
def setup(self):
super(Networking, self).setup()
self.add_copy_spec([
"/proc/net/",
"/etc/nsswitch.conf",
"/etc/yp.conf",
"/etc/inetd.conf",
"/etc/xinetd.conf",
"/etc/xinetd.d",
"/etc/host*",
"/etc/resolv.conf",
"/etc/network*",
"/etc/NetworkManager/NetworkManager.conf",
"/etc/NetworkManager/system-connections",
"/etc/nftables",
"/etc/sysconfig/nftables.conf",
"/etc/nftables.conf",
"/etc/dnsmasq*",
"/sys/class/net/*/flags",
"/etc/iproute2"
])
self.add_forbidden_path("/proc/net/rpc/use-gss-proxy")
self.add_forbidden_path("/proc/net/rpc/*/channel")
self.add_forbidden_path("/proc/net/rpc/*/flush")
# Cisco CDP
self.add_forbidden_path("/proc/net/cdp")
self.add_forbidden_path("/sys/net/cdp")
# Dialogic Diva
self.add_forbidden_path("/proc/net/eicon")
self.add_cmd_output("ip -o addr", root_symlink="ip_addr")
self.add_cmd_output("route -n", root_symlink="route")
self.add_cmd_output("plotnetcfg")
self.collect_iptable("filter")
self.collect_iptable("nat")
self.collect_iptable("mangle")
self.collect_ip6table("filter")
self.collect_ip6table("nat")
self.collect_ip6table("mangle")
self.collect_nftables()
self.add_cmd_output("netstat %s -neopa" % self.ns_wide,
root_symlink="netstat")
self.add_cmd_output([
"netstat -s",
"netstat %s -agn" % self.ns_wide,
"ss -peaonmi",
"ip route show table all",
"ip -6 route show table all",
"ip -4 rule",
"ip -6 rule",
"ip -s -d link",
"ip address",
"ifenslave -a",
"ip mroute show",
"ip maddr show",
"ip neigh show",
"ip neigh show nud noarp",
"biosdevname -d",
"tc -s qdisc show",
"ip -s macsec show",
"iptables -vnxL",
"ip6tables -vnxL"
])
# There are some incompatible changes in nmcli since
# the release of NetworkManager >= 0.9.9. In addition,
# NetworkManager >= 0.9.9 will use the long names of
# "nmcli" objects.
# All versions conform to the following templates with different
# strings for the object being operated on.
nmcli_con_details_template = "nmcli con %s id"
nmcli_dev_details_template = "nmcli dev %s"
# test NetworkManager status for the specified major version
def test_nm_status(version=1):
status_template = "nmcli --terse --fields RUNNING %s status"
obj_table = [
"nm", # < 0.9.9
"general" # >= 0.9.9
]
status = self.call_ext_prog(status_template % obj_table[version])
return status['output'].lower().startswith("running")
# NetworkManager >= 0.9.9 (Use short name of objects for nmcli)
if test_nm_status(version=1):
self.add_cmd_output([
"nmcli general status",
"nmcli con",
"nmcli con show --active",
"nmcli dev"])
nmcli_con_details_cmd = nmcli_con_details_template % "show"
nmcli_dev_details_cmd = nmcli_dev_details_template % "show"
# NetworkManager < 0.9.9 (Use short name of objects for nmcli)
elif test_nm_status(version=0):
|
toshywoshy/ansible | test/lib/ansible_test/_internal/classification.py | Python | gpl-3.0 | 32,223 | 0.002234 | """Classify changes in Ansible code."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import re
import time
from . import types as t
from .target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
)
from .util import (
display,
is_subdir,
)
from .import_analysis import (
get_python_module_utils_imports,
get_python_module_utils_name,
)
from .csharp_import_analysis import (
get_csharp_module_utils_imports,
get_csharp_module_utils_name,
)
from .powershell_import_analysis import (
get_powershell_module_utils_imports,
get_powershell_module_utils_name,
)
from .config import (
TestConfig,
IntegrationConfig,
)
from .metadata import (
ChangeDescription,
)
from .data import (
data_context,
)
FOCUSED_TARGET = '__focused__'
def categorize_changes(args, paths, verbose_command=None):
"""
:type args: TestConfig
:type paths: list[str]
:type verbose_command: str
:rtype: ChangeDescription
"""
mapper = PathMapper(args)
commands = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths = set()
original_paths = set()
additional_paths = set()
no_integration_paths = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=2)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
none_count = 0
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path)  # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
if not tests.get(verbose_command):
# minimize excessive output from potentially thousands of files which do not trigger tests
none_count += 1
verbosity = 2
else:
verbosity = 1
if args.verbosity >= verbosity:
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
if none_count > 0 and args.verbosity < 2:
display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
for command in commands:
commands[command].discard('none')
if any(target == 'all' for target in commands[command]):
commands[command] = set(['all'])
commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
for command in commands:
if commands[command] == ['all']:
commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
class PathMapper:
"""Map file paths to test commands and targets."""
def __init__(self, args):
"""
:type args: TestConfig
"""
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
self.units_modules = set(target.module for target in self.units_targets if target.module)
self.units_paths = set(a for target in self.units_targets for a in target.aliases)
self.sanity_paths = set(target.path for target in self.sanity_targets)
self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'posix/' in target.aliases for m in target.modules)
self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'windows/' in target.aliases for m in target.modules)
self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'network/' in target.aliases for m in target.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.paths_to_dependent_targets = {}
for target in self.integration_targets:
for path in target.needs_file:
if path not in self.paths_to_dependent_targets:
self.paths_to_dependent_targets[path] = set()
|
bittorrent/needy | needy/sources/download.py | Python | mit | 5,420 | 0.000738 | from __future__ import print_function
import io
import os
import binascii
import hashlib
import socket
import shutil
import sys
import tarfile
import tempfile
import time
import zipfile
import logging
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from ..source import Source
class Download(Source):
def __init__(self, url, checksum, destination, cache_directory):
Source.__init__(self)
self.url = url
self.checksum = checksum
self.destination = destination
self.cache_directory = cache_directory
self.local_download_path = os.path.join(cache_directory, checksum)
@classmethod
def identifier(cls):
return 'download'
def clean(self):
if not self.checksum:
raise ValueError('checksums are required for downloads')
self.__fetch()
logging.info('Unpacking to %s' % self.destination)
self.__clean_destination_dir()
self.__unpack()
self.__trim_lone_dirs()
def __fetch(self):
if not os.path.exists(self.cache_directory):
os.makedirs(self.cache_directory)
if not os.path.isfile(self.local_download_path):
self.get(self.url, self.checksum, self.local_download_path)
@classmethod
def get(cls, url, checksum, destination):
logging.info('Downloading from %s' % url)
download = None
attempts = 0
download_successful = False
while not download_successful:
try:
download = urllib2.urlopen(url, timeout=5)
except urllib2.URLError as e:
logging.warning(e)
except socket.timeout as e:
logging.warning(e)
attempts = attempts + 1
download_successful = download and download.code == 200 and 'content-length' in download.info()
if not download_successful:
if attempts >= 5:
break
logging.warning('Download failed. Retrying...')
time.sleep(attempts)
if not download_successful:
raise IOError('unable to download library')
size = int(download.info()['content-length'])
progress = 0
if sys.stdout.isatty():
print('{:.1%}'.format(float(progress) / size), end='')
sys.stdout.flush()
local_file = tempfile.NamedTemporaryFile('wb', delete=False)
try:
chunk_size = 1024
while True:
chunk = download.read(chunk_size)
progress = progress + chunk_size
if sys.stdout.isatty():
print('\r{:.1%}'.format(float(progress) / size), end='')
sys.stdout.flush()
if not chunk:
break
local_file.write(chunk)
local_file.close()
if sys.stdout.isatty():
print('\r \r', end='')
sys.stdout.flush()
logging.debug('Verifying checksum...')
if not cls.verify_checksum(local_file.name, checksum):
raise ValueError('incorrect checksum')
logging.debug('Checksum verified.')
shutil.move(local_file.name, destination)
except:
os.unlink(local_file.name)
raise
del download
@classmethod
def verify_checksum(cls, path, expected):
expected = binascii.unhexlify(expected)
with open(path, 'rb') as file:
file_contents = file.read()
hash = None
if len(expected) == hashlib.md5().digest_size:
hash = hashlib.md5()
elif len(expected) == hashlib.sha1().digest_size:
hash = hashlib.sha1()
else:
raise ValueError('unknown checksum type')
hash.update(file_contents)
return expected == hash.digest()
def __clean_destination_dir(self):
if os.path.exists(self.destination):
shutil.rmtree(self.destination)
os.makedirs(self.destination)
def __unpack(self):
if tarfile.is_tarfile(self.local_download_path):
self.__tarfile_unpack()
return
if zipfile.is_zipfile(self.local_download_path):
self.__zipfile_unpack()
return
def __tarfile_unpack(self):
with open(self.local_download_path, 'rb') as file:
tar = tarfile.open(fileobj=file, mode='r|*')
tar.extractall(self.destination if isinstance(self.destination, str) else self.destination.encode(sys.getfilesystemencoding()))
del tar
def __zipfile_unpack(self):
with zipfile.ZipFile(self.local_download_path, 'r') as file:
file.extractall(self.destination)
def __trim_lone_dirs(self):
temporary_directory = os.path.join(self.cache_directory, 'temp_')
while True:
destination_contents = os.listdir(self.destination)
if len(destination_contents) != 1:
break
lone_directory = os.path.join(self.destination, destination_contents[0])
if not os.path.isdir(lone_directory):
break
shutil.move(lone_directory, temporary_directory)
shutil.rmtree(self.destination)
shutil.move(temporary_directory, self.destination)
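# Minimal usage sketch (added; the URL and checksum below are placeholders,
# not real artifacts). Download.get is a classmethod, so a file can be
# fetched and checksum-verified without building a Download instance; the
# digest is chosen by checksum length (32 hex chars -> md5, 40 -> sha1).
#
#   from needy.sources.download import Download
#   Download.get('https://example.org/lib-1.0.tar.gz',
#                'd41d8cd98f00b204e9800998ecf8427e',  # placeholder md5
#                '/tmp/lib-1.0.tar.gz')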
|
wkretzsch/bcftools | misc/plot-roh.py | Python | gpl-3.0 | 13,378 | 0.023322 | #!/usr/bin/python
import glob, gzip, csv, sys, os, copy, re
csv.register_dialect('tab', delimiter='\t', quoting=csv.QUOTE_NONE)
def usage(msg=None):
if msg==None:
print 'Usage: plot.py [OPTIONS] <dir>'
print 'Options:'
print ' -H, --highlight +group1,-group2 Highlight calls shared within group1 but not present in group2'
print ' -i, --interactive Run interactively'
print ' -l, --min-length <num> Filter input regions shorter than this [0]'
print ' -n, --min-markers <num> Filter input regions with fewer marker than this [0]'
print ' -o, --outfile <file> Output file name [plot.png]'
print ' -q, --min-qual <num> Filter input regions with quality smaller than this [0]'
print ' -r, --region [^]<chr|chr:beg-end> Plot this chromosome/region only'
print ' -s, --samples <file> List of samples to show, rename or group: "name[\\tnew_name[\\tgroup]]"'
print ' -h, --help This usage text'
print 'Matplotlib options:'
print ' +adj, --adjust <str> Set plot adjust [bottom=0.18,left=0.07,right=0.98]'
print ' +dpi, --dpi <num> Set bitmap DPI [150]'
print ' +sxt, --show-xticks Show x-ticks (genomic coordinate)'
print ' +xlb, --xlabel <str> Set x-label'
print ' +xli, --xlimit <num> Extend x-range by this fraction [0.05]'
else:
print msg
sys.exit(1)
dir = None
regs = None
min_length = 0
min_markers = 0
min_qual = 0
interactive = False
sample_file = None
highlight = None
outfile = None
adjust = 'bottom=0.18,left=0.07,right=0.98'
dpi = 150
xlim = 0.05
show_xticks = False
xlabel = None
if len(sys.argv) < 2: usage()
args = sys.argv[1:]
while len(args):
if args[0]=='-r' or args[0]=='--region':
args = args[1:]
regs = args[0]
elif args[0]=='-i' or args[0]=='--interactive':
interactive = True
elif args[0]=='-l' or args[0]=='--min-length':
args = args[1:]
min_length = float(args[0])
elif args[0]=='-n' or args[0]=='--min-markers':
args = args[1:]
min_markers = float(args[0])
elif args[0]=='-o' or args[0]=='--outfile':
args = args[1:]
outfile = args[0]
elif args[0]=='-q' or args[0]=='--min-qual':
args = args[1:]
min_qual = float(args[0])
elif args[0]=='-H' or args[0]=='--highlight':
args = args[1:]
highlight = args[0]
elif args[0]=='-s' or args[0]=='--samples':
args = args[1:]
sample_file = args[0]
elif args[0]=='-?' or args[0]=='-h' or args[0]=='--help':
usage()
elif args[0]=='+adj' or args[0]=='--adjust':
args = args[1:]
adjust = args[0]
elif args[0]=='+dpi' or args[0]=='--dpi':
args = args[1:]
dpi = float(args[0])
elif args[0]=='+xlb' or args[0]=='--xlabel':
args = args[1:]
xlabel = args[0]
elif args[0]=='+sxt' or args[0]=='--show-xticks':
show_xticks = True
elif args[0]=='+xli' or args[0]=='--xlimit':
args = args[1:]
xlim = float(args[0])
else:
dir = args[0]
args = args[1:]
if interactive and outfile!=None: usage("Use -i, --interactive or -o, --outfile, but not both")
if not interactive and outfile==None: outfile = 'plot.png'
def wrap_hash(**args): return args
adjust = eval("wrap_hash("+adjust+")")
import matplotlib as mpl
for gui in ['TKAgg','GTKAgg','Qt4Agg','WXAgg','MacOSX']:
try:
mpl.use(gui,warn=False, force=True)
import matplotlib.pyplot as plt
import matplotlib.patches as patches
break
except:
continue
cols = [ '#337ab7', '#5cb85c', '#5bc0de', '#f0ad4e', '#d9534f', 'grey', 'black' ]
mpl.rcParams['axes.color_cycle'] = cols
globstr = os.path.join(dir, '*.txt.gz')
fnames = glob.glob(globstr)
if len(fnames)==0: usage("No data files found in \""+dir+"\"")
def parse_regions(str):
if str==None: return None
regs = { 'inc':[], 'exc':[] }
list = str.split(',')
key = 'inc'
if list[0][0]=='^':
key = 'exc'
list[0] = list[0][1:]
for reg in list:
x = reg.split(':')
chr = x[0]
beg = 0
end = (1<<32)-1
if len(x)>1:
(beg,end) = x[1].split('-')
beg = float(beg)
end = float(end)
regs[key].append({'chr':chr,'beg':beg,'end':end})
return regs
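# Worked examples (added for clarity, not in the original script):
#
#   parse_regions('1:100-200,X')
#     -> {'inc': [{'chr': '1', 'beg': 100.0, 'end': 200.0},
#                 {'chr': 'X', 'beg': 0, 'end': (1 << 32) - 1}],
#         'exc': []}
#   parse_regions('^Y')
#     -> {'inc': [], 'exc': [{'chr': 'Y', 'beg': 0, 'end': (1 << 32) - 1}]}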
def region_overlap(regs,chr,beg,end):
if regs==None: return (beg,end)
if len(regs['exc'])>0:
for reg in regs['exc']:
if chr==reg['chr']: return None
return (beg,end)
if len(regs['inc'])==0: return (beg,end)
for reg in regs['inc']:
if chr!=reg['chr']: continue
if beg>reg['end']: continue
if end<reg['beg']: continue
if beg<reg['beg']: beg = reg['beg']
if end>reg['end']: end = reg['end']
return (beg,end)
return None
def parse_outfile(fname):
files = re.split(r',',fname)
bname = re.search(r'^(.+)\.[^.]+$', files[0]).group(1)
for i in range(len(files)-1):
files[i+1] = bname+"."+files[i+1]
return files
def next_region(rgs):
min = None
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if min==None:
min = [0,0]
min[0] = reg[0]
min[1] = reg[1]
if min[0] > reg[0]: min[0] = reg[0]
if min==None: return None
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if min[1] > reg[1]: min[1] = reg[1]
if min[1] > reg[0] - 1 and min[0] != reg[0]: min[1] = reg[0] - 1
return min;
def merge_regions(rg):
rgs = copy.deepcopy(rg)
out = {}
while True:
min = next_region(rgs)
if min==None: break
beg = min[0]
end = min[1]
smpls = []
for smpl in rgs:
if len(rgs[smpl])==0: continue
reg = rgs[smpl][0]
if reg[0] > end: continue
if reg[1] > end:
rgs[smpl][0][0] = end + 1
else:
rgs[smpl] = rgs[smpl][1:]
if smpl not in out: out[smpl] = []
smpls.append(smpl)
if len(smpls)>1:
for smpl in smpls: out[smpl].append([beg,end])
return out
def prune_regions(groups,regions):
regs = {'+':{},'-':{}}
for smpl in regions:
grp = groups[smpl]
for reg in regions[smpl]:
key = str(reg[0])+"-"+str(reg[1]) # reg=[beg,end] -> "beg-end"
if key not in regs[grp]: regs[grp][key] = 0
regs[grp][key] += 1
nexp = 0
for smpl in groups:
if groups[smpl]=='+': nexp += 1
for smpl in regions:
rm = []
for reg in regions[smpl]:
key = str(reg[0])+"-"+str(reg[1])
if key in regs['-']: rm.append(reg)
elif key not in regs['+'] or regs['+'][key]!=nexp: rm.append(reg)
for reg in rm:
if reg in regions[smpl]:
regions[smpl].remove(reg)
return regions
def parse_samples(fname,highlight):
if fname==None: return (None,None,{})
samples = {}
groups = {}
grp2sgn = {}
smpl2y = {}
# parse "+name" to create a map "name":"+"
if highlight!=None:
for grp in re.split(r',', highlight):
if grp[0]!='+' and grp[0]!='-': usage("Expected + or - before the group name: "+grp)
grp2sgn[grp[1:]] = grp[0]
# read samples, renaming them
with open(fname) as f:
for line in f:
row = re.split(r'\s+', line.rstrip('\n'))
smpl = row[0]
if len(row)==1: samples[smpl] = smpl
else:
samples[smpl] = row[1]
if len(row)==3:
grp = row[2]
if grp in grp2sgn:
grp = grp2sgn[grp]
else:
grp = '+'
groups[smpl] = grp
|
sunqm/pyscf | pyscf/symm/param.py | Python | apache-2.0 | 8,518 | 0.021601 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
# D2h C2h C2v D2 Cs Ci C2 C1
# E E E E E E E E
# C2x C2x
# C2y C2y
# C2z C2 C2 C2z C2
# i i i
# sx sx
# sy sy
# sz sh sh
POINTGROUP = ('D2h', 'C2h', 'C2v', 'D2' , 'Cs' , 'Ci' , 'C2' , 'C1' ,)
OPERATOR_TABLE = {
'D2h': ('E', 'C2x', 'C2y', 'C2z', 'i', 'sx' , 'sy' , 'sz' ),
'C2h': ('E', 'C2z', 'i', 'sz' ),
'C2v': ('E', 'C2z', 'sx' , 'sy' , ),
'D2' : ('E', 'C2x', 'C2y', 'C2z', ),
'Cs' : ('E', 'sz' ),
'Ci' : ('E', 'i', ),
'C2' : ('E', 'C2z', ),
'C1' : ('E', ),
}
#
IRREP_ID_TABLE = { # bin for XOR
'D2h': {'Ag' : 0, # 000
'B1g': 1, # 001
'B2g': 2, # 010
'B3g': 3, # 011
'Au' : 4, # 100
'B1u': 5, # 101
'B2u': 6, # 110
'B3u': 7,}, # 111
'C2h': {'Ag': 0, # 00
'Bg': 1, # 01
'Au': 2, # 10
'Bu': 3,}, # 11
'C2v': {'A1': 0, # 00
'A2': 1, # 01
'B1': 2, # 10
'B2': 3,}, # 11
'D2' : {'A' : 0, # 00
'B1': 1, # 01
'B2': 2, # 10
'B3': 3,}, # 11
'Cs' : {'A\'': 0, # 0
'A\"': 1,}, # 1
'Ci' : {'Ag': 0, # 0
'Au': 1,}, # 1
'C2' : {'A': 0, # 0
'B': 1,}, # 1
'C1' : {'A': 0,}, # 0
}
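# Worked example (added): with this binary encoding, direct products of the
# one-dimensional irreps reduce to XOR of the ids, e.g. in D2h
# B1u (101) x B2u (110) = B3g (011):
#
#   assert (IRREP_ID_TABLE['D2h']['B1u'] ^ IRREP_ID_TABLE['D2h']['B2u']
#           == IRREP_ID_TABLE['D2h']['B3g'])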
IRREP_ID_MOLPRO = {'D2h': (1, # Ag
4, # B1g
6, # B2g
7, # B3g
8, # Au
5, # B1u
3, # B2u
2), # B3u
'C2v': (1, # A1
4, # A2
2, # B1
3), # B2
'C2h': (1, # Ag
4, # Bg
2, # Au
3), # Bu
'D2' : (1, # A
4, # B1
3, # B2
2), # B3
'Cs' : (1, # A'
2), # A"
'C2' : (1, # A
2), # B
'Ci' : (1, # Ag
2), # Au
'C1' : (1,)}
# E,C2x,C2y,C2z,i, sx,sy,sz
CHARACTER_TABLE = { # XOR
'D2h': (('Ag' , 1, 1, 1, 1, 1, 1, 1, 1), # 000
('B1g', 1,-1, -1, 1, 1,-1,-1, 1), # 001
('B2g', 1,-1, 1, -1, 1,-1, 1,-1), # 010
('B3g', 1, 1, -1, -1, 1, 1,-1,-1), # 011
('Au' , 1, 1, 1, 1, -1,-1,-1,-1), # 100
('B1u', 1,-1, -1, 1, -1, 1, 1,-1), # 101
('B2u', 1,-1, 1, -1, -1, 1,-1, 1), # 110
('B3u', 1, 1, -1, -1, -1,-1, 1, 1)), # 111
# E,C2,i, sh # XOR
'C2h': (('Ag', 1, 1, 1, 1), # 00
('Bg', 1,-1, 1,-1), # 01
('Au', 1, 1,-1,-1), # 10
('Bu', 1,-1,-1, 1)), # 11
# E,C2,sx,sy # XOR
'C2v': (('A1', 1, 1, 1, 1), # 00
('A2', 1, 1,-1,-1), # 01
('B1', 1,-1,-1, 1), # 10
('B2', 1,-1, 1,-1)), # 11
# E,C2x,C2y,C2z # XOR
'D2' : (('A' , 1, 1, 1, 1), # 00
('B1', 1,-1, -1, 1), # 01
('B2', 1,-1, 1, -1), # 10
('B3', 1, 1, -1, -1)), # 11
# E, sh # XOR
'Cs' : (('A\'',1, 1,), # 0
('A\"',1,-1,)), # 1
# E, i # XOR
'Ci' : (('Ag', 1, 1,), # 0
('Au', 1,-1,)), # 1
# E, C2 # XOR
'C2' : (('A', 1, 1,), # 0
('B', 1,-1,)), # 1
# E # XOR
'C1' : (('A', 1),), # 0
}
# D2h C2h C2v D2 Cs Ci C2 C1
SYMM_DESCENT_Z = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Ag', 'A2', 'B1', 'A\'', 'Ag', 'A', 'A'),
('B2g', 'Bg', 'B1', 'B2', 'A\"', 'Ag', 'B', 'A'),
('B3g', 'Bg', 'B2', 'B3', 'A\"', 'Ag', 'B', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\'', 'Au', 'A', 'A'),
('B1u', 'Au', 'A1', 'B1', 'A\'', 'Au', 'A', 'A'),
('B2u', 'Bu', 'B2', 'B2', 'A\"', 'Au', 'B', 'A'),
('B3u', 'Bu', 'B1', 'B3', 'A\"', 'Au', 'B', 'A'),
)
SYMM_DESCENT_X = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Bg', 'B2', 'B1', 'A\"', 'Ag', 'B', 'A'),
('B2g', 'Bg', 'B1', 'B2', 'A\"', 'Ag', 'B', 'A'),
('B3g', 'Ag', 'A2', 'B3', 'A\'', 'Ag', 'A', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\"', 'Au', 'A', 'A'),
('B1u', 'Bu', 'B1', 'B1', 'A\'', 'Au', 'B', 'A'),
('B2u', 'Bu', 'B2', 'B2', 'A\'', 'Au', 'B', 'A'),
('B3u', 'Au', 'A1', 'B3', 'A\"', 'Au', 'A', 'A'),
)
SYMM_DESCENT_Y = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Bg', 'B2', 'B1', 'A\"', 'Ag', 'B', 'A'),
('B2g', 'Ag', 'A2', 'B2', 'A\'', 'Ag', 'A', 'A'),
('B3g', 'Bg', 'B1', 'B3', 'A\"', 'Ag', 'B', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\"', 'Au', 'A', 'A'),
('B1u', 'Bu', 'B1', 'B1', 'A\'', 'Au', 'B', 'A'),
('B2u', 'Au', 'A1', 'B2', 'A\"', 'Au', 'A', 'A'),
('B3u', 'Bu', 'B2', 'B3', 'A\'', 'Au', 'B', 'A'),
)
SPHERIC_GTO_PARITY_ODD = (
# s
((0, 0, 0),),
# px, py, pz
((1, 0, 0),(0, 1, 0),(0, 0, 1)),
# dxy, dyz, dz2, dxz, dx2y2
((1, 1, 0),(0, 1, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# fyx2, fxyz, fyz2, fz3, fxz2, fzx2, fx3
((0, 1, 0),(1, 1, 1),(0, 1, 0),(0, 0, 1),(1, 0, 0),
(0, 0, 1),(1, 0, 0),),
# g
((1, 1, 0),(0, 1, 1),(1, 1, 0),(0, 1, 1),(0, 0, 0),
(1, 0, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# h
((0, 1, 0),(1, 1, 1),(0, 1, 0),(1, 1, 1),(0, 1, 0),
(0, 0, 1),(1, 0, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),
(1, 0, 0),),
# i
((1, 1, 0),(0, 1, 1),(1, 1, 0),(0, 1, 1),(1, 1, 0),
(0, 1, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),(1, 0, 1),
(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# j
((0, 1, 0),(1, 1, 1),(0, 1, 0),(1, 1, 1),(0, 1, 0),
(1, 1, 1),(0, 1, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),
(1, 0, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),(1, 0, 0))
)
SUBGROUP = {
'SO3': ('SO3', 'Dooh', 'Coov', 'D2h', 'D2', 'C2v', 'C2h', 'C2', 'Cs', 'Ci', 'C1'),
'Dooh':('Dooh', 'Coov', 'D2h', 'D2', 'C2v', 'C2h', 'C2', 'Cs', 'Ci', 'C1'),
'Coov':('Coov', 'C2v', 'C2', 'C1'),
'D2h': ('D2h', 'C2v', 'C2h', 'C2', 'Cs', 'Ci', 'C1'),
'D2' : ('D2' , 'C2' , 'Ci' , 'C1'),
'C2v': ('C2v', 'C2' , 'Cs' , 'C1'),
'C2h': ('C2h', 'C2' , 'Cs' , 'C1'),
'Cs' : ('Cs' , 'C1'),
'Ci' : ('Ci' , 'C1'),
'C2' : ('C2' , 'C1'),
'C1' : ('C1',),
}
D2H_OPS = |
TsarFox/chandere2 | tests/websites/test_8chan.py | Python | gpl-3.0 | 1,671 | 0 | import pytest
from chandere.errors import ChandereError
from chandere.loader import load_scraper
scraper = load_scraper("8chan")
VALID_CROSSLINK_TARGETS = [
("/tech/589254", ("tech", "589254")),
("/tech/ 589254", ("tech", "589254")),
("tech/589254", ("tech", "589254")),
("/tech 589254", ("tech", "589254")),
("tech 589254", ("tech", "589254")),
("/tech/", ("tech", None)),
("/tech", ("tech", None)),
("tech/", ("tech", None)),
("tech", ("tech", None)),
]
INVALID_CROSSLINK_TARGETS = [
"/"
]
VALID_URI_TARGETS = [
("https://8ch.net/tech/res/589254.html", ("tech", "589254")),
("http://8ch.net/tech/res/589254.html", ("tech", "589254")),
("https://8ch.net/tech/res/589254.json", ("tech", "589254")),
("http://8ch.net/tech/res/589254.json", ("tech", "589254")),
("https://8ch.net/tech/", ("tech", None)),
("http://8ch.net/tech/", ("tech", None)),
]
INVALID_URI_TARGETS = [
"https://8ch.net/",
"http://8ch.net/",
"https://google.com/",
"http://google.com/",
]
def test_parse_valid_uri_target():
for target, expected in VALID_URI_TARGETS:
assert scraper.parse_target(target) == expected
def test_parse_invalid_uri_target():
for target in INVALID_URI_TARGETS:
with pytest.raises(ChandereError):
scraper.parse_target(target)
def test_parse_valid_crosslink_target():
for target, expected in VALID_CROSSLINK_TARGETS:
assert scraper.parse_target(target) == expected
def test_parse_invalid_crosslink_target():
for target in INVALID_CROSSLINK_TARGETS:
with pytest.raises(ChandereError):
scraper.parse_target(target)
|
mackjoner/peewee | playhouse/tests/test_keys.py | Python | mit | 15,919 | 0.000314 | from playhouse.tests.base import compiler
from playhouse.tests.base import database_initializer
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import PeeweeTestCase
from playhouse.tests.base import skip_if
from playhouse.tests.base import test_db
from playhouse.tests.models import *
class TestForeignKeyToNonPrimaryKey(ModelTestCase):
requires = [Package, PackageItem]
def setUp(self):
super(TestForeignKeyToNonPrimaryKey, self).setUp()
for barcode in ['101', '102']:
Package.create(barcode=barcode)
for i in range(2):
PackageItem.create(
package=barcode,
title='%s-%s' % (barcode, i))
def test_fk_resolution(self):
pi = PackageItem.get(PackageItem.title == '101-0')
self.assertEqual(pi._data['package'], '101')
self.assertEqual(pi.package, Package.get(Package.barcode == '101'))
def test_select_generation(self):
p = Package.get(Package.barcode == '101')
self.assertEqual(
[item.title for item in p.items.order_by(PackageItem.title)],
['101-0', '101-1'])
class TestMultipleForeignKey(ModelTestCase):
requires = [Manufacturer, Component, Computer]
test_values = [
['3TB', '16GB', 'i7'],
['128GB', '1GB', 'ARM'],
]
def setUp(self):
super(TestMultipleForeignKey, self).setUp()
intel = Manufacturer.create(name='Intel')
amd = Manufacturer.create(name='AMD')
kingston = Manufacturer.create(name='Kingston')
for hard_drive, memory, processor in self.test_values:
c = Computer.create(
hard_drive=Component.create(name=hard_drive),
memory=Component.create(name=memory, manufacturer=kingston),
processor=Component.create(name=processor, manufacturer=intel))
# The 2nd computer has an AMD processor.
c.processor.manufacturer = amd
c.processor.save()
def test_multi_join(self):
HDD = Component.alias()
HDDMf = Manufacturer.alias()
Memory = Component.alias()
MemoryMf = Manufacturer.alias()
Processor = Component.alias()
ProcessorMf = Manufacturer.alias()
query = (Computer
.select(
Computer,
HDD,
Memory,
Processor,
HDDMf,
MemoryMf,
ProcessorMf)
.join(HDD, on=(
Computer.hard_drive == HDD.id).alias('hard_drive'))
.join(
HDDMf,
JOIN.LEFT_OUTER,
on=(HDD.manufacturer == HDDMf.id))
.switch(Computer)
.join(Memory, on=(
Computer.memory == Memory.id).alias('memory'))
.join(
MemoryMf,
JOIN.LEFT_OUTER,
on=(Memory.manufacturer == MemoryMf.id))
.switch(Computer)
.join(Processor, on=(
Computer.processor == Processor.id).alias('processor'))
.join(
ProcessorMf,
JOIN.LEFT_OUTER,
on=(Processor.manufacturer == ProcessorMf.id))
.order_by(Computer.id))
with self.assertQueryCount(1):
vals = []
manufacturers = []
for computer in query:
components = [
computer.hard_drive,
computer.memory,
computer.processor]
vals.append([component.name for component in components])
for component in components:
if component.manufacturer:
manufacturers.append(component.manufacturer.name)
else:
manufacturers.append(None)
self.assertEqual(vals, self.test_values)
self.assertEqual(manufacturers, [
None, 'Kingston', 'Intel',
None, 'Kingston', 'AMD',
])
class TestMultipleForeignKeysJoining(ModelTestCase):
requires = [User, Relationship]
def test_multiple_fks(self):
a = User.create(username='a')
b = User.create(username='b')
c = User.create(username='c')
self.assertEqual(list(a.relationships), [])
self.assertEqual(list(a.related_to), [])
r_ab = Relationship.create(from_user=a, to_user=b)
self.assertEqual(list(a.relationships), [r_ab])
self.assertEqual(list(a.related_to), [])
self.assertEqual(list(b.relationships), [])
self.assertEqual(list(b.related_to), [r_ab])
r_bc = Relationship.create(from_user=b, to_user=c)
following = User.select().join(
Relationship, on=Relationship.to_user
).where(Relationship.from_user == a)
self.assertEqual(list(following), [b])
followers = User.select().join(
Relationship, on=Relationship.from_user
).where(Relationship.to_user == a.id)
self.assertEqual(list(followers), [])
following = User.select().join(
Relationship, on=Relationship.to_user
).where(Relationship.from_user == b.id)
self.assertEqual(list(following), [c])
followers = User.select().join(
Relationship, on=Relationship.from_user
).where(Relationship.to_user == b.id)
self.assertEqual(list(followers), [a])
following = User.select().join(
Relationship, on=Relationship.to_user
).where(Relationship.from_user == c.id)
self.assertEqual(list(following), [])
followers = User.select().join(
Relationship, on=Relationship.from_user
).where(Relationship.to_user == c.id)
self.assertEqual(list(followers), [b])
class TestCompositePrimaryKey(ModelTestCase):
requires = [Tag, Post, TagPostThrough, CompositeKeyModel, User, UserThing]
def setUp(self):
super(TestCompositePrimaryKey, self).setUp()
tags = [Tag.create(tag='t%d' % i) for i in range(1, 4)]
posts = [Post.create(title='p%d' % i) for i in range(1, 4)]
p12 = Post.create(title='p12')
for t, p in zip(tags, posts):
TagPostThrough.create(tag=t, post=p)
TagPostThrough.create(tag=tags[0], post=p12)
TagPostThrough.create(tag=tags[1], post=p12)
def test_create_table_query(self):
query, params = compiler.create_table(TagPostThrough)
self.assertEqual(
query,
'CREATE TABLE "tagpostthrough" '
'("tag_id" INTEGER NOT NULL, '
'"post_id" INTEGER NOT NULL, '
'PRIMARY KEY ("tag_id", "post_id"), '
'FOREIGN KEY ("tag_id") REFERENCES "tag" ("id"), '
'FOREIGN KEY ("post_id") REFERENCES "post" ("id")'
')')
def test_get_set_id(self):
tpt = (TagPostThrough
.select()
.join(Tag)
.switch(TagPostThrough)
.join(Post)
.order_by(Tag.tag, Post.title)).get()
# Sanity check.
self.assertEqual(tpt.tag.tag, 't1')
self.assertEqual(tpt.post.title, 'p1')
tag = Tag.select().where(Tag.tag == 't1').get()
post = Post.select().where(Post.title == 'p1').get()
self.assertEqual(tpt._get_pk_value(), (tag, post))
# set_id is a no-op.
tpt._set_pk_value(None)
self.assertEqual(tpt._get_pk_value(), (tag, post))
def test_querying(self):
posts = (Post.select()
.join(TagPostThrough)
.join(Tag)
.where(Tag.tag == 't1')
.order_by(Post.title))
self.assertEqual([p.title for p in posts], ['p1', 'p12'])
tags = (Tag.select()
.join(TagPostThrough)
.join(Post)
.where(Post.title == 'p12')
.order_by(Tag.tag))
self.assertEqual([t.tag for t in |
sbordt/markovmixing | markovmixing/video_util.py | Python | mit | 1,631 | 0.043532 | """ Make videos.
"""
def matplotlib_plots_to_video(path, ffigure, n):
""" Make a video from a sequence of matplotlib figures.
The function ffigure will be called repeatedly to retrieve
the sequence of matplotlib figures.
Each figure must have been created using 'plt.figure(figsize=(19.20, 10.80), dpi=100)'.
The video will consist of n frames (or figures), where 100 frames correspond to 1 second.
The video will have a resolution of 1920*1080.
path: path of the video file (*.avi)
ffigure: callback function that takes the frame number as a single argument
n: number of frames
| """
import cv2, io
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
print "INFO: Creating a video that lasts "+`n/100.0`+" seconds."
# Define the codec and create VideoWriter object
fourcc = cv2.cv.CV_FOURCC(*'FMP4')
out = cv2.VideoWriter(path, fourcc, 100.0, (1920,1080))
for i in xrange(n):
# get the next figure
fig = ffigure(*(i,))
# figure to png in memory
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=100)
buf.seek(0)
# png in memory to PIL image
pil_image = Image.open(buf).convert('RGB')
# PIL image to numpy BGR array
open_cv_image = np.array(pil_image)
# Convert RGB to BGR
open_cv_image = open_cv_image[:, :, ::-1].copy()
# write the frame
out.write(open_cv_image)
# close buffer
buf.close()
# clear figure
plt.clf()
plt.close()
if i % 50 == 0 and i != 0:
print `i/100.0`+" second(s) completed."
# Release everything if job is finished
out.release()
cv2.destroyAllWindows()
print "Video created." |
mscansian/SigmaWebPlus | plus/xmlparser.py | Python | gpl-3.0 | 4,326 | 0.015491 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
xmlObject.py
XXX
Public methods
XXX
Dependencies (within the project)
'''
from lxml.etree import XML
class aluno:
xmlData = None
alunoData = None
def get(self, key):
try: return self.alunoData[key]
except: return None
def __init__(self, xmlData):
self.xmlData = xmlData
self.alunoData = {}
xmlObject = XML(xmlData)
# Check the root tag
if xmlObject.tag <> "SigmaWeb":
raise AlunoException("Invalid root tag '"+xmlObject.tag+"'")
for categoria in xmlObject:
if categoria.tag == "Aluno":
for info in categoria:
self.alunoData[info.tag] = info.text
elif categoria.tag == "Materias":
self.alunoData['Materias'] = []
for materia in categoria:
if materia.tag == "Materia":
dadosMateria = {
'Nome' : materia.get("Nome"),
'Cod' : materia.get("COD"),
'Turma' : materia.get("Turma"),
'Centro': materia.get("Centro"),
'Notas' : [],
'MediaParcial': None,
'Exame' : None,
'ExameReq' : None,
'MediaFinal' : None
}
for nota in materia:
if nota.tag == "Nota":
dadosMateria['Notas'].append({'Peso': nota.get("Peso").replace('%',''), 'Desc': nota.get("Desc"), 'Valor': self._float(nota.text, True)})
elif nota.tag == "Exame":
dadosMateria['Exame'] = self._float(nota.text, True)
| elif nota.tag == "MediaFinal":
dadosMateria['MediaFinal'] = self._float(nota.text, True)
mediaParcial, somaPesos, notasPublicadas = [0, 0, 0]
for nota in dadosMateria['Notas']:
if (nota['Valor'] != None):
notasPublicadas += 1
mediaParcial += self._float(nota['Valor']) * self._float(nota['Peso'])
somaPesos += self._float(nota['Peso'])
if dadosMateria['MediaFinal'] <> None: dadosMateria['MediaParcial'] = dadosMateria['MediaFinal']
elif dadosMateria['Exame'] <> None: dadosMateria['MediaParcial'] = ((mediaParcial / somaPesos) * 0.6 + dadosMateria['Exame']*0.4)
elif (notasPublicadas > 0): dadosMateria['MediaParcial'] = mediaParcial / somaPesos
else: dadosMateria['MediaParcial'] = None
if (notasPublicadas == len(dadosMateria['Notas']) and dadosMateria['MediaParcial'] is not None): dadosMateria['ExameReq'] = (5 - (dadosMateria['MediaParcial']*0.6))/0.4
else: dadosMateria['ExameReq'] = None
self.alunoData['Materias'].append(dadosMateria)
'''
Private float helper. Tries to cast to float; on failure, the return value depends on the 'text' flag and the value passed in:
num = None: returns None
text = True: returns the value itself as str
text = False: returns 0.0
'''
def _float(self, num, text=False):
if num == None: return None
try: return float(num)
except: return num if text else 0.0
class AlunoException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value) |
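A minimal exercise of the parser above (a sketch: the XML snippet is invented to match the tags the class reads; lxml must be installed):

from plus.xmlparser import aluno

xml = ('<SigmaWeb>'
       '<Aluno><Nome>Test</Nome></Aluno>'
       '<Materias>'
       '<Materia Nome="Math" COD="M1" Turma="A" Centro="C">'
       '<Nota Peso="50%" Desc="P1">8.0</Nota>'
       '<Nota Peso="50%" Desc="P2">6.0</Nota>'
       '</Materia>'
       '</Materias>'
       '</SigmaWeb>')

a = aluno(xml)
print a.get('Nome')                         # 'Test'
print a.get('Materias')[0]['MediaParcial']  # 7.0 = (8*50 + 6*50) / 100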
wonderful4228/qualitybots | src/appengine/handlers/statistics_handler.py | Python | apache-2.0 | 4,705 | 0.007864 | #!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes average scores for browsers."""
import datetime
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from handlers import base
from models import browser_score
COMPUTE_AVERAGE_SCORE_URL = '/stats/average'
COMPUTE_MULTI_SUITE_AVERAGE_URL = '/stats/multi'
class ComputeAverageScore(webapp.RequestHandler):
"""Handler for computing average suite scores.
Computes average scores for each browser involved in a suite.
"""
# Disable 'Invalid method name' lint error.
# pylint: disable-msg=C6409
def get(self):
"""Calculates the average suite score per test browser."""
suite_key = self.request.get('suite')
suite = db.get(db.Key(suite_key))
test_browsers = suite.GetTestBrowsers()
# Fetch all the results (page-delta).
pd_results = []
query = suite.results
pd = query.fetch(1000)
last_cursor = query.cursor()
while pd:
pd_results.extend(pd)
query = query.with_cursor(last_cursor)
pd = query.fetch(1000)
last_cursor = query.cursor()
scores = {}
counts = {}
for test_browser in test_browsers:
scores[test_browser.key().name()] = 0
counts[test_browser.key().name()] = 0
for result in pd_results:
# Check for invalid results
if result.score < 0:
continue
browser_key = result.GetTestBrowser().key().name()
# Only count results that are non-ignored.
if not result.ignore:
scores[browser_key] += result.score
counts[browser_key] += 1
for test_browser in test_browsers:
test_browser_key = test_browser.key().name()
scores[test_browser_key] /= float(counts[test_browser_key])
average_score = browser_score.GetOrInsertBrowserScore(suite, test_browser)
average_score.layout_score = scores[test_browser_key]
average_score.num_urls = counts[test_browser_key]
average_score.date = datetime.datetime.utcnow()
average_score.put()
self.redirect('/suite/stats?suite=%s' % suite_key)
class GetAverageScoreOfMultiSuites(base.BaseHandler):
"""Handler for computing average score among multiple suites.
Computes the average scores for each browser in the specified suites.
"""
# Disable 'Invalid method name' lint error.
# pylint: disable-msg=C6409
def get(self):
"""Calculates the average score per test browser across mutliple suites."""
suite_keys = self.request.get_all('suite')
browser_scores = {}
browser_num_urls = {}
for suite_key in suite_keys:
suite = db.get(db.Key(suite_key))
for test_browser in suite.GetTestBrowsers():
score = browser_score.GetOrInsertBrowserScore(suite, test_browser)
browser_name = unicode(test_browser)
if not browser_name in browser_scores:
browser_scores[browser_name] = 0
browser_num_urls[browser_name] = 0
total_urls = browser_num_urls[browser_name] + score.num_urls
browser_scores[browser_name] = (
browser_scores[browser_name] * browser_num_urls[browser_name] +
score.layout_score * score.num_urls) / float(total_urls)
browser_num_urls[browser_name] = total_urls
score_output = []
num_urls_output = []
keys = browser_scores.keys()
keys.sort()
for browser_name in keys:
score_output.append('["%s", %f]' % (browser_name,
browser_scores[browser_name]))
num_urls_output.append('%s (%d urls)' % (browser_name,
browser_num_urls[browser_name]))
template_values = {'browser_scores': ',\n'.join(score_output),
'test_browsers': num_urls_output}
self.RenderTemplate('suite_stats.html', template_values)
application = webapp.WSGIApplication(
[(COMPUTE_AVERAGE_SCORE_URL, ComputeAverageScore),
(COMPUTE_MULTI_SUITE_AVERAGE_URL, GetAverageScoreOfMultiSuites)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
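GetAverageScoreOfMultiSuites above folds each suite's per-browser average into a running weighted mean; the update step in isolation looks like this (a sketch, detached from App Engine):

def fold_weighted_mean(mean, n, suite_score, suite_n):
    # combine a running (mean, count) with one suite's (layout score, url count)
    total = n + suite_n
    return (mean * n + suite_score * suite_n) / float(total), total

mean, n = 0.0, 0
mean, n = fold_weighted_mean(mean, n, 95.0, 10)
mean, n = fold_weighted_mean(mean, n, 85.0, 30)
print(mean, n)  # 87.5 over 40 urls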
rronen/HAF-score | mean_pairwise_diff.py | Python | gpl-2.0 | 2,366 | 0.023246 | #!/usr/bin/env python
''' plotting utility for haplotype CFP peak and trough during a selective sweep '''
import sys
import os
import numpy as np
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
''' internal imports, parent dir '''
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import params as p
import cfp_score as cfp
import hap_reader_ms as hread
mpop_file = "pop.mpop"
mpop_cmd = "/home/rronen/bin/mpop -i pop.ms -o %s -N 2000 -m 0.012 -g 100" % mpop_file
ms_file = "pop.ms"
ms_cmd = "/home/rronen/bin/ms 2000 1 -t 48.0 > %s" % ms_file
###############################################################################
def go_ms():
mean_ds = []
for i in range(100):
os.system(ms_cmd)
mean_d = mean_dist(ms_file, "ms")
mean_ds.append(mean_d)
print
print "mean pairwise dist using cmd: %s was %g" % (ms_cmd, np.mean(mean_ds))
print
###############################################################################
def go_mpop():
mean_ds = []
for i in range(100):
os.system(mpop_cmd)
mean_d = mean_dist(mpop_file, "mpop")
mean_ds.append(mean_d)
print
print "mean pairwise dist using cmd: %s was %g" % (mpop_cmd, np.mean(mean_ds))
print
###############################################################################
def mean_dist(sim_file, format):
if format == 'ms':
hap_mat, col_freqs, positions = hread.read_from_ms_file(sim_file)
elif format == 'mpop':
hap_mat, col_freqs, ba_col, positions = hread.read_from_mpop_file(sim_file)
else:
print "\n\t'format' must be one of 'ms' or 'mpop'. Quitting...\n\n"
sys.exit(1)
n = len(hap_mat)
dist_mat = pdist(hap_mat, 'hamming')
dist_mat = squareform(dist_mat)
mean_diff = 0
for i in range(n):
for j in range(i+1, n):
mean_diff = mean_diff + dist_mat[i,j]*len(hap_mat[0])
mean_diff = mean_diff/(n*(n-1)/2.0)
print mean_diff
return mean_diff
###############################################################################
if __name__ == '__main__':
# if len(sys.argv) != 3:
# print "\n\tusage: %s '<ms-like-file.ms> <'ms'/'mpop'>'\n" % sys.argv[0]
# else:
# go(sys.argv[1], sys.argv[2])
go_ms() # mean pairwise dist using cmd: ms 2000 1 -t 48.0 > pop.init == 25.8554
go_mpop() # mean pairwise dist using cmd: mpop -i pop.ms -o pop.mpop -N 2000 -m 0.012 -g 100 == 19.369463199103
|
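Since pdist(..., 'hamming') returns the fraction of differing sites per pair, the double loop in mean_dist is equivalent to scaling the mean of the condensed distance vector by the number of sites; a quick sanity check (a sketch with a toy matrix):

import numpy as np
from scipy.spatial.distance import pdist

hap_mat = np.array([[0, 1, 0, 1],
                    [0, 0, 0, 1],
                    [1, 1, 1, 1]])
n_sites = hap_mat.shape[1]
print(pdist(hap_mat, 'hamming').mean() * n_sites)  # 2.0 mean pairwise differences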
Spiderlover/Toontown | toontown/coghq/LawbotOfficeLobby_Trap00.py | Python | mit | 19,705 | 0.000457 | from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_11/models/lawbotHQ/LB_Zone04a',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
100018: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(54.7666, 7.03896, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 1, 1, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
100019: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-58.0835, 7.37219, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 1, 1, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
100015: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 100005,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 0,
'isLock1Unlocked': 1,
'isLock2Unlocked': 0,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 100016,
'unlock1Event': 0,
'unlock2Event': 100017,
'unlock3Event': 0},
100016: {'type': 'laserField',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-15.1345, -13.2285, 0.25),
'hpr': Point3(90, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 0,
'gridGame': 'Random',
'gridScaleX': 42.0,
'gridScaleY': 40.0,
'laserFactor': 3,
'modelPath': 0,
'projector': Point3(20, 40, 45),
'switchId': 100019},
100017: {'type': 'laserField',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(11.2941, 28.7739, 0.28),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 1,
'gridGame': 'Random',
'gridScaleX': 42.0,
'gridScaleY': 40.0,
'laserFactor': 3,
'modelPath': 0,
'projector': Point3(20, 40, 45),
'switchId': 100018},
100001: {'type': 'model',
'name': 'copy of partition (3)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(-8.98508, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100002: {'type': 'model',
'name': 'copy of partition (4)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(5.36486, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100003: {'type': 'model',
'name': 'copy of partition (5)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(20.1513, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100004: {'type': 'model',
'name': 'copy of partition (6)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(34.9439, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100007: {'type': 'model',
'name': 'copy of partition',
'comment': '',
'parentEntId': 100006,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100008: {'type': 'model',
'name': 'copy of partition (2)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-14.9029, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100009: {'type': 'model',
'name': 'copy of partition (3)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-29.7119, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100010: {'type': 'model',
'name': 'copy of partition (4)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-44.4821, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100012: {'type': 'model',
'name': 'copy of partition (3)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100013: {'type': 'model',
'name': 'copy of partition (4)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(-14.9149, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100014: {'type': 'model',
'name': 'copy of partition (5)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(-29.7289, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100023: {'type': 'model',
'name': 'copy of partition (6)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(-44.4361, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100025: {'type': 'model',
'name': 'copy of partition (7)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(42.3323, -38.4749, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam' |
jordant/err-chef | chef.py | Python | gpl-3.0 | 2,609 | 0.014182 | #!/usr/bin/env python
import chef
import datetime
from errbot import BotPlugin, botcmd
from time import time
STALE_TIME = 60 * 30 # 30 minutes
class Chef(BotPlugin):
def pretty_time(self, time):
return datetime.datetime.fromtimestamp(int(time)).strftime('%Y-%m-%d %H:%M:%S')
def search_node(self, args):
api = chef.autoconfigure()
if not args:
raise Exception("No Search Query")
return chef.Search('node', args)
@botcmd
def search (self, mess, args):
""" Search and return nodes """
| list = "Search results for query : %s\n" % args
| for row in self.search_node(args):
list += "%s\n" % row.object.name
return(list)
@botcmd
def roles (self, mess, args):
""" Search and return roles """
api = chef.autoconfigure()
roles = ''
for row in chef.Search('role', 'name:*' + args + '*'):
roles += "%s\n" % row.object.name
return(roles)
@botcmd
def stale(self, mess, args):
""" Search for stale nodes """
list = "Stale nodes for query : %s ( stale time %s seconds )\n" % (args, STALE_TIME)
for row in self.search_node(args):
if row.object.attributes['ohai_time']:
ago = int(time() - row.object.attributes['ohai_time'])
pretty_ohai_time = self.pretty_time(row.object.attributes['ohai_time'])
if ago >= STALE_TIME:
list += "%s ran %s seconds ago ( %s )\n" % (row.object.name, ago, pretty_ohai_time)
return(list)
@botcmd
def dpkg (self, mess, args):
""" Search installed pacakge versions via Chef API ( requires ohai-dpkg) """
(search, package) = args.split()
if not package:
raise Exception("No package")
packages = ''
for row in self.search_node(search):
if not row.object.attributes['dpkg']:
continue
if not row.object.attributes['dpkg'][package]:
continue
pacakges += "%s\t%s\n" % ( row.object.name , row.object.attributes['dpkg'][package]['version'] )
return(pacakges)
|
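The staleness test in the stale command reduces to comparing a node's ohai_time against a cutoff; in isolation (a sketch with invented timestamps):

from time import time

STALE_TIME = 60 * 30  # 30 minutes, as in the plugin

def is_stale(ohai_time, now=None):
    now = time() if now is None else now
    return int(now - ohai_time) >= STALE_TIME

print(is_stale(time() - 3600))  # True: last Chef run one hour ago
print(is_stale(time() - 60))    # False: ran a minute ago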
DazWorrall/ansible | lib/ansible/modules/packaging/os/openbsd_pkg.py | Python | gpl-3.0 | 26,447 | 0.005067 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrik Lundin <patrik@sigterm.se>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openbsd_pkg
author: "Patrik Lundin (@eest)"
version_added: "1.1"
short_description: Manage packages on OpenBSD.
description:
- Manage packages on OpenBSD using the pkg tools.
requirements: [ "python >= 2.5" ]
options:
name:
required: true
description:
- Name of the package.
state:
required: true
choices: [ present, latest, absent ]
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
build:
required: false
choices: [ yes, no ]
default: no
description:
- Build the package from source instead of downloading and installing
a binary. Requires that the port source tree is already installed.
Automatically builds and installs the 'sqlports' package, if it is
not already installed.
version_added: "2.1"
ports_dir:
required: false
default: /usr/ports
description:
- When used in combination with the 'build' option, allows overriding
the default ports source directory.
version_added: "2.1"
clean:
required: false
choices: [ yes, no ]
default: no
description:
- When updating or removing packages, delete the extra configuration
file(s) in the old packages which are annotated with @extra in
the packaging-list.
version_added: "2.3"
quick:
required: false
choices: [ yes, no ]
default: no
description:
- Replace or delete packages quickly; do not bother with checksums
before removing normal files.
version_added: "2.3"
'''
EXAMPLES = '''
# Make sure nmap is installed
- openbsd_pkg:
name: nmap
state: present
# Make sure nmap is the latest version
- openbsd_pkg:
name: nmap
state: latest
# Make sure nmap is not installed
- openbsd_pkg:
name: nmap
state: absent
# Make sure nmap is installed, build it from source if it is not
- openbsd_pkg:
name: nmap
state: present
build: yes
# Specify a pkg flavour with '--'
- openbsd_pkg:
name: vim--no_x11
state: present
# Specify the default flavour to avoid ambiguity errors
- openbsd_pkg:
name: vim--
state: present
# Specify a package branch (requires at least OpenBSD 6.0)
- openbsd_pkg:
name: python%3.5
state: present
# Update all packages on the system
- openbsd_pkg:
name: '*'
state: latest
# Purge a package and it's configuration files
- openbsd_pkg: name=mpd clean=yes state=absent
# Quickly remove a package without checking checksums
- openbsd_pkg: name=qt5 quick=yes state=absent
'''
import os
import platform
import re
import shlex
import sqlite3
from distutils.version import StrictVersion
# Function used for executing commands.
def execute_command(cmd, module):
# Break command line into arguments.
# This makes run_command() use shell=False which we need to not cause shell
# expansion of special characters like '*'.
cmd_args = shlex.split(cmd)
return module.run_command(cmd_args)
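# An illustrative aside (not part of the original module): splitting the
# command keeps a literal '*' out of shell globbing because run_command()
# receives an argv list rather than a shell string:
#   shlex.split("pkg_info -Iq inst:*")  ->  ['pkg_info', '-Iq', 'inst:*']
#   shlex.split("pkg_add -Im 'vim--'")  ->  ['pkg_add', '-Im', 'vim--']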
# Function used to find out if a package is currently installed.
def get_package_state(names, pkg_spec, module):
info_cmd = 'pkg_info -Iq'
for name in names:
command = "%s inst:%s" % (info_cmd, name)
rc, stdout, stderr = execute_command(command, module)
if stderr:
module.fail_json(msg="failed in get_package_state(): " + stderr)
if stdout:
# If the requested package name is just a stem, like "python", we may
# find multiple packages with that name.
pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()]
module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
pkg_spec[name]['installed_state'] = True
else:
pkg_spec[name]['installed_state'] = False
# Function used to make sure a package is present.
def package_present(names, pkg_spec, module):
build = module.params['build']
for name in names:
# It is possible package_present() has been called from package_latest().
# In that case we do not want to operate on the whole list of names,
# only the leftovers.
if pkg_spec['package_latest_leftovers']:
if name not in pkg_spec['package_latest_leftovers']:
module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
continue
else:
module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
if module.check_mode:
install_cmd = 'pkg_add -Imn'
else:
if build is True:
port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
if os.path.isdir(port_dir):
if pkg_spec[name]['flavor']:
flavors = pkg_spec[name]['flavor'].replace('-', ' ')
install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
elif pkg_spec[name]['subpackage']:
install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
pkg_spec[name]['subpackage'])
else:
install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
else:
module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
else:
install_cmd = 'pkg_add -Im'
if pkg_spec[name]['installed_state'] is False:
# Attempt to install the package
if build is True and not module.check_mode:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, module, use_unsafe_shell=True)
else:
(pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
# The behaviour of pkg_add is a bit different depending on if a
# specific version is supplied or not.
#
# When a specific version is supplied the return code will be 0 when
# a package is found and 1 when it is not. If a version is not
# supplied the tool will exit 0 in both cases.
#
# It is important to note that "version" relates to the
# packages-specs(7) notion of a version. If using the branch syntax
# (like "python%3.5") even though a branch name may look like a
# version string it is not used an one by pkg_add.
if pkg_spec[name]['version'] or build is True:
# Depend on the return code.
module.debug("package_present(): depending on return code for name '%s'" % name)
if pkg_spec[name]['rc']:
pkg_spec[name]['changed'] = False
else:
# Depend on stderr instead.
module.debug("package_present(): depending on stderr for name '%s'" % name)
if pkg_spec[name]['stderr']:
# Th |
rwl/PyCIM | CIM15/CDPSM/Balanced/IEC61970/Core/IdentifiedObject.py | Python | mit | 3,613 | 0.002491 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.CDPSM.Balanced.Element import Element
class IdentifiedObject(Element):
"""This is a root class to provide common identification for all classes needing identification and naming attributes
"""
def __init__(self, name='', aliasName='', Names=None, *args, **kw_args):
"""Initialises a new 'IdentifiedObject' instance.
@param name: The name is any free human readable and possibly non unique text naming the object.
@param aliasName: The aliasName is free text human readable name of the object alternative to IdentifiedObject.name. It may be non unique and may not correlate to a naming hierarchy. The attribute aliasName is put back because of backwards compatibility between CIM releases. It is however recommended to replace aliasName with the Name class as aliasName is planned for retirement at a future time. This was decided at a joint WG13/14 meeting in Minneapolis 2010-10-06.
@param Names: All names of this identified object.
"""
#: The name is any free human readable and possibly non unique text naming the object.
self.name = name
#: The aliasName is free text human readable name of the object alternative to IdentifiedObject.name. It may be non unique and may not correlate to a naming hierarchy. The attribute aliasName is put back because of backwards compatibility between CIM releases. It is however recommended to replace aliasName with the Name class as aliasName is planned for retirement at a future time. This was decided at a joint WG13/14 meeting in Minneapolis 2010-10-06.
self.aliasName = aliasName
self._Names = []
self.Names = [] if Names is None else Names
super(IdentifiedObject, self).__init__(*args, **kw_args)
_attrs = ["name", "aliasName"]
_attr_types = {"name": str, "aliasName": str}
_defaults = {"name": '', "aliasName": ''}
_enums = {}
_refs = ["Names"]
_many_refs = ["Names"]
def getNames(self):
"""All names of this identified object.
"""
return self._Names
def setNames(self, value):
for x in self._Names:
x.IdentifiedObject = None
for y in value:
y._IdentifiedObject = self
self._Names = value
Names = property(getNames, setNames)
def addNames(self, *Names):
for obj in Names:
obj.IdentifiedObject = self
def removeNames(self, *Names):
for obj in Names:
obj.IdentifiedObject = None
|
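A minimal use of the class above (a sketch; assumes the PyCIM package is importable):

from CIM15.CDPSM.Balanced.IEC61970.Core.IdentifiedObject import IdentifiedObject

obj = IdentifiedObject(name='Feeder-1', aliasName='F1')
print(obj.name)   # 'Feeder-1'
print(obj.Names)  # [] until Name objects are attached via addNames()/setNames()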
FluidityStokes/fluidity | libspud/diamond/diamond/sliceview.py | Python | lgpl-2.1 | 3,408 | 0.010563 | # This file is part of Diamond.
#
# Diamond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diamond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diamond. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject as gobject
from gi.repository import Gtk as gtk
from . import attributewidget
from . import databuttonswidget
from . import datawidget
from . import mixedtree
class SliceView(gtk.Window):
__gsignals__ = { "on-store" : (gobject.SignalFlags.RUN_LAST, gobject.TYPE_NONE, ()),
"update-name" : (gobject.SignalFlags.RUN_LAST, gobject.TYPE_NONE, ())}
def __init__(self, parent):
gtk.Window.__init__(self)
self.set_default_size(800, 600)
self.set_title("Slice View")
self.set_modal(True)
self.set_transient_for(parent)
mainvbox = gtk.VBox()
self.vbox = gtk.VBox()
scrolledWindow = gtk.ScrolledWindow()
scrolledWindow.set_policy(gtk.PolicyType.NEVER, gtk.PolicyType.AUTOMATIC)
scrolledWindow.add_with_viewport(self.vbox)
self.databuttons = databuttonswidget.DataButtonsWidget()
self.statusbar = gtk.Statusbar()
mainvbox.pack_start(scrolledWindow, True, True, 0)
mainvbox.pack_start(self.databuttons, False, True, 0)
mainvbox.pack_start(self.statusbar, False, True, 0)
self.add(mainvbox)
self.show_all()
def update(self, node, tree):
nodes = self.get_nodes(node, tree)
if not nodes:
self.destroy()
for n in nodes:
self.vbox.pack_start(self.control(n), True, True, 0)
maxwidth = 0
for child in self.vbox.get_children():
width, height = child.label.get_size_request()
maxwidth = max(maxwidth, width)
for child in self.vbox.get_children():
child.label.set_size_request(maxwidth, -1)
self.check_resize()
def get_nodes(self, node, tree):
nodes = []
for child in tree.get_children():
if child.active:
if child.name == node.name and child.is_sliceable():
nodes.append(child.get_mixed_data())
nodes += self.get_nodes(node, child)
return nodes
def control(self, node):
hbox = gtk.HBox()
label = gtk.Label(node.get_name_path())
hbox.label = label
data = datawidget.DataWidget()
data.geometry_dim_tree = self.geometry_dim_tree
data.connect("on-store", self.on_store)
data.set_buttons(self.databuttons)
data.update(node)
attributes = attributewidget.AttributeWidget()
attributes.connect("on-store", self.on_store)
attributes.connect("update-name", self.update_name)
attributes.update(node)
hbox.pack_start(label, True, True, 0)
hbox.pack_start(data, True, True, 0)
hbox.pack_start(attributes, True, True, 0)
hbox.show_all()
return hbox
def on_store(self, widget = None):
self.emit("on-store")
def update_name(self, widget = None):
self.emit("update-name")
gobject.type_register(SliceView)
|
orwa1902/code-exemples | TicTacToe.py | Python | apache-2.0 | 4,091 | 0.044977 | board = []
for i in range(3):
board.append(["*"] * 3)
#a function that prints the board
def print_board(board):
for row in board:
print (" ".join(row))
print("Let\'s play Tic Tac Toe! the rules are simple. you, the players, is given two choices: the row and column of your symbol. note that the numbers are between 0 and 2 and not 1 and 3. have fun!")
print_board(board)
#the game itself
for turn in range(1,10):
print("turn number: " + str(turn))
#every odd turn is X's and every even turn is O's, because the X player always starts
if turn % 2 == 1:
print ("X turn")
X_row = int(input("choose row: "))
X_col = int(input("choose column: "))
# if given a number bigger than 2 or smaller than 0, you will lose a turn
if (X_col > 2 or X_col < 0 or X_row > 2 or X_row < 0):
print("oops, that not even on the board")
print(print_board(board))
turn = turn + 1
# same goes for picking the same spot or someone else's same spot again
elif board[X_row][X_col] == "X" or board[X_row][X_col] == "O":
print("you have already played that spot or someone else did. try again")
print_board(board)
else:
board[X_row][X_col] = "X"
print_board(board)
turn = turn + 1
else:
print("O turn")
O_col = int(input("choose row: "))
O_row = int(input("choose column: "))
if (O_col > 2 or O_col < 0 or O_row > 2 or O_row < 0):
print("oops, that not even on the board")
print(print_board(board))
turn = turn + 1
elif board[O_row][O_col] == "X" or board[O_row][O_col] == "O":
print("you have already played that spot or someone else did. try again")
print_board(board)
else:
board[O_row][O_col] = "O"
print_board(board)
turn = turn + 1
#win conditions
#top row winner
if board[0][0] == "X" and board[0][1] == "X | " and board[0][2] == "X":
x = "winner"
print("the winner is X")
break;
#middle row winner
elif board[1][0] == "X" and board[1][1] == "X" and board[1][2] == "X":
x = "winner"
print("the winner is X")
break;
#bottom row winner
elif board[2][0] == "X" and board[2][1] == "X" and board[2][2] == "X":
| x = "winner"
print("the winner is X")
break;
#left column winner
elif board[0][0] == "X" and board[1][0] == "X" and board[2][0] == "X":
x = "winner"
print("the winner is X")
break;
#middle column winner
elif board[0][1] == "X" and board[1][1] == "X" and board[2][1] == "X":
x = "winner"
print("the winner is X")
break;
#right column winner
elif board[0][2] == "X" and board[1][2] == "X" and board[2][2] == "X":
x = "winner"
print("the winner is X")
break;
#left crossover winner
elif board[0][0] == "X" and board[1][1] == "X" and board[2][2] == "X":
x = "winner"
print("the winner is X")
break;
elif board[2][0] == "X" and board[1][1] == "X" and board[0][2] == "X":
x = "winner"
print("the winner is X")
break;
#top row winner
if board[0][0] == "O" and board[0][1] == "O" and board[0][2] == "O":
o = "winner"
print("the winner is O")
break;
#middle row winner
elif board[1][0] == "O" and board[1][1] == "O" and board[1][2] == "O":
o = "winner"
print("the winner is O")
break;
#bottom row winner
elif board[2][0] == "O" and board[2][1] == "O" and board[2][2] == "O":
o = "winner"
print("the winner is O")
break;
#left column winner
elif board[0][0] == "O" and board[1][0] == "O" and board[2][0] == "O":
o = "winner"
print("the winner is O")
break;
#middle column winner
elif board[0][1] == "O" and board[1][1] == "O" and board[2][1] == "O":
o = "winner"
print("the winner is O")
break;
#right column winner
elif board[0][2] == "O" and board[1][2] == "O" and board[2][2] == "O":
o = "winner"
print("the winner is O")
break;
#left crossover winner
elif board[0][0] == "O" and board[1][1] == "O" and board[2][2] == "O":
o = "winner"
print("the winner is O")
break;
#right crossover winner
elif board[2][0] == "O" and board[1][1] == "O" and board[0][2] == "O":
o = "winner"
print("the winner is O")
break;
#the most logical condition - a tie
if turn == 9:
print("It\'s a Tie!")
break;
|
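The repeated elif chains above can be collapsed into one helper; a compact equivalent win check (a sketch):

def wins(board, s):
    lines = [[(r, c) for c in range(3)] for r in range(3)]   # rows
    lines += [[(r, c) for r in range(3)] for c in range(3)]  # columns
    lines += [[(i, i) for i in range(3)],                    # the two diagonals
              [(i, 2 - i) for i in range(3)]]
    return any(all(board[r][c] == s for r, c in line) for line in lines)

if wins(board, "X"):
    print("the winner is X")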