| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
| ProfessionalIT/maxigenios-website | refs/heads/master | sdk/google_appengine/google/appengine/tools/devappserver2/php/runtime.py | 5 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PHP devappserver2 runtime."""
import base64
import cStringIO
import httplib
import logging
import os
import subprocess
import sys
import time
import urllib
import google
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import environ_utils
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import php
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import safe_subprocess
from google.appengine.tools.devappserver2 import wsgi_server
SDK_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'php/sdk'))
if not os.path.exists(SDK_PATH):
SDK_PATH = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),
'php/sdk'))
SETUP_PHP_PATH = os.path.join(os.path.dirname(php.__file__), 'setup.php')
class PHPRuntime(object):
"""A WSGI application that runs PHP scripts using the PHP CGI binary."""
def __init__(self, config):
logging.debug('Initializing runtime with %s', config)
self.config = config
if appinfo.MODULE_SEPARATOR not in config.version_id:
module_id = appinfo.DEFAULT_MODULE
version_id = config.version_id
else:
module_id, version_id = config.version_id.split(appinfo.MODULE_SEPARATOR)
self.environ_template = {
'APPLICATION_ID': str(config.app_id),
'CURRENT_MODULE_ID': module_id,
'CURRENT_VERSION_ID': version_id,
'DATACENTER': str(config.datacenter),
'INSTANCE_ID': str(config.instance_id),
'APPENGINE_RUNTIME': 'php',
'AUTH_DOMAIN': str(config.auth_domain),
'HTTPS': 'off',
# By default php-cgi does not allow .php files to be run directly so
# REDIRECT_STATUS must be set. See:
# http://php.net/manual/en/security.cgi-bin.force-redirect.php
'REDIRECT_STATUS': '1',
'REMOTE_API_HOST': str(config.api_host),
'REMOTE_API_PORT': str(config.api_port),
'SERVER_SOFTWARE': http_runtime_constants.SERVER_SOFTWARE,
'STDERR_LOG_LEVEL': str(config.stderr_log_level),
'TZ': 'UTC',
}
self.environ_template.update((env.key, env.value) for env in config.environ)
def make_php_cgi_environ(self, environ):
"""Returns a dict of environ for php-cgi based off the wsgi environ."""
user_environ = self.environ_template.copy()
environ_utils.propagate_environs(environ, user_environ)
user_environ['REQUEST_METHOD'] = environ.get('REQUEST_METHOD', 'GET')
user_environ['PATH_INFO'] = environ['PATH_INFO']
user_environ['QUERY_STRING'] = environ['QUERY_STRING']
# Construct the partial URL that PHP expects for REQUEST_URI
# (http://php.net/manual/en/reserved.variables.server.php) using part of
# the process described in PEP-333
# (http://www.python.org/dev/peps/pep-0333/#url-reconstruction).
user_environ['REQUEST_URI'] = urllib.quote(user_environ['PATH_INFO'])
if user_environ['QUERY_STRING']:
user_environ['REQUEST_URI'] += '?' + user_environ['QUERY_STRING']
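# Worked example (hypothetical request, not part of the original source): with
# PATH_INFO='/foo bar.php' and QUERY_STRING='a=1', urllib.quote gives
# REQUEST_URI='/foo%20bar.php?a=1'.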
# Modify the SCRIPT_FILENAME to specify the setup script that readies the
# PHP environment. Put the user script in REAL_SCRIPT_FILENAME.
user_environ['REAL_SCRIPT_FILENAME'] = os.path.normpath(
os.path.join(self.config.application_root,
environ[http_runtime_constants.SCRIPT_HEADER].lstrip('/')))
user_environ['SCRIPT_FILENAME'] = SETUP_PHP_PATH
user_environ['REMOTE_REQUEST_ID'] = environ[
http_runtime_constants.REQUEST_ID_ENVIRON]
# Pass the APPLICATION_ROOT so we can use it in the setup script. We will
# remove it from the environment before we execute the user script.
user_environ['APPLICATION_ROOT'] = self.config.application_root
if 'CONTENT_TYPE' in environ:
user_environ['CONTENT_TYPE'] = environ['CONTENT_TYPE']
user_environ['HTTP_CONTENT_TYPE'] = environ['CONTENT_TYPE']
if 'CONTENT_LENGTH' in environ:
user_environ['CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
user_environ['HTTP_CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
# On Windows, in order to run a side-by-side assembly the specified env
# must include a valid SystemRoot.
if 'SYSTEMROOT' in os.environ:
user_environ['SYSTEMROOT'] = os.environ['SYSTEMROOT']
# On Windows, TMP & TEMP environmental variables are used by GetTempPath
# http://msdn.microsoft.com/library/windows/desktop/aa364992(v=vs.85).aspx
if 'TMP' in os.environ:
user_environ['TMP'] = os.environ['TMP']
if 'TEMP' in os.environ:
user_environ['TEMP'] = os.environ['TEMP']
if self.config.php_config.enable_debugger:
user_environ['XDEBUG_CONFIG'] = environ.get('XDEBUG_CONFIG', '')
return user_environ
def make_php_cgi_args(self):
"""Returns an array of args for php-cgi based on self.config."""
# See http://www.php.net/manual/en/ini.core.php#ini.include-path.
include_paths = ['.', self.config.application_root, SDK_PATH]
if sys.platform == 'win32':
# See https://bugs.php.net/bug.php?id=46034 for quoting requirements.
include_path = 'include_path="%s"' % ';'.join(include_paths)
else:
include_path = 'include_path=%s' % ':'.join(include_paths)
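# Illustrative result (hypothetical paths, added for clarity): on Linux the
# directive becomes 'include_path=.:/srv/my_app:/opt/sdk/php/sdk'; on Windows the
# value is double-quoted and ';'-joined as required by the PHP bug linked above.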
args = [self.config.php_config.php_executable_path, '-d', include_path]
# Load php.ini from application's root.
args.extend(['-c', self.config.application_root])
if self.config.php_config.enable_debugger:
args.extend(['-d', 'xdebug.default_enable="1"'])
args.extend(['-d', 'xdebug.overload_var_dump="1"'])
args.extend(['-d', 'xdebug.remote_enable="1"'])
if self.config.php_config.xdebug_extension_path:
args.extend(['-d', 'zend_extension="%s"' %
self.config.php_config.xdebug_extension_path])
if self.config.php_config.gae_extension_path:
args.extend(['-d', 'extension="%s"' % os.path.basename(
self.config.php_config.gae_extension_path)])
args.extend(['-d', 'extension_dir="%s"' % os.path.dirname(
self.config.php_config.gae_extension_path)])
return args
def __call__(self, environ, start_response):
"""Handles an HTTP request for the runtime using a PHP executable.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
user_environ = self.make_php_cgi_environ(environ)
if 'CONTENT_LENGTH' in environ:
content = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
else:
content = ''
args = self.make_php_cgi_args()
# Handles interactive request.
request_type = environ.pop(http_runtime_constants.REQUEST_TYPE_HEADER, None)
if request_type == 'interactive':
args.extend(['-d', 'html_errors="0"'])
user_environ[http_runtime_constants.REQUEST_TYPE_HEADER] = request_type
try:
# stderr is not captured here so that it propagates to the parent process
# and gets printed out to the console.
p = safe_subprocess.start_process(args,
input_string=content,
env=user_environ,
cwd=self.config.application_root,
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
except Exception as e:
logging.exception('Failure to start PHP with: %s', args)
start_response('500 Internal Server Error',
[(http_runtime_constants.ERROR_CODE_HEADER, '1')])
return ['Failure to start the PHP subprocess with %r:\n%s' % (args, e)]
if p.returncode:
if request_type == 'interactive':
start_response('200 OK', [('Content-Type', 'text/plain')])
message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
return [message.fp.read()]
else:
logging.error('php failure (%r) with:\nstdout:\n%s',
p.returncode, stdout)
start_response('500 Internal Server Error',
[(http_runtime_constants.ERROR_CODE_HEADER, '1')])
message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
return [message.fp.read()]
message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
if 'Status' in message:
status = message['Status']
del message['Status']
else:
status = '200 OK'
# Ensures that we avoid merging repeat headers into a single header,
# allowing use of multiple Set-Cookie headers.
headers = []
for name in message:
for value in message.getheaders(name):
headers.append((name, value))
start_response(status, headers)
return [message.fp.read()]
def main():
config = runtime_config_pb2.Config()
config.ParseFromString(base64.b64decode(sys.stdin.read()))
server = wsgi_server.WsgiServer(
('localhost', 0),
request_rewriter.runtime_rewriter_middleware(PHPRuntime(config)))
server.start()
print server.port
sys.stdout.close()
sys.stdout = sys.stderr
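# Descriptive note (added comment, not in the original source): the parent
# devappserver process passes the runtime configuration base64-encoded on stdin
# and reads the chosen port back from stdout; stdout is then closed and rebound
# to stderr so later writes cannot interfere with that handshake.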
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
server.quit()
if __name__ == '__main__':
main()
|
| flass/agrogenom | refs/heads/master | pipeline/scripts_agrogenom/3_Ananalysis_of_genome_histories/spegenes_to_db.py | 1 |
#!/usr/bin/python
import sys, os, tree2
dirspegeneannot = sys.argv[1]
nfreftree = sys.argv[2]
nfout = sys.argv[3]
constrastingclades = ['N15', 'N10','N5','N4']
if len(sys.argv) > 4: maxrelax = int(sys.argv[4])
else: maxrelax = 2
lnfspegeneannot = os.listdir(dirspegeneannot)
fout = open(nfout, 'w')
reftree = tree2.ReferenceTree(fic=nfreftree)
dtag = {'related':'nb_related_strains_possessing_the_subfamily', 'remote':'nb_remote_strains_possessing_the_subfamily'}
nullstr = '\N'
for nfspegeneannot in lnfspegeneannot:
contrast = None
othertag = 'remote'
nf = nfspegeneannot.split('/')[-1]
cladespe = nf.split('.')[0]
abspres = nf.split('.')[1]
clade = reftree[cladespe]
for contrastspe in constrastingclades:
contrast = reftree[contrastspe]
if clade.is_child(contrast):
othertag = 'related'
break
else:
contrast = reftree
print clade.label(), 'vs. other in', contrast.label(), 'with max', maxrelax, 'occurrence in', othertag
if abspres=='specific_absence': nbother = contrast.nb_leaves() - clade.nb_leaves()
else: nbother = 0
fspegeneannot = open("%s/%s"%(dirspegeneannot, nfspegeneannot), 'r')
subfams = set()
header = fspegeneannot.readline().rstrip('\n').split('\t')
for line in fspegeneannot:
dannot = dict(zip(header, line.rstrip('\n').split('\t')))
subfam = dannot['subfamily']
if subfam in subfams: continue
else: subfams.add(subfam)
other = dannot[dtag[othertag]]
try:
relax = abs(nbother - int(other))
if relax > maxrelax: continue
except ValueError:
relax = nullstr
fout.write('\t'.join([subfam, cladespe, abspres, str(relax)])+'\n')
fspegeneannot.close()
print cladespe, abspres
fout.close()
|
| wanghuan1115/sdkbox-vungle-sample | refs/heads/master | cpp/cocos2d/build/android-build.py | 49 |
#!/usr/bin/python
# android-build.py
# Build android
import sys
import os, os.path
from optparse import OptionParser
CPP_SAMPLES = ['cpp-empty-test', 'cpp-tests', 'game-controller-test']
LUA_SAMPLES = ['lua-empty-test', 'lua-tests', 'lua-game-controller-test']
JS_SAMPLES = ['js-tests']
ALL_SAMPLES = CPP_SAMPLES + LUA_SAMPLES + JS_SAMPLES
def caculate_built_samples(args):
''' Compute the samples to be built.
'cpp' is shorthand for all cpp tests
'lua' is shorthand for all lua tests
'''
if 'all' in args:
return ALL_SAMPLES
targets = []
if 'cpp' in args:
targets += CPP_SAMPLES
args.remove('cpp')
if 'lua' in args:
targets += LUA_SAMPLES
args.remove('lua')
if 'js' in args:
targets += JS_SAMPLES
args.remove('js')
targets += args
# remove duplicate elements, for example
# python android-build.py cpp hellocpp
targets = set(targets)
return list(targets)
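# Usage sketch (hypothetical invocation, added for clarity):
#   caculate_built_samples(['cpp', 'js-tests'])
# expands 'cpp' to every entry of CPP_SAMPLES, keeps 'js-tests', and returns the
# de-duplicated list of targets.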
def do_build(app_android_root, build_mode):
command = 'cocos compile -p android -s %s --ndk-mode %s' % (app_android_root, build_mode)
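# Example of the resulting command (hypothetical project path, added for clarity):
#   cocos compile -p android -s tests/cpp-empty-test --ndk-mode debug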
print command
if os.system(command) != 0:
raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
def build_samples(target, build_mode):
if build_mode is None:
build_mode = 'debug'
elif build_mode != 'release':
build_mode = 'debug'
build_targets = caculate_built_samples(target)
app_android_root = ''
target_proj_path_map = {
"cpp-empty-test": "tests/cpp-empty-test",
"game-controller-test": "tests/game-controller-test",
"cpp-tests": "tests/cpp-tests",
"lua-empty-test": "tests/lua-empty-test",
"lua-tests": "tests/lua-tests",
"lua-game-controller-test": "tests/lua-game-controller-test",
"js-tests": "tests/js-tests"
}
cocos_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
for target in build_targets:
if target in target_proj_path_map:
app_android_root = os.path.join(cocos_root, target_proj_path_map[target])
else:
print 'unknown target: %s' % target
continue
do_build(app_android_root, build_mode)
# -------------- main --------------
if __name__ == '__main__':
#parse the params
usage = """
This script is mainly used for building the tests bundled with cocos2d-x.
Usage: %prog [options] [cpp-empty-test|cpp-tests|lua-empty-test|lua-tests|js-tests|cpp|lua|all]
If you are new to cocos2d-x, I recommend you start with cpp-empty-test, lua-empty-test.
You can combine these targets like this:
python android-build.py cpp-empty-test lua-empty-test
"""
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='It is not used anymore, because cocos console does not support it.')
parser.add_option("-p", "--platform", dest="android_platform",
help='This parameter is not used any more, just keep compatible.')
parser.add_option("-b", "--build", dest="build_mode",
help='The build mode for the java project, debug [default] or release. \
Get more information, \
please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
else:
try:
build_samples(args, opts.build_mode)
except Exception as e:
print e
sys.exit(1)
|
| danielhers/dynet | refs/heads/master | examples/devices/xor-multidevice.py | 6 |
# Usage:
# python xor-multidevice.py --dynet-devices CPU,GPU:0,GPU:1
# or python xor-multidevice.py --dynet-gpus 2
import sys
import dynet as dy
#xsent = True
xsent = False
HIDDEN_SIZE = 8
ITERATIONS = 2000
m = dy.Model()
trainer = dy.SimpleSGDTrainer(m)
pW1 = m.add_parameters((HIDDEN_SIZE, 2), device="GPU:1")
pb1 = m.add_parameters(HIDDEN_SIZE, device="GPU:1")
pW2 = m.add_parameters((HIDDEN_SIZE, HIDDEN_SIZE), device="GPU:0")
pb2 = m.add_parameters(HIDDEN_SIZE, device="GPU:0")
pV = m.add_parameters((1, HIDDEN_SIZE), device="CPU")
pa = m.add_parameters(1, device="CPU")
if len(sys.argv) == 2:
m.populate_from_textfile(sys.argv[1])
dy.renew_cg()
W1, b1, W2, b2, V, a = dy.parameter(pW1, pb1, pW2, pb2, pV, pa)
x = dy.vecInput(2, "GPU:1")
y = dy.scalarInput(0, "CPU")
h1 = dy.tanh((W1*x) + b1)
h1_gpu0 = dy.to_device(h1, "GPU:0")
h2 = dy.tanh((W2*h1_gpu0) + b2)
h2_cpu = dy.to_device(h2, "CPU")
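# Clarifying comment (not in the original example): each parameter lives on the
# device named in add_parameters, so the forward pass explicitly copies
# activations GPU:1 -> GPU:0 -> CPU with dy.to_device before combining them.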
if xsent:
y_pred = dy.logistic((V*h2_cpu) + a)
loss = dy.binary_log_loss(y_pred, y)
T = 1
F = 0
else:
y_pred = (V*h2_cpu) + a
loss = dy.squared_distance(y_pred, y)
T = 1
F = -1
for iter in range(ITERATIONS):
mloss = 0.0
for mi in range(4):
x1 = mi % 2
x2 = (mi // 2) % 2
x.set([T if x1 else F, T if x2 else F])
y.set(T if x1 != x2 else F)
mloss += loss.scalar_value()
loss.backward()
trainer.update()
mloss /= 4.
print("loss: %0.9f" % mloss)
x.set([F,T])
z = -(-y_pred)
print(z.scalar_value())
m.save("xor.pymodel")
dy.renew_cg()
W1, b1, W2, b2, V, a = dy.parameter(pW1, pb1, pW2, pb2, pV, pa)
x = dy.vecInput(2, "GPU:1")
y = dy.scalarInput(0, "CPU")
h1 = dy.tanh((W1*x) + b1)
h1_gpu0 = dy.to_device(h1, "GPU:0")
h2 = dy.tanh((W2*h1_gpu0) + b2)
h2_cpu = dy.to_device(h2, "CPU")
if xsent:
y_pred = dy.logistic((V*h2_cpu) + a)
else:
y_pred = (V*h2_cpu) + a
x.set([T,F])
print("TF",y_pred.scalar_value())
x.set([F,F])
print("FF",y_pred.scalar_value())
x.set([T,T])
print("TT",y_pred.scalar_value())
x.set([F,T])
print("FT",y_pred.scalar_value())
|
| aleksey-sinos/ardupilot | refs/heads/master | Tools/autotest/arducopter.py | 18 |
# fly ArduCopter in SITL
# Flight mode switch positions are set-up in arducopter.param to be
# switch 1 = Circle
# switch 2 = Land
# switch 3 = RTL
# switch 4 = Auto
# switch 5 = Loiter
# switch 6 = Stabilize
import util, pexpect, sys, time, math, shutil, os
from common import *
from pymavlink import mavutil, mavwp
import random
# get location of scripts
testdir=os.path.dirname(os.path.realpath(__file__))
FRAME='+'
TARGET='sitl'
HOME=mavutil.location(-35.362938,149.165085,584,270)
AVCHOME=mavutil.location(40.072842,-105.230575,1586,0)
homeloc = None
num_wp = 0
speedup_default = 5
def hover(mavproxy, mav, hover_throttle=1450):
mavproxy.send('rc 3 %u\n' % hover_throttle)
return True
def arm_motors(mavproxy, mav):
'''arm motors'''
print("Arming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 2000\n')
mavproxy.expect('APM: ARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_armed_wait()
print("MOTORS ARMED OK")
return True
def disarm_motors(mavproxy, mav):
'''disarm motors'''
print("Disarming motors")
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1000\n')
mavproxy.send('rc 4 1000\n')
mavproxy.expect('APM: DISARMING MOTORS')
mavproxy.send('rc 4 1500\n')
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
return True
def takeoff(mavproxy, mav, alt_min = 30, takeoff_throttle=1700):
'''takeoff get to 30m altitude'''
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 %u\n' % takeoff_throttle)
m = mav.recv_match(type='VFR_HUD', blocking=True)
if (m.alt < alt_min):
wait_altitude(mav, alt_min, (alt_min + 5))
hover(mavproxy, mav)
print("TAKEOFF COMPLETE")
return True
# loiter - fly south east, then hold loiter within 5m position and altitude
def loiter(mavproxy, mav, holdtime=10, maxaltchange=5, maxdistchange=5):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first aim south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 170):
return False
mavproxy.send('rc 4 1500\n')
#fly south east 50m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 50):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow moving
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
if success:
print("Loiter OK for %u seconds" % holdtime)
else:
print("Loiter FAILED")
return success
def change_alt(mavproxy, mav, alt_min, climb_throttle=1920, descend_throttle=1080):
'''change altitude'''
m = mav.recv_match(type='VFR_HUD', blocking=True)
if(m.alt < alt_min):
print("Rise to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % climb_throttle)
wait_altitude(mav, alt_min, (alt_min + 5))
else:
print("Lower to alt:%u from %u" % (alt_min, m.alt))
mavproxy.send('rc 3 %u\n' % descend_throttle)
wait_altitude(mav, (alt_min -5), alt_min)
hover(mavproxy, mav)
return True
# fly a square in stabilize mode
def fly_square(mavproxy, mav, side=50, timeout=300):
'''fly a square, flying N then E'''
tstart = get_sim_time(mav)
success = True
# ensure all sticks in the middle
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 2 1500\n')
mavproxy.send('rc 3 1500\n')
mavproxy.send('rc 4 1500\n')
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim north
print("turn right towards north")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 10):
print("Failed to reach heading")
success = False
mavproxy.send('rc 4 1500\n')
mav.recv_match(condition='RC_CHANNELS_RAW.chan4_raw==1500', blocking=True)
# save bottom left corner of box as waypoint
print("Save WP 1 & 2")
save_wp(mavproxy, mav)
# switch back to stabilize mode
mavproxy.send('rc 3 1430\n')
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
# pitch forward to fly north
print("Going north %u meters" % side)
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save top left corner of square as waypoint
print("Save WP 3")
save_wp(mavproxy, mav)
# roll right to fly east
print("Going east %u meters" % side)
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save top right corner of square as waypoint
print("Save WP 4")
save_wp(mavproxy, mav)
# pitch back to fly south
print("Going south %u meters" % side)
mavproxy.send('rc 2 1700\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 2 1500\n')
# save bottom right corner of square as waypoint
print("Save WP 5")
save_wp(mavproxy, mav)
# roll left to fly west
print("Going west %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side):
print("Failed to reach distance of %u") % side
success = False
mavproxy.send('rc 1 1500\n')
# save bottom left corner of square (should be near home) as waypoint
print("Save WP 6")
save_wp(mavproxy, mav)
# descend to 10m
print("Descend to 10m in Loiter")
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
mavproxy.send('rc 3 1300\n')
time_left = timeout - (get_sim_time(mav) - tstart)
print("timeleft = %u" % time_left)
if time_left < 20:
time_left = 20
if not wait_altitude(mav, -10, 10, time_left):
print("Failed to reach alt of 10m")
success = False
save_wp(mavproxy, mav)
return success
def fly_RTL(mavproxy, mav, side=60, timeout=250):
'''Return, land'''
print("# Enter RTL")
mavproxy.send('switch 3\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
if(m.alt <= 1 and home_distance < 10):
return True
return False
def fly_throttle_failsafe(mavproxy, mav, side=60, timeout=180):
'''Fly east, Failsafe, return, land'''
# switch to loiter mode temporarily to stop us from rising
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
# first aim east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 135):
return False
mavproxy.send('rc 4 1500\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
hover(mavproxy, mav)
failed = False
# fly east 60 meters
print("# Going forward %u meters" % side)
mavproxy.send('rc 2 1350\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
# pull throttle low
print("# Enter Failsafe")
mavproxy.send('rc 3 900\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# check if we've reached home
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached failsafe home OK")
return True
print("Failed to land on failsafe RTL - timed out after %u seconds" % timeout)
# reduce throttle
mavproxy.send('rc 3 1100\n')
# switch back to stabilize mode
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
return False
def fly_battery_failsafe(mavproxy, mav, timeout=30):
# assume failure
success = False
# switch to loiter mode so that we hold position
mavproxy.send('switch 5\n')
wait_mode(mav, 'LOITER')
mavproxy.send("rc 3 1500\n")
# enable battery failsafe
mavproxy.send("param set FS_BATT_ENABLE 1\n")
# trigger low voltage
mavproxy.send('param set SIM_BATT_VOLTAGE 10\n')
# wait for LAND mode
new_mode = wait_mode(mav, 'LAND')
if new_mode == 'LAND':
success = True
# disable battery failsafe
mavproxy.send('param set FS_BATT_ENABLE 0\n')
# return status
if success:
print("Successfully entered LAND mode after battery failsafe")
else:
print("Failed to enter LAND mode after battery failsafe")
return success
# fly_stability_patch - fly south, then hold loiter within 5m position and altitude and reduce 1 motor to 55% efficiency
def fly_stability_patch(mavproxy, mav, holdtime=30, maxaltchange=5, maxdistchange=10):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# first south
print("turn south")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 180):
return False
mavproxy.send('rc 4 1500\n')
#fly south 80m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 80):
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow moving
if not wait_groundspeed(mav, 0, 2):
return False
success = True
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = mav.location()
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Holding loiter at %u meters for %u seconds" % (start_altitude, holdtime))
# cut motor 1 to 55% efficiency
print("Cutting motor 1 to 55% efficiency")
mavproxy.send('param set SIM_ENGINE_MUL 0.55\n')
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
delta = get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
print("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
print("Loiter alt shifted %u meters (> limit of %u)" % (alt_delta, maxaltchange))
success = False
if delta > maxdistchange:
print("Loiter shifted %u meters (> limit of %u)" % (delta, maxdistchange))
success = False
# restore motor 1 to 100% efficiency
mavproxy.send('param set SIM_ENGINE_MUL 1.0\n')
if success:
print("Stability patch and Loiter OK for %u seconds" % holdtime)
else:
print("Stability Patch FAILED")
return success
# fly_fence_test - fly east until you hit the horizontal circular fence
def fly_fence_test(mavproxy, mav, timeout=180):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# enable fence
mavproxy.send('param set FENCE_ENABLE 1\n')
# first east
print("turn east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 160):
return False
mavproxy.send('rc 4 1500\n')
# fly forward (east) at least 20m
pitching_forward = True
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 20):
return False
# start timer
tstart = get_sim_time(mav)
while get_sim_time(mav) < tstart + timeout:
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
home_distance = get_distance(HOME, pos)
print("Alt: %u HomeDistance: %.0f" % (m.alt, home_distance))
# recenter pitch sticks once we reach home so we don't fly off again
if pitching_forward and home_distance < 10 :
pitching_forward = False
mavproxy.send('rc 2 1500\n')
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
if m.alt <= 1 and home_distance < 10:
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Reached home OK")
return True
# disable fence
mavproxy.send('param set FENCE_ENABLE 0\n')
# reduce throttle
mavproxy.send('rc 3 1000\n')
# switch mode to stabilize
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
mavproxy.send('switch 6\n') # stabilize mode
wait_mode(mav, 'STABILIZE')
print("Fence test failed to reach home - timed out after %u seconds" % timeout)
return False
def show_gps_and_sim_positions(mavproxy, on_off):
if on_off == True:
# turn on simulator display of gps and actual position
mavproxy.send('map set showgpspos 1\n')
mavproxy.send('map set showsimpos 1\n')
else:
# turn off simulator display of gps and actual position
mavproxy.send('map set showgpspos 0\n')
mavproxy.send('map set showsimpos 0\n')
# fly_gps_glitch_loiter_test - fly south east in loiter and test reaction to gps glitch
def fly_gps_glitch_loiter_test(mavproxy, mav, timeout=30, max_distance=20):
'''hold loiter position'''
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# turn south east
print("turn south east")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 150):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 4 1500\n')
# fly forward (south east) at least 60m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
mavproxy.send('rc 2 1500\n')
# wait for copter to slow down
if not wait_groundspeed(mav, 0, 1):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
success = True
# initialise current glitch
glitch_current = 0
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while tnow < tstart + timeout:
tnow = get_sim_time(mav)
desired_glitch_num = int((tnow - tstart) * 2.2)
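# Clarifying comment (added): the 2.2 factor advances the glitch index roughly
# every 1/2.2 ~= 0.45 simulated seconds until the glitch list is exhausted.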
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# turn off glitching if we've reached the end of the glitch list
if glitch_current >= glitch_num:
glitch_current = -1
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
else:
print("Applying glitch %u" % glitch_current)
#move onto the next glitch
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# start displaying distance moved after all glitches applied
if (glitch_current == -1):
m = mav.recv_match(type='VFR_HUD', blocking=True)
curr_pos = sim_location(mav)
moved_distance = get_distance(curr_pos, start_pos)
print("Alt: %u Moved: %.0f" % (m.alt, moved_distance))
if moved_distance > max_distance:
print("Moved over %u meters, Failed!" % max_distance)
success = False
# disable gps glitch
if glitch_current != -1:
glitch_current = -1
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
show_gps_and_sim_positions(mavproxy, False)
if success:
print("GPS glitch test passed! stayed within %u meters for %u seconds" % (max_distance, timeout))
else:
print("GPS glitch test FAILED!")
return success
# fly_gps_glitch_auto_test - fly mission and test reaction to gps glitch
def fly_gps_glitch_auto_test(mavproxy, mav, timeout=30, max_distance=100):
# set-up gps glitch array
glitch_lat = [0.0002996,0.0006958,0.0009431,0.0009991,0.0009444,0.0007716,0.0006221]
glitch_lon = [0.0000717,0.0000912,0.0002761,0.0002626,0.0002807,0.0002049,0.0001304]
glitch_num = len(glitch_lat)
print("GPS Glitches:")
for i in range(1,glitch_num):
print("glitch %d %.7f %.7f" % (i,glitch_lat[i],glitch_lon[i]))
# Fly mission #1
print("# Load copter_glitch_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_glitch_mission.txt")):
print("load copter_glitch_mission failed")
return False
# turn on simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, True)
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# wait until 100m from home
if not wait_distance(mav, 100, 5, 60):
show_gps_and_sim_positions(mavproxy, False)
return False
# record time and position
tstart = get_sim_time(mav)
tnow = tstart
start_pos = sim_location(mav)
# initialise current glitch
glitch_current = 0
print("Apply first glitch")
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# record position for 30 seconds
while glitch_current < glitch_num:
tnow = get_sim_time(mav)
desired_glitch_num = int((tnow - tstart) * 2)
if desired_glitch_num > glitch_current and glitch_current != -1:
glitch_current = desired_glitch_num
# apply next glitch
if glitch_current < glitch_num:
print("Applying glitch %u" % glitch_current)
mavproxy.send('param set SIM_GPS_GLITCH_X %.7f\n' % glitch_lat[glitch_current])
mavproxy.send('param set SIM_GPS_GLITCH_Y %.7f\n' % glitch_lon[glitch_current])
# turn off glitching
print("Completed Glitches")
mavproxy.send('param set SIM_GPS_GLITCH_X 0\n')
mavproxy.send('param set SIM_GPS_GLITCH_Y 0\n')
# continue with the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# wait for arrival back home
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
while dist_to_home > 5:
if get_sim_time(mav) > (tstart + timeout):
print("GPS Glitch testing failed - exceeded timeout %u seconds" % timeout)
ret = False
break
m = mav.recv_match(type='VFR_HUD', blocking=True)
pos = mav.location()
dist_to_home = get_distance(HOME, pos)
print("Dist from home: %u" % dist_to_home)
# turn off simulator display of gps and actual position
show_gps_and_sim_positions(mavproxy, False)
print("GPS Glitch test Auto completed: passed=%s" % ret)
return ret
#fly_simple - assumes the simple bearing is initialised to be directly north
# flies a box: side meters south, 8 seconds west, side/2 meters north, 8 seconds east
def fly_simple(mavproxy, mav, side=50, timeout=120):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
#set SIMPLE mode for all flight modes
mavproxy.send('param set SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# fly south 50m
print("# Flying south %u meters" % side)
mavproxy.send('rc 1 1300\n')
if not wait_distance(mav, side, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly west 8 seconds
print("# Flying west for 8 seconds")
mavproxy.send('rc 2 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
# fly north 25 meters
print("# Flying north %u meters" % (side/2.0))
mavproxy.send('rc 1 1700\n')
if not wait_distance(mav, side/2, 5, 60):
failed = True
mavproxy.send('rc 1 1500\n')
# fly east 8 seconds
print("# Flying east for 8 seconds")
mavproxy.send('rc 2 1700\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + 8):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
#print("%u" % delta)
mavproxy.send('rc 2 1500\n')
#restore to default
mavproxy.send('param set SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_super_simple - flies a circle around home for 45 seconds
def fly_super_simple(mavproxy, mav, timeout=45):
failed = False
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# fly forward 20m
print("# Flying forward 20 meters")
mavproxy.send('rc 2 1300\n')
if not wait_distance(mav, 20, 5, 60):
failed = True
mavproxy.send('rc 2 1500\n')
#set SUPER SIMPLE mode for all flight modes
mavproxy.send('param set SUPER_SIMPLE 63\n')
# switch to stabilize mode
mavproxy.send('switch 6\n')
wait_mode(mav, 'STABILIZE')
mavproxy.send('rc 3 1430\n')
# start copter yawing slowly
mavproxy.send('rc 4 1550\n')
# roll left for timeout seconds
print("# rolling left from pilot's point of view for %u seconds" % timeout)
mavproxy.send('rc 1 1300\n')
tstart = get_sim_time(mav)
while get_sim_time(mav) < (tstart + timeout):
m = mav.recv_match(type='VFR_HUD', blocking=True)
delta = (get_sim_time(mav) - tstart)
# stop rolling and yawing
mavproxy.send('rc 1 1500\n')
mavproxy.send('rc 4 1500\n')
#restore simple mode parameters to default
mavproxy.send('param set SUPER_SIMPLE 0\n')
#hover in place
hover(mavproxy, mav)
return not failed
#fly_circle - flies a circle with 20m radius
def fly_circle(mavproxy, mav, maxaltchange=10, holdtime=36):
# hold position in loiter
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
# face west
print("turn west")
mavproxy.send('rc 4 1580\n')
if not wait_heading(mav, 270):
return False
mavproxy.send('rc 4 1500\n')
#set CIRCLE radius
mavproxy.send('param set CIRCLE_RADIUS 3000\n')
# fly forward (west) at least 100m
mavproxy.send('rc 2 1100\n')
if not wait_distance(mav, 100):
return False
# return pitch stick back to middle
mavproxy.send('rc 2 1500\n')
# set CIRCLE mode
mavproxy.send('switch 1\n') # circle mode
wait_mode(mav, 'CIRCLE')
# wait
m = mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
tstart = get_sim_time(mav)
tholdstart = get_sim_time(mav)
print("Circle at %u meters for %u seconds" % (start_altitude, holdtime))
while get_sim_time(mav) < tstart + holdtime:
m = mav.recv_match(type='VFR_HUD', blocking=True)
print("heading %u" % m.heading)
print("CIRCLE OK for %u seconds" % holdtime)
return True
# fly_auto_test - fly mission which tests a significant number of commands
def fly_auto_test(mavproxy, mav):
# Fly mission #1
print("# Load copter_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_mission.txt")):
print("load copter_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("Auto mission completed: passed=%s" % ret)
return ret
# fly_avc_test - fly AVC mission
def fly_avc_test(mavproxy, mav):
# upload mission from file
print("# Load copter_AVC2013_mission")
if not load_mission_from_file(mavproxy, mav, os.path.join(testdir, "copter_AVC2013_mission.txt")):
print("load copter_AVC2013_mission failed")
return False
# load the waypoint count
global homeloc
global num_wp
print("Fly AVC mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
# switch into AUTO mode and raise throttle
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
mavproxy.send('rc 3 1500\n')
# fly the mission
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
# set throttle to minimum
mavproxy.send('rc 3 1000\n')
# wait for disarm
mav.motors_disarmed_wait()
print("MOTORS DISARMED OK")
print("AVC mission completed: passed=%s" % ret)
return ret
def land(mavproxy, mav, timeout=60):
'''land the quad'''
print("STARTING LANDING")
mavproxy.send('switch 2\n') # land mode
wait_mode(mav, 'LAND')
print("Entered Landing Mode")
ret = wait_altitude(mav, -5, 1)
print("LANDING: ok= %s" % ret)
return ret
def fly_mission(mavproxy, mav, height_accuracy=-1, target_altitude=None):
'''fly a mission from a file'''
global homeloc
global num_wp
print("test: Fly a mission from 1 to %u" % num_wp)
mavproxy.send('wp set 1\n')
mavproxy.send('switch 4\n') # auto mode
wait_mode(mav, 'AUTO')
ret = wait_waypoint(mav, 0, num_wp-1, timeout=500, mode='AUTO')
expect_msg = "Reached Command #%u" % (num_wp-1)
if (ret):
mavproxy.expect(expect_msg)
print("test: MISSION COMPLETE: passed=%s" % ret)
# wait here until ready
mavproxy.send('switch 5\n') # loiter mode
wait_mode(mav, 'LOITER')
return ret
def load_mission_from_file(mavproxy, mav, filename):
'''Load a mission from a file to flight controller'''
global num_wp
mavproxy.send('wp load %s\n' % filename)
mavproxy.expect('flight plan received')
mavproxy.send('wp list\n')
mavproxy.expect('Requesting [0-9]+ waypoints')
# update num_wp
wploader = mavwp.MAVWPLoader()
wploader.load(filename)
num_wp = wploader.count()
return True
def save_mission_to_file(mavproxy, mav, filename):
global num_wp
mavproxy.send('wp save %s\n' % filename)
mavproxy.expect('Saved ([0-9]+) waypoints')
num_wp = int(mavproxy.match.group(1))
print("num_wp: %d" % num_wp)
return True
def setup_rc(mavproxy):
'''setup RC override control'''
for chan in range(1,9):
mavproxy.send('rc %u 1500\n' % chan)
# zero throttle
mavproxy.send('rc 3 1000\n')
def fly_ArduCopter(viewerip=None, map=False):
'''fly ArduCopter in SIL
you can pass viewerip as an IP address to optionally send fg and
mavproxy packets too for local viewing of the flight in real time
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
home = "%f,%f,%u,%u" % (HOME.lat, HOME.lng, HOME.alt, HOME.heading)
sil = util.start_SIL('ArduCopter', wipe=True, model='+', home=home, speedup=speedup_default)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter')
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/copter_params.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
sil = util.start_SIL('ArduCopter', model='+', home=home, speedup=speedup_default)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --quadcopter --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/ArduCopter-test.tlog")
print("buildlog=%s" % buildlog)
copyTLog = False
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
print( "WARN: Failed to create symlink: " + logfile + " => " + buildlog + ", Will copy tlog manually to target location" )
copyTLog = True
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sil, mavproxy])
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
# wait 10sec to allow EKF to settle
wait_seconds(mav, 10)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a square in Stabilize mode
print("#")
print("########## Fly a square and save WPs with CH7 switch ##########")
print("#")
if not fly_square(mavproxy, mav):
failed_test_msg = "fly_square failed"
print(failed_test_msg)
failed = True
# save the stored mission to file
print("# Save out the CH7 mission to file")
if not save_mission_to_file(mavproxy, mav, os.path.join(testdir, "ch7_mission.txt")):
failed_test_msg = "save_mission_to_file failed"
print(failed_test_msg)
failed = True
# fly the stored mission
print("# Fly CH7 saved mission")
if not fly_mission(mavproxy, mav,height_accuracy = 0.5, target_altitude=10):
failed_test_msg = "fly ch7_mission failed"
print(failed_test_msg)
failed = True
# Throttle Failsafe
print("#")
print("########## Test Failsafe ##########")
print("#")
if not fly_throttle_failsafe(mavproxy, mav):
failed_test_msg = "fly_throttle_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Battery failsafe
if not fly_battery_failsafe(mavproxy, mav):
failed_test_msg = "fly_battery_failsafe failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Stability patch
print("#")
print("########## Test Stability Patch ##########")
print("#")
if not fly_stability_patch(mavproxy, mav, 30):
failed_test_msg = "fly_stability_patch failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after stab patch failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fence test
print("#")
print("########## Test Horizontal Fence ##########")
print("#")
if not fly_fence_test(mavproxy, mav, 180):
failed_test_msg = "fly_fence_test failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch Loiter test
print("# GPS Glitch Loiter Test")
if not fly_gps_glitch_loiter_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_loiter_test failed"
print(failed_test_msg)
failed = True
# RTL after GPS Glitch Loiter test
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL failed"
print(failed_test_msg)
failed = True
# Fly GPS Glitch test in auto mode
print("# GPS Glitch Auto Test")
if not fly_gps_glitch_auto_test(mavproxy, mav):
failed_test_msg = "fly_gps_glitch_auto_test failed"
print(failed_test_msg)
failed = True
# take-off ahead of next test
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Loiter for 10 seconds
print("#")
print("########## Test Loiter for 10 seconds ##########")
print("#")
if not loiter(mavproxy, mav):
failed_test_msg = "loiter failed"
print(failed_test_msg)
failed = True
# Loiter Climb
print("#")
print("# Loiter - climb to 30m")
print("#")
if not change_alt(mavproxy, mav, 30):
failed_test_msg = "change_alt climb failed"
print(failed_test_msg)
failed = True
# Loiter Descend
print("#")
print("# Loiter - descend to 20m")
print("#")
if not change_alt(mavproxy, mav, 20):
failed_test_msg = "change_alt descend failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after Loiter climb/descend failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Simple mode
print("# Fly in SIMPLE mode")
if not fly_simple(mavproxy, mav):
failed_test_msg = "fly_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Fly a circle in super simple mode
print("# Fly a circle in SUPER SIMPLE mode")
if not fly_super_simple(mavproxy, mav):
failed_test_msg = "fly_super_simple failed"
print(failed_test_msg)
failed = True
# RTL
print("# RTL #")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after super simple mode failed"
print(failed_test_msg)
failed = True
# Takeoff
print("# Takeoff")
if not takeoff(mavproxy, mav, 10):
failed_test_msg = "takeoff failed"
print(failed_test_msg)
failed = True
# Circle mode
print("# Fly CIRCLE mode")
if not fly_circle(mavproxy, mav):
failed_test_msg = "fly_circle failed"
print(failed_test_msg)
failed = True
# RTL
print("#")
print("########## Test RTL ##########")
print("#")
if not fly_RTL(mavproxy, mav):
failed_test_msg = "fly_RTL after circle failed"
print(failed_test_msg)
failed = True
print("# Fly copter mission")
if not fly_auto_test(mavproxy, mav):
failed_test_msg = "fly_auto_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew copter mission OK")
# wait for disarm
mav.motors_disarmed_wait()
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/ArduCopter-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
if os.path.exists('ArduCopter-valgrind.log'):
os.chmod('ArduCopter-valgrind.log', 0644)
shutil.copy("ArduCopter-valgrind.log", util.reltopdir("../buildlogs/ArduCopter-valgrind.log"))
# [2014/05/07] FC Because I'm doing a cross machine build (source is on host, build is on guest VM) I cannot hard link
# This flag tells me that I need to copy the data out
if copyTLog:
shutil.copy(logfile, buildlog)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
def fly_CopterAVC(viewerip=None, map=False):
'''fly ArduCopter in SIL for AVC2013 mission
'''
global homeloc
if TARGET != 'sitl':
util.build_SIL('ArduCopter', target=TARGET)
home = "%f,%f,%u,%u" % (AVCHOME.lat, AVCHOME.lng, AVCHOME.alt, AVCHOME.heading)
sil = util.start_SIL('ArduCopter', wipe=True, model='heli', home=home, speedup=speedup_default)
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options='--sitl=127.0.0.1:5501 --out=127.0.0.1:19550')
mavproxy.expect('Received [0-9]+ parameters')
# setup test parameters
mavproxy.send("param load %s/Helicopter.parm\n" % testdir)
mavproxy.expect('Loaded [0-9]+ parameters')
# reboot with new parameters
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
sil = util.start_SIL('ArduCopter', model='heli', home=home, speedup=speedup_default)
options = '--sitl=127.0.0.1:5501 --out=127.0.0.1:19550 --streamrate=5'
if viewerip:
options += ' --out=%s:14550' % viewerip
if map:
options += ' --map'
mavproxy = util.start_MAVProxy_SIL('ArduCopter', options=options)
mavproxy.expect('Telemetry log: (\S+)')
logfile = mavproxy.match.group(1)
print("LOGFILE %s" % logfile)
buildlog = util.reltopdir("../buildlogs/CopterAVC-test.tlog")
print("buildlog=%s" % buildlog)
if os.path.exists(buildlog):
os.unlink(buildlog)
try:
os.link(logfile, buildlog)
except Exception:
pass
# the received parameters can come before or after the ready to fly message
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
mavproxy.expect(['Received [0-9]+ parameters', 'Ready to FLY'])
util.expect_setup_callback(mavproxy, expect_callback)
expect_list_clear()
expect_list_extend([sil, mavproxy])
if map:
mavproxy.send('map icon 40.072467969730496 -105.2314389590174\n')
mavproxy.send('map icon 40.072600990533829 -105.23146100342274\n')
# get a mavlink connection going
try:
mav = mavutil.mavlink_connection('127.0.0.1:19550', robust_parsing=True)
except Exception, msg:
print("Failed to start mavlink connection on 127.0.0.1:19550" % msg)
raise
mav.message_hooks.append(message_hook)
mav.idle_hooks.append(idle_hook)
failed = False
failed_test_msg = "None"
try:
mav.wait_heartbeat()
setup_rc(mavproxy)
homeloc = mav.location()
print("Lowering rotor speed")
mavproxy.send('rc 8 1000\n')
# wait 20sec to allow EKF to settle
wait_seconds(mav, 20)
# Arm
print("# Arm motors")
if not arm_motors(mavproxy, mav):
failed_test_msg = "arm_motors failed"
print(failed_test_msg)
failed = True
print("Raising rotor speed")
mavproxy.send('rc 8 2000\n')
print("# Fly AVC mission")
if not fly_avc_test(mavproxy, mav):
failed_test_msg = "fly_avc_test failed"
print(failed_test_msg)
failed = True
else:
print("Flew AVC mission OK")
print("Lowering rotor speed")
mavproxy.send('rc 8 1000\n')
#mission includes disarm at end so should be ok to download logs now
if not log_download(mavproxy, mav, util.reltopdir("../buildlogs/CopterAVC-log.bin")):
failed_test_msg = "log_download failed"
print(failed_test_msg)
failed = True
except pexpect.TIMEOUT, failed_test_msg:
failed_test_msg = "Timeout"
failed = True
mav.close()
util.pexpect_close(mavproxy)
util.pexpect_close(sil)
if failed:
print("FAILED: %s" % failed_test_msg)
return False
return True
|
| siosio/intellij-community | refs/heads/master | python/testData/codeInsight/smartEnter/methodParameterClassMethod.py | 12 |
class MyClass:
@classmethod
def method<caret>
|
| roryj/apartmenthunter | refs/heads/master | src/apartment.py | 1 |
#!/usr/bin/python
from random import randint
import library.dateutil.parser as parse
from library.dateutil.relativedelta import *
from library.dateutil.tz import tzlocal
from datetime import *
from math import pow, log, log10, e
class Apartment(object):
def __init__(self, title, link, description, created):
self.description = description
self.link = link
self.created = created
temp = title.split()
self.size = temp.pop().split('<')[0].replace('ft', '')
self.bedrooms = temp.pop()
self.price = temp.pop().replace('$', '')
self.title = ' '.join(temp)
self.ranking = self.__getRanking()
def __str__(self):
return self.title + "\r\nPrice: $" + self.price + "\r\nSize: " + self.size + "sqft\r\nBedrooms: " + self.bedrooms + "\r\nDescription: " + self.description + "\r\n" + self.link + "\r\nCreated: " + self.created
def __eq__(self, other):
return self.title == other.title and self.created == other.created
def __hash__(self):
return hash(('title', self.title, 'created', self.created))
def formatForEmail(self):
return "<html><a href='" + self.link + "'><h1>" + self.title + """</h1></a>
<b>Rating:</b> """ + str(self.ranking) + """<br/>
<b>Price:</b> $""" + str(self.price) + """<br/>
<b>Size:</b> """ + str(self.size) + """sqft<br/>
<b>Bedrooms:</b> """ + self.bedrooms + """<br/>
<b>Description:</b> """ + self.description + """<br/>
<b>Created:</b> """ + self.created + "</html>"
def __getRanking(self):
ranking = ((int(self.size) / 600) * 4) + (pow((1800 / int(self.price)), 2) * 6)
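# Worked example (hypothetical listing, added for clarity): size=600 sqft and
# price=$1800 give (600/600)*4 + (1800/1800)**2*6 = 10 before the recency term
# and keyword bonuses below are added.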
# print("Now: ", datetime.now(tzlocal()).timestamp())
# print("Posted On: ", parse.parse(self.created).timestamp())
# print("Ranking from time: ", (1 / 2.592) * pow((14400 / (datetime.now(tzlocal()).timestamp() - parse.parse(self.created).timestamp())), 2) * 10)
# print("Ranking2 from time: ", log(14400 / (datetime.now(tzlocal()).timestamp() - parse.parse(self.created).timestamp())))
# print("Ranking3 from time: ", log(14400 / (datetime.now(tzlocal()).timestamp() - parse.parse(self.created).timestamp())))
# test = pow((14400 / (datetime.now(tzlocal()).timestamp() - parse.parse(self.created).timestamp())), 2)
# print("Ranking4 from time: ", pow(e, test) * 14)
ranking += (10 / 2.592) * pow((14400 / (datetime.now(tzlocal()).timestamp() - parse.parse(self.created).timestamp())), 2)
if "balcony" in self.description:
ranking += 4
if "view" in self.description:
ranking += 4
if "sound" in self.description:
ranking += 5
if "water" in self.description:
ranking += 5
if "elliot" in self.description:
ranking += 5
if "nook" in self.description:
ranking += 8
if "island" in self.description:
ranking += 8
print("Ranking created for ", self.link, ": ", ranking)
return ranking
|
| ryfeus/lambda-packs | refs/heads/master | Selenium_PhantomJS/source/pkg_resources/_vendor/packaging/_structures.py | 1152 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
class Infinity(object):
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity(object):
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
NegativeInfinity = NegativeInfinity()
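# The two singletons order above/below every other value via the reflected
# comparison operators, e.g. sorted([3, Infinity, NegativeInfinity]) yields
# [NegativeInfinity, 3, Infinity].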
|
mhaessig/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/set-cookie-secure_wsh.py
|
119
|
#!/usr/bin/python
import urlparse
from mod_pywebsocket import common, msgutil, util
from mod_pywebsocket.handshake import hybi
def web_socket_do_extra_handshake(request):
url_parts = urlparse.urlsplit(request.uri)
request.connection.write('HTTP/1.1 101 Switching Protocols:\x0D\x0AConnection: Upgrade\x0D\x0AUpgrade: WebSocket\x0D\x0ASet-Cookie: ws_test_'+(url_parts.query or '')+'=test; Secure; Path=/\x0D\x0ASec-WebSocket-Origin: '+request.ws_origin+'\x0D\x0ASec-WebSocket-Accept: '+hybi.compute_accept(request.headers_in.get(common.SEC_WEBSOCKET_KEY_HEADER))[0]+'\x0D\x0A\x0D\x0A')
return
def web_socket_transfer_data(request):
while True:
return
|
40223211/cadpbtest0615
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/select.py
|
730
|
"""
borrowed from jython
https://bitbucket.org/jython/jython/raw/28a66ba038620292520470a0bb4dc9bb8ac2e403/Lib/select.py
"""
#import java.nio.channels.SelectableChannel
#import java.nio.channels.SelectionKey
#import java.nio.channels.Selector
#from java.nio.channels.SelectionKey import OP_ACCEPT, OP_CONNECT, OP_WRITE, OP_READ
import errno
import os
import queue
import socket
class error(Exception): pass
ALL = None
_exception_map = {
# (<javaexception>, <circumstance>) : lambda: <code that raises the python equivalent>
#(java.nio.channels.ClosedChannelException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.CancelledKeyException, ALL) : error(errno.ENOTCONN, 'Socket is not connected'),
#(java.nio.channels.IllegalBlockingModeException, ALL) : error(errno.ESOCKISBLOCKING, 'socket must be in non-blocking mode'),
}
def _map_exception(exc, circumstance=ALL):
try:
mapped_exception = _exception_map[(exc.__class__, circumstance)]
mapped_exception.java_exception = exc
return mapped_exception
except KeyError:
return error(-1, 'Unmapped java exception: <%s:%s>' % (exc.toString(), circumstance))
POLLIN = 1
POLLOUT = 2
# The following event types are completely ignored on jython
# Java does not support them, AFAICT
# They are declared only to support code compatibility with cpython
POLLPRI = 4
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32
def _getselectable(selectable_object):
try:
channel = selectable_object.getchannel()
except:
try:
channel = selectable_object.fileno().getChannel()
except:
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
if channel and not isinstance(channel, java.nio.channels.SelectableChannel):
raise TypeError("Object '%s' is not watchable" % selectable_object,
errno.ENOTSOCK)
return channel
class poll:
def __init__(self):
self.selector = java.nio.channels.Selector.open()
self.chanmap = {}
self.unconnected_sockets = []
def _register_channel(self, socket_object, channel, mask):
jmask = 0
if mask & POLLIN:
# Note that OP_READ is NOT a valid event on server socket channels.
if channel.validOps() & OP_ACCEPT:
jmask = OP_ACCEPT
else:
jmask = OP_READ
if mask & POLLOUT:
if channel.validOps() & OP_WRITE:
jmask |= OP_WRITE
if channel.validOps() & OP_CONNECT:
jmask |= OP_CONNECT
selectionkey = channel.register(self.selector, jmask)
self.chanmap[channel] = (socket_object, selectionkey)
def _check_unconnected_sockets(self):
temp_list = []
for socket_object, mask in self.unconnected_sockets:
channel = _getselectable(socket_object)
if channel is not None:
self._register_channel(socket_object, channel, mask)
else:
temp_list.append( (socket_object, mask) )
self.unconnected_sockets = temp_list
def register(self, socket_object, mask = POLLIN|POLLOUT|POLLPRI):
try:
channel = _getselectable(socket_object)
if channel is None:
# The socket is not yet connected, and thus has no channel
# Add it to a pending list, and return
self.unconnected_sockets.append( (socket_object, mask) )
return
self._register_channel(socket_object, channel, mask)
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def unregister(self, socket_object):
try:
channel = _getselectable(socket_object)
self.chanmap[channel][1].cancel()
del self.chanmap[channel]
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _dopoll(self, timeout):
if timeout is None or timeout < 0:
self.selector.select()
else:
try:
timeout = int(timeout)
if not timeout:
self.selector.selectNow()
else:
# No multiplication required: both cpython and java use millisecond timeouts
self.selector.select(timeout)
except ValueError as vx:
raise error("poll timeout must be a number of milliseconds or None", errno.EINVAL)
# The returned selectedKeys cannot be used from multiple threads!
return self.selector.selectedKeys()
def poll(self, timeout=None):
try:
self._check_unconnected_sockets()
selectedkeys = self._dopoll(timeout)
results = []
for k in selectedkeys.iterator():
jmask = k.readyOps()
pymask = 0
if jmask & OP_READ: pymask |= POLLIN
if jmask & OP_WRITE: pymask |= POLLOUT
if jmask & OP_ACCEPT: pymask |= POLLIN
if jmask & OP_CONNECT: pymask |= POLLOUT
# Now return the original userobject, and the return event mask
results.append( (self.chanmap[k.channel()][0], pymask) )
return results
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _deregister_all(self):
try:
for k in self.selector.keys():
k.cancel()
# Keys are not actually removed from the selector until the next select operation.
self.selector.selectNow()
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
self._deregister_all()
self.selector.close()
except BaseException as jlx:
#except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _calcselecttimeoutvalue(value):
if value is None:
return None
try:
floatvalue = float(value)
except Exception as x:
raise TypeError("Select timeout value must be a number or None")
if value < 0:
raise error("Select timeout value cannot be negative", errno.EINVAL)
if floatvalue < 0.000001:
return 0
return int(floatvalue * 1000) # Convert to milliseconds
# This cache for poll objects is required because of a bug in java on MS Windows
# http://bugs.jython.org/issue1291
class poll_object_cache:
def __init__(self):
self.is_windows = os.name == 'nt'
if self.is_windows:
self.poll_object_queue = queue.Queue()
import atexit
atexit.register(self.finalize)
def get_poll_object(self):
if not self.is_windows:
return poll()
try:
return self.poll_object_queue.get(False)
except queue.Empty:
return poll()
def release_poll_object(self, pobj):
if self.is_windows:
pobj._deregister_all()
self.poll_object_queue.put(pobj)
else:
pobj.close()
def finalize(self):
if self.is_windows:
while True:
try:
p = self.poll_object_queue.get(False)
p.close()
except queue.Empty:
return
_poll_object_cache = poll_object_cache()
def native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
timeout = _calcselecttimeoutvalue(timeout)
# First create a poll object to do the actual watching.
pobj = _poll_object_cache.get_poll_object()
try:
registered_for_read = {}
# Check the read list
for fd in read_fd_list:
pobj.register(fd, POLLIN)
registered_for_read[fd] = 1
# And now the write list
for fd in write_fd_list:
if fd in registered_for_read:
# registering a second time overwrites the first
pobj.register(fd, POLLIN|POLLOUT)
else:
pobj.register(fd, POLLOUT)
results = pobj.poll(timeout)
# Now start preparing the results
read_ready_list, write_ready_list, oob_ready_list = [], [], []
for fd, mask in results:
if mask & POLLIN:
read_ready_list.append(fd)
if mask & POLLOUT:
write_ready_list.append(fd)
return read_ready_list, write_ready_list, oob_ready_list
finally:
_poll_object_cache.release_poll_object(pobj)
select = native_select
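# Usage sketch (mirrors the stdlib select.select signature; `sock` is a
# hypothetical connected socket, and the timeout is given in seconds and
# converted to milliseconds by _calcselecttimeoutvalue):
#
#     readable, writable, _ = select([sock], [sock], [], 5.0)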
def cpython_compatible_select(read_fd_list, write_fd_list, outofband_fd_list, timeout=None):
# First turn all sockets to non-blocking
# keeping track of which ones have changed
modified_channels = []
try:
for socket_list in [read_fd_list, write_fd_list, outofband_fd_list]:
for s in socket_list:
channel = _getselectable(s)
if channel.isBlocking():
modified_channels.append(channel)
channel.configureBlocking(0)
return native_select(read_fd_list, write_fd_list, outofband_fd_list, timeout)
finally:
for channel in modified_channels:
channel.configureBlocking(1)
|
marrow/tags
|
refs/heads/develop
|
marrow/tags/base.py
|
1
|
# encoding: utf-8
import inspect
from copy import deepcopy
from cgi import escape
from marrow.util.compat import IO
from marrow.util.object import NoDefault
from marrow.tags.util import quoteattrs
__all__ = ['Fragment', 'Tag', 'Text', 'AutoTag', 'tag']
class Fragment(object):
def __init__(self, data=None, *args, **kw):
self.args = list(args)
self.attrs = kw
self.data = data
super(Fragment, self).__init__()
def __repr__(self):
return "<%s args=%r attrs=%r>" % (self.name, self.args, self.attrs)
def clear(self):
self.args = list()
self.attrs = dict()
class Text(Fragment):
def __init__(self, data, escape=True, *args, **kw):
super(Text, self).__init__(data, *args, **kw)
self.escape = escape
def __iter__(self):
yield escape(self.data) if self.escape else self.data
class Flush(Fragment):
def __iter__(self):
yield ''
class Tag(Fragment):
def __init__(self, name, prefix=None, simple=False, strip=False, *args, **kw):
super(Tag, self).__init__([], *args, **kw)
self.name = name
self.prefix = prefix
self.simple = simple
self.strip = strip
def __call__(self, strip=NoDefault, *args, **kw):
self = deepcopy(self)
if strip is not NoDefault: self.strip = strip
self.args.extend(list(args))
self.attrs.update(kw)
return self
def __getitem__(self, k):
if not k: return self
self = deepcopy(self)
if not isinstance(k, (tuple, list)):
k = (k, )
for fragment in k:
if isinstance(fragment, basestring):
self.data.append(escape(fragment))
continue
self.data.append(fragment)
return self
def __repr__(self):
return "<%s children=%d args=%r attrs=%r>" % (self.name, len(self.data), self.args, self.attrs)
def __unicode__(self):
"""Return a serialized version of this tree/branch."""
return u''.join(unicode(i) for i in self)
def render(self):
buf = u""
for chunk in self:
if not chunk:
yield buf
buf = u""
continue
buf += chunk
# Handle the remaining data.
if buf:
yield buf
def __copy__(self):
return Tag(self.name, self.prefix, self.simple, self.strip, *self.args, **self.attrs)
def __iter__(self):
if not self.strip:
if self.prefix:
yield self.prefix
yield u'<' + self.name + u''.join([attr for attr in quoteattrs(self, self.attrs)]) + u'>'
if self.simple:
raise StopIteration()
for child in self.data:
if inspect.isgenerator(child):
for element in child:
if isinstance(element, unicode):
yield element
continue
for chunk in element:
yield chunk
continue
if isinstance(child, Fragment):
for element in child:
yield element
continue
if inspect.isroutine(child):
value = child()
if isinstance(value, unicode):
yield value
continue
for element in value:
yield element
continue
yield child
if not self.strip:
yield u'</' + self.name + u'>'
def clear(self):
self.data = []
super(Tag, self).clear()
def empty(self):
self.data = []
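# Usage sketch (assumes marrow.tags.util.quoteattrs serializes keyword
# attributes as ' name="value"' pairs):
#
#     para = Tag('p')(id='intro')["Hello & welcome"]
#     unicode(para)  # -> u'<p id="intro">Hello &amp; welcome</p>'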
|
apple/swift-llbuild
|
refs/heads/release/5.5
|
examples/simple-make/simplebuild.py
|
1
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
"""
Simple Data-Driven Build System
"""
import json
import threading
import sys
import traceback
import llbuild
class SimpleRule(llbuild.Rule, llbuild.Task):
"""
asKey(*args) -> str
Return the key for computing the rule in the future.
"""
@classmethod
def asKey(klass, *args):
kind = klass.__name__
assert kind.endswith("Rule")
kind = kind[:-4]
return json.dumps({ "kind" : kind, "args" : args })
def create_task(self):
return self
###
# JSON-ify Wrappers
def is_result_valid(self, engine, result):
try:
return self.is_data_valid(engine, json.loads(result))
except:
traceback.print_exc()
return True
def is_data_valid(self, engine, result):
return True
def provide_value(self, engine, input_id, result):
try:
return self.provide_data(engine, input_id, json.loads(result))
except:
traceback.print_exc()
return None
def provide_data(self, engine, input_id, data):
raise RuntimeError("missing client override")
class SimpleAsyncRule(SimpleRule):
_async = True
def run(self):
raise RuntimeError("missing client override")
def inputs_available(self, engine):
if self._async:
# Spawn a thread to do the actual work.
t = threading.Thread(target=self._execute, args=(engine,))
t.start()
else:
self._execute(engine)
def _execute(self, engine):
try:
result = self.run()
except:
traceback.print_exc()
result = None
engine.task_is_complete(self, result)
class DataDrivenEngine(llbuild.BuildEngine):
def __init__(self, namespace):
super(DataDrivenEngine, self).__init__(self)
self.namespace = namespace
def lookup_rule(self, name):
# Rules are encoded as a JSON dictionary.
data = json.loads(name)
rule_name = data["kind"] + "Rule"
rule_class = self.namespace.get(rule_name)
if rule_class is None:
raise RuntimeError("invalid rule: %r" % (data,))
return rule_class(*data['args'])
def task_is_complete(self, task, data, force_change=False):
value = json.dumps(data)
super(DataDrivenEngine, self).task_is_complete(
task, value, force_change)
def build(self, key):
result = super(DataDrivenEngine, self).build(key)
return json.loads(result)
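# Usage sketch (assumes the llbuild Python bindings imported above drive the
# Rule/Task wrappers in this file; DoubleRule is a hypothetical client rule):
#
#     class DoubleRule(SimpleAsyncRule):
#         def __init__(self, value):
#             self.value = value
#         def run(self):
#             return int(self.value) * 2
#
#     engine = DataDrivenEngine(globals())
#     engine.build(DoubleRule.asKey("21"))  # -> 42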
|
lupyuen/RaspberryPiImage
|
refs/heads/master
|
home/pi/GrovePi/Software/Python/others/temboo/Library/Amazon/S3/DeleteBucket.py
|
5
|
# -*- coding: utf-8 -*-
###############################################################################
#
# DeleteBucket
# Deletes a bucket from your Amazon S3 account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteBucket(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeleteBucket Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeleteBucket, self).__init__(temboo_session, '/Library/Amazon/S3/DeleteBucket')
def new_input_set(self):
return DeleteBucketInputSet()
def _make_result_set(self, result, path):
return DeleteBucketResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteBucketChoreographyExecution(session, exec_id, path)
class DeleteBucketInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteBucket
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AWSAccessKeyId(self, value):
"""
Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
"""
super(DeleteBucketInputSet, self)._set_input('AWSAccessKeyId', value)
def set_AWSSecretKeyId(self, value):
"""
Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
"""
super(DeleteBucketInputSet, self)._set_input('AWSSecretKeyId', value)
def set_BucketName(self, value):
"""
Set the value of the BucketName input for this Choreo. ((required, string) The name of the bucket that will be deleted.)
"""
super(DeleteBucketInputSet, self)._set_input('BucketName', value)
class DeleteBucketResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeleteBucket Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon. Note that no content is returned for successful deletions.)
"""
return self._output.get('Response', None)
class DeleteBucketChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteBucketResultSet(response, path)
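# Usage sketch (hypothetical credentials; assumes the Temboo SDK's
# TembooSession from temboo.core.session and its execute_with_results flow):
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT', 'APP_NAME', 'APP_KEY')
#     choreo = DeleteBucket(session)
#     inputs = choreo.new_input_set()
#     inputs.set_AWSAccessKeyId('...')
#     inputs.set_AWSSecretKeyId('...')
#     inputs.set_BucketName('my-bucket')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Response())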
|
informatics-isi-edu/synspy
|
refs/heads/master
|
launcher/launcher/impl/process_tasks.py
|
1
|
import os
import subprocess
from deriva.core import format_exception
from launcher.impl import LauncherTask, Task
class SubprocessTask(LauncherTask):
def __init__(self, parent=None):
super(SubprocessTask, self).__init__(parent)
class ViewerTask(SubprocessTask):
def __init__(self, executable, is_owner, proc_output_path=None, parent=None):
super(ViewerTask, self).__init__(parent)
self.executable = executable
self.is_owner = is_owner
self.proc_output_path = proc_output_path
def result_callback(self, success, result):
self.set_status(success,
"Viewer subprocess execution success" if success else "Viewer subprocess execution failed",
"" if success else format_exception(result),
self.is_owner)
def run(self, file_path, working_dir=os.getcwd(), env=None):
self.task = Task(self._execute,
[self.executable, file_path, working_dir, self.proc_output_path, env],
self.result_callback)
self.start()
@staticmethod
def _execute(executable, file_path, working_dir, proc_output_path=None, env=None):
out = subprocess.PIPE
if proc_output_path:
try:
out = open(proc_output_path, "wb")
except OSError:
pass
command = [executable, file_path]
process = subprocess.Popen(command,
cwd=working_dir,
env=env,
stdin=subprocess.PIPE,
stdout=out,
stderr=subprocess.STDOUT)
ret = process.wait()
try:
out.flush()
out.close()
except:
pass
del process
if ret != 0:
raise RuntimeError('Non-zero viewer exit status %s!' % ret)
|
shepdelacreme/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/ios/ios_facts.py
|
7
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_facts
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Collect facts from remote devices running Cisco IOS
description:
- Collects a base set of device facts from a remote device that
is running IOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: ios
notes:
- Tested against IOS 15.6
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- ios_facts:
gather_subset: all
# Collect only the config and default facts
- ios_facts:
gather_subset:
- config
# Do not collect hardware facts
- ios_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: string
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: string
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: string
ansible_net_iostype:
description: The operating system type (IOS or IOS-XE) running on the remote device
returned: always
type: string
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: always
type: string
ansible_net_stacked_models:
description: The model names of each device in the stack
returned: when multiple devices are configured in a stack
type: list
ansible_net_stacked_serialnums:
description: The serial numbers of each device in the stack
returned: when multiple devices are configured in a stack
type: list
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_filesystems_info:
description: A hash of all file systems containing info about each file system (e.g. free and total space)
returned: when hardware is configured
type: dict
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: string
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.ios.ios import run_commands
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False)
def run(self, cmd):
return run_commands(self.module, commands=cmd, check_rc=False)
class Default(FactsBase):
COMMANDS = ['show version']
def populate(self):
super(Default, self).populate()
data = self.responses[0]
if data:
self.facts['version'] = self.parse_version(data)
self.facts['iostype'] = self.parse_iostype(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['image'] = self.parse_image(data)
self.facts['hostname'] = self.parse_hostname(data)
self.parse_stacks(data)
def parse_version(self, data):
match = re.search(r'Version (\S+?)(?:,\s|\s)', data)
if match:
return match.group(1)
def parse_iostype(self, data):
match = re.search(r'\S+(X86_64_LINUX_IOSD-UNIVERSALK9-M)(\S+)', data)
if match:
return "IOS-XE"
else:
return "IOS"
def parse_hostname(self, data):
match = re.search(r'^(.+) uptime', data, re.M)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'^[Cc]isco (\S+).+bytes of .*memory', data, re.M)
if match:
return match.group(1)
def parse_image(self, data):
match = re.search(r'image file is "(.+)"', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'board ID (\S+)', data)
if match:
return match.group(1)
def parse_stacks(self, data):
match = re.findall(r'^Model [Nn]umber\s+: (\S+)', data, re.M)
if match:
self.facts['stacked_models'] = match
match = re.findall(r'^System [Ss]erial [Nn]umber\s+: (\S+)', data, re.M)
if match:
self.facts['stacked_serialnums'] = match
class Hardware(FactsBase):
COMMANDS = [
'dir',
'show memory statistics'
]
def populate(self):
super(Hardware, self).populate()
data = self.responses[0]
if data:
self.facts['filesystems'] = self.parse_filesystems(data)
self.facts['filesystems_info'] = self.parse_filesystems_info(data)
data = self.responses[1]
if data:
if 'Invalid input detected' in data:
warnings.append('Unable to gather memory statistics')
else:
processor_line = [l for l in data.splitlines()
if 'Processor' in l].pop()
match = re.findall(r'\s(\d+)\s', processor_line)
if match:
self.facts['memtotal_mb'] = int(match[0]) / 1024
self.facts['memfree_mb'] = int(match[3]) / 1024
def parse_filesystems(self, data):
return re.findall(r'^Directory of (\S+)/', data, re.M)
def parse_filesystems_info(self, data):
facts = dict()
fs = ''
for line in data.split('\n'):
match = re.match(r'^Directory of (\S+)/', line)
if match:
fs = match.group(1)
facts[fs] = dict()
continue
match = re.match(r'^(\d+) bytes total \((\d+) bytes free\)', line)
if match:
facts[fs]['spacetotal_kb'] = int(match.group(1)) / 1024
facts[fs]['spacefree_kb'] = int(match.group(2)) / 1024
return facts
class Config(FactsBase):
COMMANDS = ['show running-config']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = [
'show interfaces',
'show ip interface',
'show ipv6 interface',
'show lldp'
]
def populate(self):
super(Interfaces, self).populate()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.responses[0]
if data:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
data = self.responses[1]
if data:
data = self.parse_interfaces(data)
self.populate_ipv4_interfaces(data)
data = self.responses[2]
if data:
data = self.parse_interfaces(data)
self.populate_ipv6_interfaces(data)
data = self.responses[3]
lldp_errs = ['Invalid input', 'LLDP is not enabled']
if data and not any(err in data for err in lldp_errs):
neighbors = self.run(['show lldp neighbors detail'])
if neighbors:
self.facts['neighbors'] = self.parse_neighbors(neighbors[0])
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
intf['mtu'] = self.parse_mtu(value)
intf['bandwidth'] = self.parse_bandwidth(value)
intf['mediatype'] = self.parse_mediatype(value)
intf['duplex'] = self.parse_duplex(value)
intf['lineprotocol'] = self.parse_lineprotocol(value)
intf['operstatus'] = self.parse_operstatus(value)
intf['type'] = self.parse_type(value)
facts[key] = intf
return facts
def populate_ipv4_interfaces(self, data):
for key, value in data.items():
self.facts['interfaces'][key]['ipv4'] = list()
primary_address = addresses = []
primary_address = re.findall(r'Internet address is (.+)$', value, re.M)
addresses = re.findall(r'Secondary address (.+)$', value, re.M)
if len(primary_address) == 0:
continue
addresses.append(primary_address[0])
for address in addresses:
addr, subnet = address.split("/")
ipv4 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv4')
self.facts['interfaces'][key]['ipv4'].append(ipv4)
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
try:
self.facts['interfaces'][key]['ipv6'] = list()
except KeyError:
self.facts['interfaces'][key] = dict()
self.facts['interfaces'][key]['ipv6'] = list()
addresses = re.findall(r'\s+(.+), subnet', value, re.M)
subnets = re.findall(r', subnet is (.+)$', value, re.M)
for addr, subnet in zip(addresses, subnets):
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def parse_neighbors(self, neighbors):
facts = dict()
for entry in neighbors.split('------------------------------------------------'):
if entry == '':
continue
intf = self.parse_lldp_intf(entry)
if intf is None:
return facts
if intf not in facts:
facts[intf] = list()
fact = dict()
fact['host'] = self.parse_lldp_host(entry)
fact['port'] = self.parse_lldp_port(entry)
facts[intf].append(fact)
return facts
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+)', line)
if match:
key = match.group(1)
parsed[key] = line
return parsed
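# e.g. a line such as "GigabitEthernet0/1 is up, line protocol is up" starts a
# new key, and its indented continuation lines ("  Hardware is ...") are
# appended to parsed['GigabitEthernet0/1'].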
def parse_description(self, data):
match = re.search(r'Description: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_macaddress(self, data):
match = re.search(r'Hardware is (?:.*), address is (\S+)', data)
if match:
return match.group(1)
def parse_ipv4(self, data):
match = re.search(r'Internet address is (\S+)', data)
if match:
addr, masklen = match.group(1).split('/')
return dict(address=addr, masklen=int(masklen))
def parse_mtu(self, data):
match = re.search(r'MTU (\d+)', data)
if match:
return int(match.group(1))
def parse_bandwidth(self, data):
match = re.search(r'BW (\d+)', data)
if match:
return int(match.group(1))
def parse_duplex(self, data):
match = re.search(r'(\w+) Duplex', data, re.M)
if match:
return match.group(1)
def parse_mediatype(self, data):
match = re.search(r'media type is (.+)$', data, re.M)
if match:
return match.group(1)
def parse_type(self, data):
match = re.search(r'Hardware is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lineprotocol(self, data):
match = re.search(r'line protocol is (\S+)\s*$', data, re.M)
if match:
return match.group(1)
def parse_operstatus(self, data):
match = re.search(r'^(?:.+) is (.+),', data, re.M)
if match:
return match.group(1)
def parse_lldp_intf(self, data):
match = re.search(r'^Local Intf: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_lldp_host(self, data):
match = re.search(r'System Name: (.+)$', data, re.M)
if match:
return match.group(1)
def parse_lldp_port(self, data):
match = re.search(r'Port id: (.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
warnings = list()
def main():
"""main entry point for module execution
"""
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset')
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
check_args(module, warnings)
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
delete/estofadora
|
refs/heads/master
|
estofadora/statement/admin.py
|
1
|
# coding: utf-8
from django.utils.datetime_safe import datetime
from django.contrib import admin
from estofadora.statement.models import Cash, Balance
class CashAdmin(admin.ModelAdmin):
list_display = ('date', 'history', 'income', 'expenses', 'total')
search_fields = ('date', 'history')
date_hierarchy = 'date'
list_filter = ['date']
def subscribed_today(self, obj):
return obj.date.date() == datetime.today().date()
subscribed_today.short_description = (u'Cadastrado hoje?')
subscribed_today.boolean = True
admin.site.register(Cash, CashAdmin)
class BalanceAdmin(admin.ModelAdmin):
list_display = ('date', 'value')
search_fields = ('date', )
date_hierarchy = 'date'
list_filter = ['date']
def subscribed_today(self, obj):
return obj.date.date() == datetime.today().date()
subscribed_today.short_description = (u'Cadastrado hoje?')
subscribed_today.boolean = True
admin.site.register(Balance, BalanceAdmin)
|
aasoliz/Bitcoin-Statistics
|
refs/heads/master
|
venv/lib/python2.7/site-packages/jinja2/loaders.py
|
333
|
# -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
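# e.g. split_template_path('users/index.html') -> ['users', 'index.html'],
# while split_template_path('../secret') raises TemplateNotFound.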
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with file(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as string, or if multiple
locations are wanted a list of them which is then looked up in the
given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8+
The *followlinks* parameter was added.
"""
def __init__(self, searchpath, encoding='utf-8', followlinks=False):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful this is disabled per default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
a unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash per
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
|
googleapis/google-cloud-php
|
refs/heads/master
|
OsLogin/synth.py
|
2
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import subprocess
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()
v1beta = gapic.php_library(
service='oslogin',
version='v1beta',
bazel_target='//google/cloud/oslogin/v1beta:google-cloud-oslogin-v1beta-php'
)
v1 = gapic.php_library(
service='oslogin',
version='v1',
bazel_target='//google/cloud/oslogin/v1:google-cloud-oslogin-v1-php'
)
# copy all src
s.move(v1beta / f'src/V1beta')
s.move(v1 / f'src/V1')
# copy proto files to src also
s.move(v1beta / f'proto/src/Google/Cloud/OsLogin', f'src/')
s.move(v1 / f'proto/src/Google/Cloud/OsLogin', f'src/')
s.move(v1beta / f'tests/')
s.move(v1 / f'tests/')
# copy GPBMetadata file to metadata
s.move(v1beta / f'proto/src/GPBMetadata/Google/Cloud/Oslogin', f'metadata/')
s.move(v1 / f'proto/src/GPBMetadata/Google/Cloud/Oslogin', f'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# V1 is GA, so remove @experimental tags
s.replace(
'src/V1/**/*Client.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
# fix copyright year
s.replace(
'src/V1beta/**/*.php',
r'Copyright \d{4}',
r'Copyright 2017')
s.replace(
'tests/**/V1beta/*Test.php',
r'Copyright \d{4}',
r'Copyright 2018')
s.replace(
'src/V1/**/*.php',
r'Copyright \d{4}',
r'Copyright 2018')
s.replace(
'tests/**/V1/*Test.php',
r'Copyright \d{4}',
r'Copyright 2018')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
["src/**/V*/**/*.php", "src/Common/**/*.php"],
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
["src/**/V*/**/*.php", "src/Common/**/*.php"],
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
["src/**/V*/**/*.php", "src/Common/**/*.php"],
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
# format generated clients
subprocess.run([
'npm',
'exec',
'--yes',
'--package=@prettier/plugin-php@^0.16',
'--',
'prettier',
'**/Gapic/*',
'--write',
'--parser=php',
'--single-quote',
'--print-width=80'])
|
flgiordano/netcash
|
refs/heads/master
|
+/google-cloud-sdk/lib/surface/sql/ssl_certs/create.py
|
1
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates an SSL certificate for a Cloud SQL instance."""
import os
from googlecloudsdk.api_lib.sql import errors
from googlecloudsdk.api_lib.sql import validate
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import list_printer
from googlecloudsdk.core import log
from googlecloudsdk.core.util import files
class _BaseAddCert(object):
"""Base class for sql ssl_certs create."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'common_name',
help='User supplied name. Constrained to [a-zA-Z.-_ ]+.')
parser.add_argument(
'cert_file',
default=None,
help=('Location of file which the private key of the created ssl-cert'
' will be written to.'))
@errors.ReraiseHttpException
def Run(self, args):
"""Creates an SSL certificate for a Cloud SQL instance.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A dict object representing the operations resource describing the create
operation if the create was successful.
Raises:
HttpException: A http error response was received while executing api
request.
ToolException: An error other than http error occurred while executing the
command.
"""
if os.path.exists(args.cert_file):
raise exceptions.ToolException('file [{path}] already exists'.format(
path=args.cert_file))
# First check if args.out_file is writeable. If not, abort and don't create
# the useless cert.
try:
with files.OpenForWritingPrivate(args.cert_file) as cf:
cf.write('placeholder\n')
except (files.Error, OSError) as e:
raise exceptions.ToolException('unable to write [{path}]: {error}'.format(
path=args.cert_file, error=str(e)))
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
# TODO(user): figure out how to rectify the common_name and the
# sha1fingerprint, so that things can work with the resource parser.
result = sql_client.sslCerts.Insert(
sql_messages.SqlSslCertsInsertRequest(
project=instance_ref.project,
instance=instance_ref.instance,
sslCertsInsertRequest=sql_messages.SslCertsInsertRequest(
commonName=args.common_name)))
private_key = result.clientCert.certPrivateKey
with files.OpenForWritingPrivate(args.cert_file) as cf:
cf.write(private_key)
cf.write('\n')
cert_ref = resources.Create(
collection='sql.sslCerts',
project=instance_ref.project,
instance=instance_ref.instance,
sha1Fingerprint=result.clientCert.certInfo.sha1Fingerprint)
log.CreatedResource(cert_ref)
return result
def Display(self, unused_args, result):
"""Display prints information about what just happened to stdout.
Args:
unused_args: The same as the args in Run.
result: A dict object representing the response if the api
request was successful.
"""
list_printer.PrintResourceList('sql.sslCerts', [result.clientCert.certInfo])
@base.ReleaseTracks(base.ReleaseTrack.GA)
class AddCert(_BaseAddCert, base.Command):
"""Creates an SSL certificate for a Cloud SQL instance."""
pass
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class AddCertBeta(_BaseAddCert, base.Command):
"""Creates an SSL certificate for a Cloud SQL instance."""
pass
|
Beercow/viper
|
refs/heads/master
|
viper/modules/editdistance.py
|
6
|
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import itertools
from viper.common.abstracts import Module
from viper.core.session import __sessions__
from viper.core.database import Database
class Editdistance(Module):
cmd = 'editdistance'
description = 'Edit distance on the filenames'
authors = ['emdel', 'nex']
def __init__(self):
super(Editdistance, self).__init__()
def edit(self):
db = Database()
samples = db.find(key='all')
filenames = []
for sample in samples:
if sample.sha256 == __sessions__.current.file.sha256:
continue
filenames.append(sample.name)
# from http://hetland.org/coding/python/levenshtein.py
def levenshtein(a, b):
"Calculates the Levenshtein distance between a and b."
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = range(n + 1)
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
distance = []
for i in itertools.combinations(filenames, 2):
edit = levenshtein(i[0], i[1])
distance.append(edit)
        if distance:
            self.log('info', "Average Edit distance: {0}".format(sum(distance) / len(distance)))
        else:
            self.log('warning', "Not enough filenames to compute an edit distance")
def run(self):
super(Editdistance, self).run()
if self.args is None:
return
if not __sessions__.is_set():
self.log('error', "No open session")
return
self.edit()
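# Illustrative note (not part of the original module): the nested levenshtein()
# helper above implements the classic dynamic-programming edit distance, e.g.
# levenshtein("kitten", "sitting") evaluates to 3 (substitute 'k'->'s',
# substitute 'e'->'i', insert 'g'), and the run() command reports the average
# of that distance over every pair of filenames stored for the other samples.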
|
sunlianqiang/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/lib2to3/tests/test_refactor.py
|
91
|
"""
Unit tests for refactor.py.
"""
from __future__ import with_statement
import sys
import os
import codecs
import operator
import io
import tempfile
import shutil
import unittest
import warnings
from lib2to3 import refactor, pygram, fixer_base
from lib2to3.pgen2 import token
from . import support
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
FIXER_DIR = os.path.join(TEST_DATA_DIR, "fixers")
sys.path.append(FIXER_DIR)
try:
_DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
finally:
sys.path.pop()
_2TO3_FIXERS = refactor.get_fixers_from_package("lib2to3.fixes")
class TestRefactoringTool(unittest.TestCase):
def setUp(self):
sys.path.append(FIXER_DIR)
def tearDown(self):
sys.path.pop()
def check_instances(self, instances, classes):
for inst, cls in zip(instances, classes):
if not isinstance(inst, cls):
self.fail("%s are not instances of %s" % instances, classes)
def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
return refactor.RefactoringTool(fixers, options, explicit)
def test_print_function_option(self):
rt = self.rt({"print_function" : True})
self.assertIs(rt.grammar, pygram.python_grammar_no_print_statement)
self.assertIs(rt.driver.grammar,
pygram.python_grammar_no_print_statement)
def test_write_unchanged_files_option(self):
rt = self.rt()
self.assertFalse(rt.write_unchanged_files)
rt = self.rt({"write_unchanged_files" : True})
self.assertTrue(rt.write_unchanged_files)
def test_fixer_loading_helpers(self):
contents = ["explicit", "first", "last", "parrot", "preorder"]
non_prefixed = refactor.get_all_fix_names("myfixes")
prefixed = refactor.get_all_fix_names("myfixes", False)
full_names = refactor.get_fixers_from_package("myfixes")
self.assertEqual(prefixed, ["fix_" + name for name in contents])
self.assertEqual(non_prefixed, contents)
self.assertEqual(full_names,
["myfixes.fix_" + name for name in contents])
def test_detect_future_features(self):
run = refactor._detect_future_features
fs = frozenset
empty = fs()
self.assertEqual(run(""), empty)
self.assertEqual(run("from __future__ import print_function"),
fs(("print_function",)))
self.assertEqual(run("from __future__ import generators"),
fs(("generators",)))
self.assertEqual(run("from __future__ import generators, feature"),
fs(("generators", "feature")))
inp = "from __future__ import generators, print_function"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp ="from __future__ import print_function, generators"
self.assertEqual(run(inp), fs(("print_function", "generators")))
inp = "from __future__ import (print_function,)"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "from __future__ import (generators, print_function)"
self.assertEqual(run(inp), fs(("generators", "print_function")))
inp = "from __future__ import (generators, nested_scopes)"
self.assertEqual(run(inp), fs(("generators", "nested_scopes")))
inp = """from __future__ import generators
from __future__ import print_function"""
self.assertEqual(run(inp), fs(("generators", "print_function")))
invalid = ("from",
"from 4",
"from x",
"from x 5",
"from x im",
"from x import",
"from x import 4",
)
for inp in invalid:
self.assertEqual(run(inp), empty)
inp = "'docstring'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "'docstring'\n'somng'\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
inp = "# comment\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "# comment\n'doc'\nfrom __future__ import print_function"
self.assertEqual(run(inp), fs(("print_function",)))
inp = "class x: pass\nfrom __future__ import print_function"
self.assertEqual(run(inp), empty)
def test_get_headnode_dict(self):
class NoneFix(fixer_base.BaseFix):
pass
class FileInputFix(fixer_base.BaseFix):
PATTERN = "file_input< any * >"
class SimpleFix(fixer_base.BaseFix):
PATTERN = "'name'"
no_head = NoneFix({}, [])
with_head = FileInputFix({}, [])
simple = SimpleFix({}, [])
d = refactor._get_headnode_dict([no_head, with_head, simple])
top_fixes = d.pop(pygram.python_symbols.file_input)
self.assertEqual(top_fixes, [with_head, no_head])
name_fixes = d.pop(token.NAME)
self.assertEqual(name_fixes, [simple, no_head])
for fixes in d.values():
self.assertEqual(fixes, [no_head])
def test_fixer_loading(self):
from myfixes.fix_first import FixFirst
from myfixes.fix_last import FixLast
from myfixes.fix_parrot import FixParrot
from myfixes.fix_preorder import FixPreorder
rt = self.rt()
pre, post = rt.get_fixers()
self.check_instances(pre, [FixPreorder])
self.check_instances(post, [FixFirst, FixParrot, FixLast])
def test_naughty_fixers(self):
self.assertRaises(ImportError, self.rt, fixers=["not_here"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
def test_refactor_string(self):
rt = self.rt()
input = "def parrot(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertNotEqual(str(tree), input)
input = "def f(): pass\n\n"
tree = rt.refactor_string(input, "<test>")
self.assertEqual(str(tree), input)
def test_refactor_stdin(self):
class MyRT(refactor.RefactoringTool):
def print_output(self, old_text, new_text, filename, equal):
results.extend([old_text, new_text, filename, equal])
results = []
rt = MyRT(_DEFAULT_FIXERS)
save = sys.stdin
sys.stdin = io.StringIO("def parrot(): pass\n\n")
try:
rt.refactor_stdin()
finally:
sys.stdin = save
expected = ["def parrot(): pass\n\n",
"def cheese(): pass\n\n",
"<stdin>", False]
self.assertEqual(results, expected)
def check_file_refactoring(self, test_file, fixers=_2TO3_FIXERS,
options=None, mock_log_debug=None,
actually_write=True):
tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
self.addCleanup(shutil.rmtree, tmpdir)
# make a copy of the tested file that we can write to
shutil.copy(test_file, tmpdir)
test_file = os.path.join(tmpdir, os.path.basename(test_file))
os.chmod(test_file, 0o644)
def read_file():
with open(test_file, "rb") as fp:
return fp.read()
old_contents = read_file()
rt = self.rt(fixers=fixers, options=options)
if mock_log_debug:
rt.log_debug = mock_log_debug
rt.refactor_file(test_file)
self.assertEqual(old_contents, read_file())
if not actually_write:
return
rt.refactor_file(test_file, True)
new_contents = read_file()
self.assertNotEqual(old_contents, new_contents)
return new_contents
def test_refactor_file(self):
test_file = os.path.join(FIXER_DIR, "parrot_example.py")
self.check_file_refactoring(test_file, _DEFAULT_FIXERS)
def test_refactor_file_write_unchanged_file(self):
test_file = os.path.join(FIXER_DIR, "parrot_example.py")
debug_messages = []
def recording_log_debug(msg, *args):
debug_messages.append(msg % args)
self.check_file_refactoring(test_file, fixers=(),
options={"write_unchanged_files": True},
mock_log_debug=recording_log_debug,
actually_write=False)
# Testing that it logged this message when write=False was passed is
# sufficient to see that it did not bail early after "No changes".
message_regex = r"Not writing changes to .*%s%s" % (
os.sep, os.path.basename(test_file))
for message in debug_messages:
if "Not writing changes" in message:
self.assertRegex(message, message_regex)
break
else:
self.fail("%r not matched in %r" % (message_regex, debug_messages))
def test_refactor_dir(self):
def check(structure, expected):
def mock_refactor_file(self, f, *args):
got.append(f)
save_func = refactor.RefactoringTool.refactor_file
refactor.RefactoringTool.refactor_file = mock_refactor_file
rt = self.rt()
got = []
dir = tempfile.mkdtemp(prefix="2to3-test_refactor")
try:
os.mkdir(os.path.join(dir, "a_dir"))
for fn in structure:
open(os.path.join(dir, fn), "wb").close()
rt.refactor_dir(dir)
finally:
refactor.RefactoringTool.refactor_file = save_func
shutil.rmtree(dir)
self.assertEqual(got,
[os.path.join(dir, path) for path in expected])
check([], [])
tree = ["nothing",
"hi.py",
".dumb",
".after.py",
"notpy.npy",
"sappy"]
expected = ["hi.py"]
check(tree, expected)
tree = ["hi.py",
os.path.join("a_dir", "stuff.py")]
check(tree, tree)
def test_file_encoding(self):
fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
self.check_file_refactoring(fn)
def test_false_file_encoding(self):
fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
data = self.check_file_refactoring(fn)
def test_bom(self):
fn = os.path.join(TEST_DATA_DIR, "bom.py")
data = self.check_file_refactoring(fn)
self.assertTrue(data.startswith(codecs.BOM_UTF8))
def test_crlf_newlines(self):
old_sep = os.linesep
os.linesep = "\r\n"
try:
fn = os.path.join(TEST_DATA_DIR, "crlf.py")
fixes = refactor.get_fixers_from_package("lib2to3.fixes")
self.check_file_refactoring(fn, fixes)
finally:
os.linesep = old_sep
def test_refactor_docstring(self):
rt = self.rt()
doc = """
>>> example()
42
"""
out = rt.refactor_docstring(doc, "<test>")
self.assertEqual(out, doc)
doc = """
>>> def parrot():
... return 43
"""
out = rt.refactor_docstring(doc, "<test>")
self.assertNotEqual(out, doc)
def test_explicit(self):
from myfixes.fix_explicit import FixExplicit
rt = self.rt(fixers=["myfixes.fix_explicit"])
self.assertEqual(len(rt.post_order), 0)
rt = self.rt(explicit=["myfixes.fix_explicit"])
for fix in rt.post_order:
if isinstance(fix, FixExplicit):
break
else:
self.fail("explicit fixer not loaded")
|
ds-hwang/chromium-crosswalk
|
refs/heads/master
|
third_party/WebKit/Source/platform/v8_inspector/build/xxd.py
|
10
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Represent a file as a C++ constant string.
Usage:
python xxd.py VAR SOURCE DEST
"""
import sys
import rjsmin
def main():
variable_name, input_filename, output_filename = sys.argv[1:]
with open(input_filename) as input_file:
input_text = input_file.read()
input_text = rjsmin.jsmin(input_text)
hex_values = ['0x{0:02x}'.format(ord(char)) for char in input_text]
const_declaration = 'const char %s[] = {\n%s\n};\n' % (
variable_name, ', '.join(hex_values))
with open(output_filename, 'w') as output_file:
output_file.write(const_declaration)
if __name__ == '__main__':
sys.exit(main())
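# Illustrative sketch (not part of the original script; file and variable names
# here are made up), assuming rjsmin leaves the input unchanged: for a source
# file containing the two characters "ab",
#   python xxd.py kSource input.js output.cc
# would write roughly:
#   const char kSource[] = {
#   0x61, 0x62
#   };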
|
cgar/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/_pytest/_code/__init__.py
|
176
|
""" python inspection/code generation API """
from .code import Code # noqa
from .code import ExceptionInfo # noqa
from .code import Frame # noqa
from .code import Traceback # noqa
from .code import getrawcode # noqa
from .code import patch_builtins # noqa
from .code import unpatch_builtins # noqa
from .source import Source # noqa
from .source import compile_ as compile # noqa
from .source import getfslineno # noqa
|
gooddata/openstack-nova
|
refs/heads/master
|
nova/tests/json_ref.py
|
6
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_serialization import jsonutils
def _resolve_ref(ref, base_path):
file_path, _, json_path = ref.partition('#')
if json_path:
        raise NotImplementedError('JSON refs with a JSON path after the "#" '
                                  'are not yet supported')
path = os.path.join(base_path, file_path)
# binary mode is needed due to bug/1515231
with open(path, 'r+b') as f:
ref_value = jsonutils.load(f)
base_path = os.path.dirname(path)
res = resolve_refs(ref_value, base_path)
return res
def resolve_refs(obj_with_refs, base_path):
if isinstance(obj_with_refs, list):
for i, item in enumerate(obj_with_refs):
obj_with_refs[i] = resolve_refs(item, base_path)
elif isinstance(obj_with_refs, dict):
if '$ref' in obj_with_refs.keys():
ref = obj_with_refs.pop('$ref')
resolved_ref = _resolve_ref(ref, base_path)
# the rest of the ref dict contains overrides for the ref. Resolve
# refs in the overrides then apply those overrides recursively
# here.
resolved_overrides = resolve_refs(obj_with_refs, base_path)
_update_dict_recursively(resolved_ref, resolved_overrides)
return resolved_ref
else:
for key, value in obj_with_refs.items():
obj_with_refs[key] = resolve_refs(value, base_path)
else:
# scalar, nothing to do
pass
return obj_with_refs
def _update_dict_recursively(d, update):
"""Update dict d recursively with data from dict update"""
for k, v in update.items():
if k in d and isinstance(d[k], dict) and isinstance(v, dict):
_update_dict_recursively(d[k], v)
else:
d[k] = v
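# Illustrative example (not part of the original module) of how the override
# merge behaves on plain dicts:
#
#     base = {'server': {'name': 'vm1', 'flavor': 'small'}}
#     _update_dict_recursively(base, {'server': {'flavor': 'large'}})
#     # base is now {'server': {'name': 'vm1', 'flavor': 'large'}}
#
# resolve_refs() applies the same merge after loading the '$ref' target, so
# sibling keys next to '$ref' act as overrides on the referenced document.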
|
cjlux/Poppy-ENSAM-Talence
|
refs/heads/French
|
Environnement_programmation/Cours python/Tests/2012-dec/Correction/DecodeView.py
|
1
|
# -*- coding: utf-8 -*-
from PyQt4.QtGui import QFrame, QLabel, QPushButton
from PyQt4.QtCore import SIGNAL
from Viewer import Viewer
class DecodeView(Viewer):
'To view the loaded image and the hidden text (if any)'
def __init__(self, parent, posx, posy):
Viewer.__init__(self, parent, posx, posy)
self.__hiddenText = QLabel(self)
self.__btnDecode = QPushButton("Decode",self)
self.__ConfigureWidgets()
def __ConfigureWidgets(self):
# configures widgets in base class:
Viewer._ConfigureWidgets(self)
self.connect(self._btnLoad, SIGNAL('clicked()'), self.LoadImage)
B = self._border
Wi, Wb, Wt = self._imageWidth , self._btnWidth , self._textWidth
Hi, Hb, Ht = self._imageHeight, self._btnHeight, self._textHeight
# resizes, moves and connects the button "Decode":
self.__btnDecode.resize(Wb, Hb)
self.__btnDecode.move(B+Wi+B, B+Hb+B)
self.connect(self.__btnDecode, SIGNAL('clicked()'), self.Decode)
# resizes, moves and configures the text area:
self.__hiddenText.move(B, B+Hb+B/2+Hi+B)
self.__hiddenText.resize(Wt, Ht)
self.__hiddenText.setText("")
self.__hiddenText.setWordWrap(True)
self.__hiddenText.setFrameShape(QFrame.Box)
def LoadImage(self):
# call LoadImage in base class:
Viewer._LoadImage(self)
self.__hiddenText.setText("")
def Decode(self):
        'To retrieve the hidden text inside the image'
        if self._initialImage is not None:
# calls method FindTextInImage() on the object steganoGP in base class:
hiddenText = self._steganoGP.FindTextInImage(self._initialImage)
# unicode(text,'utf-8') returns the text encoded with the UTF-8 standard.
texte = unicode(hiddenText,'utf-8')
# Displays the text in the appropriate widget:
self.__hiddenText.setText(texte)
|
kaiserbrito/pythonshop
|
refs/heads/master
|
catalog/models.py
|
35
|
from django.db import models
|
ibab/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/utils/inspect_checkpoint.py
|
5
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.utils import checkpoints
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("file_name", "", "Checkpoint filename")
tf.app.flags.DEFINE_string("tensor_name", "", "Name of the tensor to inspect")
def print_tensors_in_checkpoint_file(file_name, tensor_name):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
"""
try:
if not tensor_name:
variables = checkpoints.list_variables(file_name)
for name, shape in variables:
print("%s\t%s" % (name, str(shape)))
else:
print("tensor_name: ", tensor_name)
print(checkpoints.load_variable(file_name, tensor_name))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=<checkpoint_file_name "
"or directory> [--tensor_name=tensor_to_print]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)
if __name__ == "__main__":
tf.app.run()
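# Illustrative invocations (not part of the original script); the checkpoint
# path and tensor name are made up:
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt
#   python inspect_checkpoint.py --file_name=/tmp/model.ckpt --tensor_name=w1
# The first form lists tensor names and shapes, the second prints one tensor.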
|
agermanidis/pyalphaclock
|
refs/heads/master
|
pyalphaclock/__init__.py
|
1
|
import calendar
import datetime
import serial
import time
def pad_string(s, length=5, align='right'):
s = s[:length]
while len(s) < length:
if align == 'left':
s += ' '
else:
s = ' ' + s
return str(s).upper()
def substr(s, start, end):
spaces_before = ' ' * (0-start)
spaces_after = ' ' * (end-len(s))
return spaces_before + s[max(0,start):end] + spaces_after
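# Illustrative examples (not part of the original module) of the two helpers:
#   pad_string('hi')               -> '   HI'  (right-aligned, upper-cased, width 5)
#   pad_string('hi', align='left') -> 'HI   '
#   substr('hello', -2, 3)         -> '  hel'  (positions outside the string pad with spaces)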
class AlphaClock(object):
def __init__(self, path='/dev/tty.usbserial-AE015K9Z'):
self.serial = serial.Serial(path, 19200)
def clear_screen(self):
"""
Clears the clock screen.
"""
self.serial.write('\xffA0' + " "*5 + "0"*5)
def display_text(self, s, align='right'):
"""
Display the provided string in the clock.
Only the first 5 characters of the provided string are considered.
If the string has length less than 5, then the 'align' keyword argument
determines whether it will be aligned to the left or to the right when displayed.
"""
to_display = pad_string(s, align=align)
self.serial.write('\xffA0' + to_display + "0"*5)
def display_scrolling_text(self, s, delay=0.5):
"""
Display text that's longer than 5 characters by scrolling through it.
You can specify the delay between every subsequent character with the 'delay' keyword argument.
"""
for i in range(-4, len(s)):
self.display_text(substr(s, i, i+5))
time.sleep(delay)
def display_date(self, d):
"""
        Display the month and day from the provided datetime object (e.g. 'FEB23').
"""
to_display = d.strftime("%b%d").upper()
self.serial.write('\xffA0' + to_display + "00100")
def display_time(self, d):
"""
        Display the hour and minute from the provided datetime object (e.g. '12:52').
"""
unixtime = time.mktime(d.timetuple())
unixtime = calendar.timegm(d.utctimetuple())
self.serial.write('\xffMT' + " "*10)
self.serial.write('\xffST' + str(int(unixtime)))
|
sdopoku/flask-hello-world
|
refs/heads/master
|
venv/lib/python2.7/site-packages/flask/config.py
|
781
|
# -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
    On Windows use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
|
oscaro/django-oscar-adyen
|
refs/heads/master
|
adyen/migrations/0002_auto_20141016_1601.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('adyen', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='adyentransaction',
name='order_number',
field=models.CharField(max_length=255),
),
]
|
apanju/odoo
|
refs/heads/8.0
|
addons/report/__openerp__.py
|
385
|
{
'name': 'Report',
'category': 'Base',
'summary': 'Report',
'version': '1.0',
'description': """
Report
""",
'author': 'OpenERP SA',
'depends': ['base', 'web'],
'data': [
'views/layouts.xml',
'views/views.xml',
'data/report_paperformat.xml',
'security/ir.model.access.csv',
'views/report.xml',
],
'installable': True,
'auto_install': True,
}
|
kornev/twitter_nlp
|
refs/heads/master
|
python/cap/cap_eval.py
|
10
|
#!/usr/bin/python
###################################################################################
# cap_eval.py
#
# Evaluates how well we can identify whether a given tweet is properly capitalized
###################################################################################
import re
import sys
sys.path.append('/homes/gws/aritter/twitter_nlp/python')
import twokenize
class DictEval:
def __init__(self, capFile):
#Read in capitalization dictionary from NYT data
self.capDict = {}
capDictSize = 5000
for line in open(capFile):
line = line.rstrip('\n')
(llr, word, cap, cw, cc, cwc, N) = line.split('\t')
self.capDict[word] = cap
capDictSize -= 1
if capDictSize == 0:
break
def Eval(self, tweet):
words = tweet[6].split(' ')
for i in range(len(words)):
capitalized = re.search(r'^[A-Z]|[a-z][A-Z]', words[i])
if i > 0 and not re.match(r"\.|\?|!|@.+|http:.+|:|\"", words[i-1]):
if capitalized and self.capDict.get(words[i].lower(), '1') != '1':
return False
#elif not capitalized and re.match(r'[a-z]+', words[i]) and self.capDict.get(words[i].lower(), '1') != '0':
# return False
return True
######################################################################
# Simple heuristic: at least one word is capitalized and at least
# one noun or verb is lowercase
######################################################################
def SimpleEval(tweet):
words = tweet[6].split(' ')
capFound = False
lowFound = False
for i in range(len(words)):
if re.search(r'[A-Z]', words[i][0:1]):
capFound = True
elif i > 0 and not re.match(r"\.|\?|!|@.+|http:.+|:|\"", words[i-1]) and re.match(r'[^a-z]+', words[i]):
lowFound = True
return capFound and lowFound
def Baseline(tweet):
return True
class CapEval:
def __init__(self, testData):
self.labeledTweets = []
for line in open(testData):
line = line.rstrip('\n')
fields = line.split('\t')
fields[6] = ' '.join(twokenize.tokenize(fields[6]))
self.labeledTweets.append(fields)
def Eval(self, classifier):
fp = 0.0
fn = 0.0
tp = 0.0
tn = 0.0
for tweet in self.labeledTweets:
if classifier(tweet) and tweet[len(tweet)-1] == '1':
tp += 1.0
elif not classifier(tweet) and tweet[len(tweet)-1] == '1':
fn += 1.0
elif not classifier(tweet) and tweet[len(tweet)-1] == '0':
tn += 1.0
elif classifier(tweet) and tweet[len(tweet)-1] == '0':
fp += 1.0
#Avoid division by 0
if tp > 0.0:
return (tp, tn, fp, fn, tp / (tp + fp), tp / (tp + fn))
else:
return (tp, tn, fp, fn, 0, 0)
if __name__ == "__main__":
ce = CapEval('/homes/gws/aritter/twitter_nlp/data/cap/tweets_cap_test.csv')
de = DictEval('/homes/gws/aritter/twitter_nlp/data/cap/nyt_cap_llr')
print "dict:\t" + str(ce.Eval(de.Eval))
print "simple:\t" + str(ce.Eval(SimpleEval))
|
DominoTree/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/execute_async_script/collections.py
|
9
|
import os
from tests.support.asserts import assert_same_element, assert_success
from tests.support.inline import inline
def execute_async_script(session, script, args=None):
if args is None:
args = []
body = {"script": script, "args": args}
return session.transport.send(
"POST", "/session/{session_id}/execute/async".format(**vars(session)),
body)
def test_arguments(session):
response = execute_async_script(session, """
let resolve = arguments[0];
function func() {
return arguments;
}
resolve(func("foo", "bar"));
""")
assert_success(response, [u"foo", u"bar"])
def test_array(session):
response = execute_async_script(session, """
let resolve = arguments[0];
resolve([1, 2]);
""")
assert_success(response, [1, 2])
def test_file_list(session, tmpdir):
files = [tmpdir.join("foo.txt"), tmpdir.join("bar.txt")]
session.url = inline("<input type=file multiple>")
upload = session.find.css("input", all=False)
for file in files:
file.write("morn morn")
upload.send_keys(str(file))
response = execute_async_script(session, """
let resolve = arguments[0];
resolve(document.querySelector('input').files);
""")
value = assert_success(response)
assert isinstance(value, list)
assert len(value) == len(files)
for expected, actual in zip(files, value):
assert isinstance(actual, dict)
assert "name" in actual
assert isinstance(actual["name"], basestring)
assert os.path.basename(str(expected)) == actual["name"]
def test_html_all_collection(session):
session.url = inline("""
<p>foo
<p>bar
""")
html = session.find.css("html", all=False)
head = session.find.css("head", all=False)
meta = session.find.css("meta", all=False)
body = session.find.css("body", all=False)
ps = session.find.css("p")
response = execute_async_script(session, """
let resolve = arguments[0];
resolve(document.all);
""")
value = assert_success(response)
assert isinstance(value, list)
# <html>, <head>, <meta>, <body>, <p>, <p>
assert len(value) == 6
assert_same_element(session, html, value[0])
assert_same_element(session, head, value[1])
assert_same_element(session, meta, value[2])
assert_same_element(session, body, value[3])
assert_same_element(session, ps[0], value[4])
assert_same_element(session, ps[1], value[5])
def test_html_collection(session):
session.url = inline("""
<p>foo
<p>bar
""")
ps = session.find.css("p")
response = execute_async_script(session, """
let resolve = arguments[0];
resolve(document.getElementsByTagName('p'));
""")
value = assert_success(response)
assert isinstance(value, list)
assert len(value) == 2
for expected, actual in zip(ps, value):
assert_same_element(session, expected, actual)
def test_html_form_controls_collection(session):
session.url = inline("""
<form>
<input>
<input>
</form>
""")
inputs = session.find.css("input")
response = execute_async_script(session, """
let resolve = arguments[0];
resolve(document.forms[0].elements);
""")
value = assert_success(response)
assert isinstance(value, list)
assert len(value) == 2
for expected, actual in zip(inputs, value):
assert_same_element(session, expected, actual)
def test_html_options_collection(session):
session.url = inline("""
<select>
<option>
<option>
</select>
""")
options = session.find.css("option")
response = execute_async_script(session, """
let resolve = arguments[0];
resolve(document.querySelector('select').options);
""")
value = assert_success(response)
assert isinstance(value, list)
assert len(value) == 2
for expected, actual in zip(options, value):
assert_same_element(session, expected, actual)
def test_node_list(session):
session.url = inline("""
<p>foo
<p>bar
""")
ps = session.find.css("p")
response = execute_async_script(session, """
let resolve = arguments[0];
resolve(document.querySelectorAll('p'));
""")
value = assert_success(response)
assert isinstance(value, list)
assert len(value) == 2
for expected, actual in zip(ps, value):
assert_same_element(session, expected, actual)
|
sysalexis/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_codecmaps_hk.py
|
60
|
#
# test_codecmaps_hk.py
# Codec mapping tests for HongKong encodings
#
from test import support
from test import multibytecodec_support
import unittest
class TestBig5HKSCSMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'big5hkscs'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/BIG5HKSCS-2004.TXT'
if __name__ == "__main__":
unittest.main()
|
ryfeus/lambda-packs
|
refs/heads/master
|
Selenium_PhantomJS/source/setuptools/__init__.py
|
4
|
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools.extern.six.moves import filter, filterfalse, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages',
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Check if the directory is a package and passes the filters
if ('.' not in dir
and include(package)
and not exclude(package)
and cls._looks_like_package(full_path)):
yield package
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
setup = distutils.core.setup
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
monkey.patch_all()
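# Illustrative usage (not part of the original module); directory and package
# names are made up:
#   find_packages(where='src', exclude=('tests', 'tests.*'))
# returns every directory under 'src' that contains an __init__.py and does not
# match one of the exclude patterns, while PEP420PackageFinder.find() accepts
# directories without __init__.py as well (namespace packages).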
|
pabloborrego93/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/coursetalk/migrations/0001_initial.py
|
56
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseTalkWidgetConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('platform_key', models.CharField(help_text="This key needs to associate CourseTalk reviews with your platform. Better to use domain name Ex: for 'http://edx.org' platform_key will be 'edx'", max_length=50)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
|
thezawad/kivy
|
refs/heads/master
|
examples/widgets/lists/list_simple_in_kv.py
|
52
|
from kivy.uix.modalview import ModalView
from kivy.uix.listview import ListView
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
Builder.load_string("""
<ListViewModal>:
size_hint: None,None
size: 400,400
ListView:
size_hint: .8,.8
item_strings: [str(index) for index in range(100)]
""")
class ListViewModal(ModalView):
def __init__(self, **kwargs):
super(ListViewModal, self).__init__(**kwargs)
class MainView(GridLayout):
"""Implementation of a list view declared in a kv template.
"""
def __init__(self, **kwargs):
kwargs['cols'] = 1
super(MainView, self).__init__(**kwargs)
listview_modal = ListViewModal()
self.add_widget(listview_modal)
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
|
mKeRix/home-assistant
|
refs/heads/dev
|
homeassistant/components/mcp23017/binary_sensor.py
|
7
|
"""Support for binary sensor using I2C MCP23017 chip."""
import logging
from adafruit_mcp230xx.mcp23017 import MCP23017 # pylint: disable=import-error
import board # pylint: disable=import-error
import busio # pylint: disable=import-error
import digitalio # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_PINS = "pins"
CONF_PULL_MODE = "pull_mode"
MODE_UP = "UP"
MODE_DOWN = "DOWN"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
DEFAULT_PULL_MODE = MODE_UP
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SENSORS_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): vol.All(
vol.Upper, vol.In([MODE_UP, MODE_DOWN])
),
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the MCP23017 binary sensors."""
pull_mode = config[CONF_PULL_MODE]
invert_logic = config[CONF_INVERT_LOGIC]
i2c_address = config[CONF_I2C_ADDRESS]
i2c = busio.I2C(board.SCL, board.SDA)
mcp = MCP23017(i2c, address=i2c_address)
binary_sensors = []
pins = config[CONF_PINS]
for pin_num, pin_name in pins.items():
pin = mcp.get_pin(pin_num)
binary_sensors.append(
MCP23017BinarySensor(pin_name, pin, pull_mode, invert_logic)
)
add_devices(binary_sensors, True)
class MCP23017BinarySensor(BinarySensorEntity):
"""Represent a binary sensor that uses MCP23017."""
def __init__(self, name, pin, pull_mode, invert_logic):
"""Initialize the MCP23017 binary sensor."""
self._name = name or DEVICE_DEFAULT_NAME
self._pin = pin
self._pull_mode = pull_mode
self._invert_logic = invert_logic
self._state = None
self._pin.direction = digitalio.Direction.INPUT
        # The MCP23017 only provides internal pull-up resistors, so pull-down
        # cannot be configured here regardless of the requested pull_mode.
        self._pin.pull = digitalio.Pull.UP
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
return self._state != self._invert_logic
def update(self):
"""Update the GPIO state."""
self._state = self._pin.value
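# Illustrative configuration sketch (not part of the original module), derived
# from PLATFORM_SCHEMA above; pin names are made up:
#
#   binary_sensor:
#     - platform: mcp23017
#       i2c_address: 0x20
#       invert_logic: false
#       pins:
#         0: Front door
#         1: Back door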
|
tplusx/ns3-gpsr
|
refs/heads/master
|
src/aodv/bindings/callbacks_list.py
|
56
|
callback_classes = [
['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::Socket::SocketErrno', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Ipv4Route>', 'ns3::Ptr<ns3::Packet const>', 'ns3::Ipv4Header const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::WifiMacHeader const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::ArpCache const>', 'ns3::Ipv4Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
kivatu/kivy_old
|
refs/heads/master
|
kivy/uix/gridlayout.py
|
4
|
'''
Grid Layout
===========
.. only:: html
.. image:: images/gridlayout.gif
:align: right
.. only:: latex
.. image:: images/gridlayout.png
:align: right
.. versionadded:: 1.0.4
The :class:`GridLayout` arranges children in a matrix. It takes the available
space and divides it into columns and rows, then adds widgets to the resulting
"cells".
.. versionadded:: 1.0.7
The implementation has changed to use the widget size_hint for calculating
column/row sizes. `uniform_width` and `uniform_height` have been removed
and other properties have been added to give you more control.
Background
----------
Unlike many other toolkits, you cannot explicitly place a widget in a specific
column/row. Each child is automatically assigned a position determined by the
layout configuration and the child's index in the children list.
A GridLayout must always have at least one input constraint:
:data:`GridLayout.cols` or :data:`GridLayout.rows`. If you do not specify cols
or rows, the Layout will throw an exception.
Column Width and Row Height
---------------------------
The column width/row height are determined in 3 steps:
- The initial size is given by the :data:`col_default_width` and
:data:`row_default_height` properties. To customize the size of a single
column or row, use :data:`cols_minimum` or :data:`rows_minimum`.
- The `size_hint_x`/`size_hint_y` of the children are taken into account.
If no widgets have a size hint, the maximum size is used for all children.
- You can force the default size by setting the :data:`col_force_default`
or :data:`row_force_default` property. This will force the layout to
ignore the `width` and `size_hint` properties of children and use the
default size.
Using a GridLayout
------------------
In the example below, all widgets will have an equal size. By default, the
`size_hint` is (1, 1), so a Widget will take the full size of the parent::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1'))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2'))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_1.jpg
Now, let's fix the size of Hello buttons to 100px instead of using
size_hint_x=1::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_2.jpg
Next, let's fix the row height to a specific size::
layout = GridLayout(cols=2, row_force_default=True, row_default_height=40)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_3.jpg
'''
__all__ = ('GridLayout', 'GridLayoutException')
from kivy.logger import Logger
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, BooleanProperty, DictProperty, \
BoundedNumericProperty, ReferenceListProperty, VariableListProperty
from math import ceil
def nmax(*args):
'''(internal) Implementation of a max() function that supports None.
'''
# merge into one list
args = [x for x in args if x is not None]
return max(args)
class GridLayoutException(Exception):
'''Exception for errors if the grid layout manipulation fails.
'''
pass
class GridLayout(Layout):
'''Grid layout class. See module documentation for more information.
'''
spacing = VariableListProperty([0, 0], length=2)
'''Spacing between children: [spacing_horizontal, spacing_vertical].
spacing also accepts a one argument form [spacing].
:data:`spacing` is a
:class:`~kivy.properties.VariableListProperty`, default to [0, 0].
'''
padding = VariableListProperty([0, 0, 0, 0])
    '''Padding between the layout box and its children: [padding_left,
padding_top, padding_right, padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced NumericProperty with VariableListProperty.
:data:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0, 0, 0].
'''
cols = BoundedNumericProperty(None, min=0, allow_none=True)
'''Number of columns in the grid.
.. versionadded:: 1.0.8
Changed from a NumericProperty to BoundedNumericProperty. You can no
longer set this to a negative value.
:data:`cols` is a :class:`~kivy.properties.NumericProperty` and defaults to
0.
'''
rows = BoundedNumericProperty(None, min=0, allow_none=True)
'''Number of rows in the grid.
.. versionadded:: 1.0.8
Changed from a NumericProperty to a BoundedNumericProperty. You can no
longer set this to a negative value.
:data:`rows` is a :class:`~kivy.properties.NumericProperty` and defaults to
0.
'''
col_default_width = NumericProperty(0)
'''Default minimum size to use for a column.
.. versionadded:: 1.0.7
:data:`col_default_width` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
row_default_height = NumericProperty(0)
'''Default minimum size to use for row.
.. versionadded:: 1.0.7
:data:`row_default_height` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
col_force_default = BooleanProperty(False)
'''If True, ignore the width and size_hint_x of the child and use the
default column width.
.. versionadded:: 1.0.7
:data:`col_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
row_force_default = BooleanProperty(False)
'''If True, ignore the height and size_hint_y of the child and use the
default row height.
.. versionadded:: 1.0.7
:data:`row_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
cols_minimum = DictProperty({})
'''List of minimum sizes for each column.
.. versionadded:: 1.0.7
:data:`cols_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
rows_minimum = DictProperty({})
'''List of minimum sizes for each row.
.. versionadded:: 1.0.7
:data:`rows_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
minimum_width = NumericProperty(0)
'''Minimum width needed to contain all children.
.. versionadded:: 1.0.8
:data:`minimum_width` is a :class:`kivy.properties.NumericProperty` and
defaults to 0.
'''
minimum_height = NumericProperty(0)
'''Minimum height needed to contain all children.
.. versionadded:: 1.0.8
:data:`minimum_height` is a :class:`kivy.properties.NumericProperty` and
defaults to 0.
'''
minimum_size = ReferenceListProperty(minimum_width, minimum_height)
'''Minimum size needed to contain all children.
.. versionadded:: 1.0.8
:data:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty` of
(:data:`minimum_width`, :data:`minimum_height`) properties.
'''
def __init__(self, **kwargs):
self._cols = self._rows = None
super(GridLayout, self).__init__(**kwargs)
self.bind(
col_default_width=self._trigger_layout,
row_default_height=self._trigger_layout,
col_force_default=self._trigger_layout,
row_force_default=self._trigger_layout,
cols=self._trigger_layout,
rows=self._trigger_layout,
parent=self._trigger_layout,
spacing=self._trigger_layout,
padding=self._trigger_layout,
children=self._trigger_layout,
size=self._trigger_layout,
pos=self._trigger_layout)
def get_max_widgets(self):
if self.cols and not self.rows:
return None
if self.rows and not self.cols:
return None
if not self.cols and not self.rows:
return None
return self.rows * self.cols
def on_children(self, instance, value):
        # If that makes it impossible to construct things with a deferred
        # method, move this test into do_layout and/or issue a warning.
smax = self.get_max_widgets()
if smax and len(value) > smax:
raise GridLayoutException(
'Too many children in GridLayout. Increase rows/cols!')
def update_minimum_size(self, *largs):
# the goal here is to calculate the minimum size of every cols/rows
# and determine if they have stretch or not
current_cols = self.cols
current_rows = self.rows
children = self.children
len_children = len(children)
# if no cols or rows are set, we can't calculate minimum size.
        # the grid must be constrained on at least one side
if not current_cols and not current_rows:
            Logger.warning('%r has no cols or rows set, '
'layout is not triggered.' % self)
return None
if current_cols is None:
current_cols = int(ceil(len_children / float(current_rows)))
elif current_rows is None:
current_rows = int(ceil(len_children / float(current_cols)))
current_cols = max(1, current_cols)
current_rows = max(1, current_rows)
cols = [self.col_default_width] * current_cols
cols_sh = [None] * current_cols
rows = [self.row_default_height] * current_rows
rows_sh = [None] * current_rows
# update minimum size from the dicts
# FIXME index might be outside the bounds ?
for index, value in self.cols_minimum.items():
cols[index] = value
for index, value in self.rows_minimum.items():
rows[index] = value
# calculate minimum size for each columns and rows
i = len_children - 1
for row in range(current_rows):
for col in range(current_cols):
                # don't go further if we don't have any children left
if i < 0:
break
# get initial information from the child
c = children[i]
shw = c.size_hint_x
shh = c.size_hint_y
w = c.width
h = c.height
# compute minimum size / maximum stretch needed
if shw is None:
cols[col] = nmax(cols[col], w)
else:
cols_sh[col] = nmax(cols_sh[col], shw)
if shh is None:
rows[row] = nmax(rows[row], h)
else:
rows_sh[row] = nmax(rows_sh[row], shh)
# next child
i = i - 1
# calculate minimum width/height needed, starting from padding + spacing
padding_x = self.padding[0] + self.padding[2]
padding_y = self.padding[1] + self.padding[3]
spacing_x, spacing_y = self.spacing
width = padding_x + spacing_x * (current_cols - 1)
height = padding_y + spacing_y * (current_rows - 1)
# then add the cell size
width += sum(cols)
height += sum(rows)
# remember for layout
self._cols = cols
self._rows = rows
self._cols_sh = cols_sh
self._rows_sh = rows_sh
# finally, set the minimum size
self.minimum_size = (width, height)
def do_layout(self, *largs):
self.update_minimum_size()
if self._cols is None:
return
if self.cols is None and self.rows is None:
raise GridLayoutException('Need at least cols or rows constraint.')
children = self.children
len_children = len(children)
if len_children == 0:
return
# speedup
padding_left = self.padding[0]
padding_top = self.padding[1]
spacing_x, spacing_y = self.spacing
selfx = self.x
selfw = self.width
selfh = self.height
# resolve size for each column
if self.col_force_default:
cols = [self.col_default_width] * len(self._cols)
for index, value in self.cols_minimum.items():
cols[index] = value
else:
cols = self._cols[:]
cols_sh = self._cols_sh
cols_weigth = sum([x for x in cols_sh if x])
strech_w = max(0, selfw - self.minimum_width)
for index in range(len(cols)):
            # if the col doesn't have stretch information, nothing to do
col_stretch = cols_sh[index]
if col_stretch is None:
continue
# calculate the column stretch, and take the maximum from
# minimum size and the calculated stretch
col_width = cols[index]
col_width = max(col_width, strech_w * col_stretch / cols_weigth)
cols[index] = col_width
# same algo for rows
if self.row_force_default:
rows = [self.row_default_height] * len(self._rows)
for index, value in self.rows_minimum.items():
rows[index] = value
else:
rows = self._rows[:]
rows_sh = self._rows_sh
rows_weigth = sum([x for x in rows_sh if x])
strech_h = max(0, selfh - self.minimum_height)
for index in range(len(rows)):
            # if the row doesn't have stretch information, nothing to do
row_stretch = rows_sh[index]
if row_stretch is None:
continue
# calculate the row stretch, and take the maximum from minimum
# size and the calculated stretch
row_height = rows[index]
row_height = max(row_height,
strech_h * row_stretch / rows_weigth)
rows[index] = row_height
# reposition every child
i = len_children - 1
y = self.top - padding_top
for row_height in rows:
x = selfx + padding_left
for col_width in cols:
if i < 0:
break
c = children[i]
c.x = x
c.y = y - row_height
c.width = col_width
c.height = row_height
i = i - 1
x = x + col_width + spacing_x
y -= row_height + spacing_y
|
WillisXChen/django-oscar
|
refs/heads/master
|
oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_BE.py
|
1
|
"""Auto-generated file, do not edit by hand. BE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BE = PhoneMetadata(id='BE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}|[2-9]\\d{3}', possible_number_pattern='\\d{3,6}'),
toll_free=PhoneNumberDesc(national_number_pattern='1(?:0[25-8]|1(?:0|6\\d{3})|7(?:12|77))|8\\d{3}', possible_number_pattern='\\d{3,6}', example_number='116000'),
premium_rate=PhoneNumberDesc(national_number_pattern='1(?:212|3(?:0[47]|13)|4[01]4)|[2-79]\\d{3}', possible_number_pattern='\\d{4}', example_number='7212'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:0[01]|12)', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0[0-8]|1(?:[027]|6000)|2(?:0[47]|12|3[0-24]|99)|3(?:0[47]|13|99)|4(?:0[47]|14|50|99)|7(?:00|1[27]|33|65|77)|819)|[2-9]\\d{3}', possible_number_pattern='\\d{3,6}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_data=True)
|
stpierre/sponge
|
refs/heads/master
|
Sponge/sponge/management/commands/__init__.py
|
1
|
__all__ = ["rebalanceschedule"]
|
tianyi33/simple_blog
|
refs/heads/master
|
django/contrib/localflavor/uk/uk_regions.py
|
114
|
from django.contrib.localflavor.gb.gb_regions import (
ENGLAND_REGION_CHOICES, NORTHERN_IRELAND_REGION_CHOICES,
WALES_REGION_CHOICES, SCOTTISH_REGION_CHOICES, GB_NATIONS_CHOICES,
GB_REGION_CHOICES)
import warnings
warnings.warn(
'The "UK" prefix for United Kingdom has been deprecated in favour of the '
'GB code. Please use the new GB-prefixed names.', DeprecationWarning)
UK_NATIONS_CHOICES = GB_NATIONS_CHOICES
UK_REGION_CHOICES = GB_REGION_CHOICES
|
plotly/plotly.py
|
refs/heads/master
|
packages/python/plotly/plotly/validators/scatter3d/error_z/_color.py
|
1
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="scatter3d.error_z", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
schinke/solid-fortnight-ba
|
refs/heads/master
|
flask/venv/lib/python2.7/site-packages/psycopg2/tz.py
|
72
|
"""tzinfo implementations for psycopg2
This module holds two different tzinfo implementations that can be used as
the 'tzinfo' argument to datetime constructors, directly passed to psycopg
functions or used to set the .tzinfo_factory attribute in cursors.
"""
# psycopg/tz.py - tzinfo implementation
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import datetime
import time
ZERO = datetime.timedelta(0)
class FixedOffsetTimezone(datetime.tzinfo):
"""Fixed offset in minutes east from UTC.
This is exactly the implementation__ found in Python 2.3.x documentation,
with a small change to the `!__init__()` method to allow for pickling
and a default name in the form ``sHH:MM`` (``s`` is the sign.).
The implementation also caches instances. During creation, if a
FixedOffsetTimezone instance has previously been created with the same
offset and name, that instance will be returned. This saves memory and
improves comparability.
.. __: http://docs.python.org/library/datetime.html#datetime-tzinfo
"""
_name = None
_offset = ZERO
_cache = {}
def __init__(self, offset=None, name=None):
if offset is not None:
self._offset = datetime.timedelta(minutes = offset)
if name is not None:
self._name = name
def __new__(cls, offset=None, name=None):
"""Return a suitable instance created earlier if it exists
"""
key = (offset, name)
try:
return cls._cache[key]
except KeyError:
tz = super(FixedOffsetTimezone, cls).__new__(cls, offset, name)
cls._cache[key] = tz
return tz
def __repr__(self):
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
% (offset_mins, self._name)
def __getinitargs__(self):
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return (offset_mins, self._name)
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
if self._name is not None:
return self._name
else:
seconds = self._offset.seconds + self._offset.days * 86400
hours, seconds = divmod(seconds, 3600)
minutes = seconds/60
if minutes:
return "%+03d:%d" % (hours, minutes)
else:
return "%+03d" % hours
def dst(self, dt):
return ZERO
STDOFFSET = datetime.timedelta(seconds = -time.timezone)
if time.daylight:
DSTOFFSET = datetime.timedelta(seconds = -time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(datetime.tzinfo):
"""Platform idea of local timezone.
This is the exact implementation from the Python 2.3 documentation.
"""
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
LOCAL = LocalTimezone()
# TODO: pre-generate some interesting time zones?
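# Illustrative usage (a minimal sketch, not part of psycopg2 itself): both
# classes can be passed as the tzinfo argument to datetime constructors.
#   import datetime
#   cet = FixedOffsetTimezone(offset=60, name="CET")
#   datetime.datetime(2010, 7, 1, 12, 0, tzinfo=cet).utcoffset()
#   # -> datetime.timedelta(0, 3600)
#   datetime.datetime.now(LOCAL)   # current time in the platform's local zone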
|
shackra/thomas-aquinas
|
refs/heads/stable-branch
|
summa/audio/soundstatus.py
|
1
|
# coding: utf-8
# This file is part of Thomas Aquinas.
#
# Thomas Aquinas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Thomas Aquinas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Thomas Aquinas. If not, see <http://www.gnu.org/licenses/>.
#
# veni, Sancte Spiritus.
import ctypes
import logging
class SoundStatus(object):
stopped = 0
paused = 1
playing = 2
|
siosio/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertTypeCommentToVariableAnnotationIntentionTest/withStatementWithComplexUnpacking_after.py
|
31
|
x: io.FileIO
z: Optional[int]
y: Any
with undefined() \
as ((x, (z)), y):
pass
|
incaser/server-tools
|
refs/heads/8.0
|
base_suspend_security/__openerp__.py
|
17
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Suspend security",
"version": "8.0.1.0.0",
"author": "Therp BV",
"license": "AGPL-3",
"category": "Hidden/Dependency",
"summary": "Suspend security checks for a call",
"depends": [
'base',
],
"test": [
],
"auto_install": False,
"installable": True,
"application": False,
"external_dependencies": {
'python': [],
},
}
|
Goamaral/SCC
|
refs/heads/master
|
App.py
|
1
|
import inputWindow
import outputWindow
import simulador
from PyQt4 import QtGui
import sys
class InputWindow(QtGui.QMainWindow, inputWindow.Ui_MainWindow):
def __init__(self, parent=None):
super(InputWindow, self).__init__(parent)
self.setupUi(self)
self.botaoSimular.clicked.connect(self.goToOutputWindow)
self.relatorio = OutputWindow()
def goToOutputWindow(self):
media_cheg_A = float(self.mediaChegadaA.text())
dist_perfuracao_A = (float(self.mediaPerfuracaoA.text()), float(self.desvioPerfuracaoA.text()))
numero_maquinas_perfuracao_A = int(self.nMaquinasPerfuracaoA.text())
dist_polimento_A = (float(self.mediaPolimentoA.text()), float(self.desvioPolimentoA.text()))
numero_maquinas_polimento_A = int(self.nMaquinasPolimentoA.text())
media_cheg_B = float(self.mediaChegadaB.text())
dist_perfuracao_B = (float(self.mediaPerfuracaoB.text()), float(self.desvioPerfuracaoB.text()))
numero_maquinas_perfuracao_B = int(self.nMaquinasPerfuracaoB.text())
dist_polimento_B = (float(self.mediaPolimentoB.text()), float(self.desvioPolimentoB.text()))
numero_maquinas_polimento_B = int(self.nMaquinasPolimentoB.text())
dist_envernizamento = (float(self.mediaEnvernizamento.text()), float(self.desvioEnvernizamento.text()))
numero_maquinas_envernizamento = int(self.nMaquinasEnvernizamento.text())
if self.tipoLimite.currentIndex() == 0:
n_clientes = None
tempo_simulacao = int(self.valorLimite.text())
else:
n_clientes = int(self.valorLimite.text())
tempo_simulacao = None
n_repeticoes = int(self.nRepeticoes.text())
sum_media_espera_a_perfuracao = 0
sum_utilizacao_a_perfuracao = 0
sum_atendidos_a_perfuracao = 0
sum_clientes_fila_a_perfuracao = 0
sum_media_espera_a_polimento = 0
sum_utilizacao_a_polimento = 0
sum_atendidos_a_polimento = 0
sum_clientes_fila_a_polimento = 0
sum_media_espera_b_perfuracao = 0
sum_utilizacao_b_perfuracao = 0
sum_atendidos_b_perfuracao = 0
sum_clientes_fila_b_perfuracao = 0
sum_media_espera_b_polimento = 0
sum_utilizacao_b_polimento = 0
sum_atendidos_b_polimento = 0
sum_clientes_fila_b_polimento = 0
sum_media_espera_envernizamento = 0
sum_utilizacao_envernizamento = 0
sum_clientes_atendidos = 0
sum_clientes_fila_envernizamento = 0
for i in range(n_repeticoes):
self.botaoSimular.setText("%0.1f "%(i/n_repeticoes * 100,) + str(' %'))
QtGui.qApp.processEvents()
S = simulador.Simulador(media_cheg_A, dist_perfuracao_A, numero_maquinas_perfuracao_A,
dist_polimento_A, numero_maquinas_polimento_A, media_cheg_B, dist_perfuracao_B,
numero_maquinas_perfuracao_B, dist_polimento_B, numero_maquinas_polimento_B,
dist_envernizamento, numero_maquinas_envernizamento, i, tempo_simulacao, n_clientes)
S.executa()
sum_media_espera_a_perfuracao += S.a_perfuracao_relat["mediaEspera"]
sum_utilizacao_a_perfuracao += S.a_perfuracao_relat["utilizacao"]
sum_atendidos_a_perfuracao += S.a_perfuracao_relat["nClientesAtendidos"]
sum_clientes_fila_a_perfuracao += S.a_perfuracao_relat["nClientesFila"]
sum_media_espera_a_polimento += S.a_polimento_relat["mediaEspera"]
sum_utilizacao_a_polimento += S.a_polimento_relat["utilizacao"]
sum_atendidos_a_polimento += S.a_polimento_relat["nClientesAtendidos"]
sum_clientes_fila_a_polimento += S.a_polimento_relat["nClientesFila"]
sum_media_espera_b_perfuracao += S.b_perfuracao_relat["mediaEspera"]
sum_utilizacao_b_perfuracao += S.b_perfuracao_relat["utilizacao"]
sum_atendidos_b_perfuracao += S.b_perfuracao_relat["nClientesAtendidos"]
sum_clientes_fila_b_perfuracao += S.b_perfuracao_relat["nClientesFila"]
sum_media_espera_b_polimento += S.b_polimento_relat["mediaEspera"]
sum_utilizacao_b_polimento += S.b_polimento_relat["utilizacao"]
sum_atendidos_b_polimento += S.b_polimento_relat["nClientesAtendidos"]
sum_clientes_fila_b_polimento += S.b_polimento_relat["nClientesFila"]
sum_media_espera_envernizamento += S.envernizamento_relat["mediaEspera"]
sum_utilizacao_envernizamento += S.envernizamento_relat["utilizacao"]
sum_clientes_atendidos += S.envernizamento_relat["nClientesAtendidos"]
sum_clientes_fila_envernizamento += S.envernizamento_relat["nClientesFila"]
n_repeticoes = float(n_repeticoes)
media_espera_a_perfuracao = sum_media_espera_a_perfuracao / n_repeticoes
media_utilizacao_a_perfuracao = sum_utilizacao_a_perfuracao / n_repeticoes
media_clientes_atendidos_a_perfuracao = sum_atendidos_a_perfuracao / n_repeticoes
media_clientes_fila_a_perfuracao = sum_clientes_fila_a_perfuracao / n_repeticoes
media_espera_a_polimento = sum_media_espera_a_polimento / n_repeticoes
media_utilizacao_a_polimento = sum_utilizacao_a_polimento / n_repeticoes
media_clientes_atendidos_a_polimento = sum_atendidos_a_polimento / n_repeticoes
media_clientes_fila_a_polimento = sum_clientes_fila_a_polimento / n_repeticoes
media_espera_b_perfuracao = sum_media_espera_b_perfuracao / n_repeticoes
media_utilizacao_b_perfuracao = sum_utilizacao_b_perfuracao / n_repeticoes
media_clientes_atendidos_b_perfuracao = sum_atendidos_b_perfuracao / n_repeticoes
media_clientes_fila_b_perfuracao = sum_clientes_fila_b_perfuracao / n_repeticoes
media_espera_b_polimento = sum_media_espera_b_polimento / n_repeticoes
media_utilizacao_b_polimento = sum_utilizacao_b_polimento / n_repeticoes
media_clientes_atendidos_b_polimento = sum_atendidos_b_polimento / n_repeticoes
media_clientes_fila_b_polimento = sum_clientes_fila_b_polimento / n_repeticoes
media_utilizacao_envernizamento = sum_utilizacao_envernizamento / n_repeticoes
media_espera_envernizamento = sum_media_espera_envernizamento / n_repeticoes
media_clientes_atendidos = sum_clientes_atendidos / n_repeticoes
media_clientes_fila_envernizamento = sum_clientes_fila_envernizamento / n_repeticoes
self.close()
self.relatorio.mEsperaPerfuracaoA.setText("%0.3f"%(media_espera_a_perfuracao,))
self.relatorio.utilPerfuracaoA.setText("%0.3f"%(media_utilizacao_a_perfuracao,))
self.relatorio.atendidosPerfuracaoA.setText("%0.3f"%(media_clientes_atendidos_a_perfuracao,))
self.relatorio.compPerfuracaoA.setText("%0.3f"%(media_clientes_fila_a_perfuracao,))
self.relatorio.mEsperaPolimentoA.setText("%0.3f"%(media_espera_a_polimento,))
self.relatorio.utilPolimentoA.setText("%0.3f"%(media_utilizacao_a_polimento,))
self.relatorio.atendidosPolimentoA.setText("%0.3f"%(media_clientes_atendidos_a_polimento,))
self.relatorio.compPolimentoA.setText("%0.3f"%(media_clientes_fila_a_polimento,))
self.relatorio.mEsperaPerfuracaoB.setText("%0.3f"%(media_espera_b_perfuracao,))
self.relatorio.utilPerfuracaoB.setText("%0.3f"%(media_utilizacao_b_perfuracao,))
self.relatorio.atendidosPerfuracaoB.setText("%0.3f"%(media_clientes_atendidos_b_perfuracao,))
self.relatorio.compPerfuracaoB.setText("%0.3f"%(media_clientes_fila_b_perfuracao,))
self.relatorio.mEsperaPolimentoB.setText("%0.3f"%(media_espera_b_polimento,))
self.relatorio.utilPolimentoB.setText("%0.3f"%(media_utilizacao_b_polimento,))
self.relatorio.atendidosPolimentoB.setText("%0.3f"%(media_clientes_atendidos_b_polimento,))
self.relatorio.compPolimentoB.setText("%0.3f"%(media_clientes_fila_b_polimento,))
self.relatorio.mEsperaEnvernizamento.setText("%0.3f"%(media_espera_envernizamento,))
self.relatorio.utilEnvernizamento.setText("%0.3f"%(media_utilizacao_envernizamento,))
self.relatorio.clientesAtendidos.setText("%0.3f"%(media_clientes_atendidos,))
self.relatorio.compEnvernizamento.setText("%0.3f"%(media_clientes_fila_envernizamento,))
# Average this as well, but only if we run for a fixed number of clients
self.relatorio.tempoSimulacao.setText("%d"%(S.instant))
self.relatorio.nRepeticoes.setText("%d"%(n_repeticoes))
self.relatorio.show()
class OutputWindow(QtGui.QMainWindow, outputWindow.Ui_MainWindow):
def __init__(self, parent=None):
super(OutputWindow, self).__init__(parent)
self.setupUi(self)
def main():
app = QtGui.QApplication(sys.argv)
descricaoSimulacao = InputWindow()
descricaoSimulacao.mediaChegadaA.setText('5')
descricaoSimulacao.mediaPerfuracaoA.setText('2')
descricaoSimulacao.desvioPerfuracaoA.setText('0.7')
descricaoSimulacao.nMaquinasPerfuracaoA.setText('1')
descricaoSimulacao.mediaPolimentoA.setText('4')
descricaoSimulacao.desvioPolimentoA.setText('1.2')
descricaoSimulacao.nMaquinasPolimentoA.setText('1')
descricaoSimulacao.mediaChegadaB.setText('1.33')
descricaoSimulacao.mediaPerfuracaoB.setText('0.75')
descricaoSimulacao.desvioPerfuracaoB.setText('0.3')
descricaoSimulacao.nMaquinasPerfuracaoB.setText('1')
descricaoSimulacao.mediaPolimentoB.setText('3')
descricaoSimulacao.desvioPolimentoB.setText('1')
descricaoSimulacao.nMaquinasPolimentoB.setText('2')
descricaoSimulacao.mediaEnvernizamento.setText('1.4')
descricaoSimulacao.desvioEnvernizamento.setText('0.3')
descricaoSimulacao.nMaquinasEnvernizamento.setText('2')
descricaoSimulacao.tipoLimite.setCurrentIndex(1)
descricaoSimulacao.valorLimite.setText('1000')
descricaoSimulacao.nRepeticoes.setText('1')
descricaoSimulacao.show()
app.exec_()
if __name__ == '__main__':
main()
|
cloudera/ibis
|
refs/heads/master
|
ibis/backends/pandas/client.py
|
1
|
"""The pandas client implementation."""
from __future__ import absolute_import
import re
from functools import partial
import dateutil.parser
import numpy as np
import pandas as pd
import pytz
import toolz
from multipledispatch import Dispatcher
from pandas.api.types import CategoricalDtype, DatetimeTZDtype
from pkg_resources import parse_version
import ibis.client as client
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.schema as sch
import ibis.expr.types as ir
from .core import execute_and_reset
infer_pandas_dtype = pd.api.types.infer_dtype
_ibis_dtypes = toolz.valmap(
np.dtype,
{
dt.Boolean: np.bool_,
dt.Null: np.object_,
dt.Array: np.object_,
dt.String: np.object_,
dt.Binary: np.object_,
dt.Date: 'datetime64[ns]',
dt.Time: 'timedelta64[ns]',
dt.Timestamp: 'datetime64[ns]',
dt.Int8: np.int8,
dt.Int16: np.int16,
dt.Int32: np.int32,
dt.Int64: np.int64,
dt.UInt8: np.uint8,
dt.UInt16: np.uint16,
dt.UInt32: np.uint32,
dt.UInt64: np.uint64,
dt.Float32: np.float32,
dt.Float64: np.float64,
dt.Decimal: np.object_,
dt.Struct: np.object_,
},
)
_numpy_dtypes = toolz.keymap(
np.dtype,
{
'bool': dt.boolean,
'int8': dt.int8,
'int16': dt.int16,
'int32': dt.int32,
'int64': dt.int64,
'uint8': dt.uint8,
'uint16': dt.uint16,
'uint32': dt.uint32,
'uint64': dt.uint64,
'float16': dt.float16,
'float32': dt.float32,
'float64': dt.float64,
'double': dt.double,
'unicode': dt.string,
'str': dt.string,
'datetime64': dt.timestamp,
'datetime64[ns]': dt.timestamp,
'timedelta64': dt.interval,
'timedelta64[ns]': dt.Interval('ns'),
},
)
_inferable_pandas_dtypes = {
'boolean': dt.boolean,
'string': dt.string,
'unicode': dt.string,
'bytes': dt.string,
'empty': dt.string,
}
@dt.dtype.register(np.dtype)
def from_numpy_dtype(value):
try:
return _numpy_dtypes[value]
except KeyError:
raise TypeError(
'numpy dtype {!r} is not supported in the pandas backend'.format(
value
)
)
@dt.dtype.register(DatetimeTZDtype)
def from_pandas_tzdtype(value):
return dt.Timestamp(timezone=str(value.tz))
@dt.dtype.register(CategoricalDtype)
def from_pandas_categorical(value):
return dt.Category()
@dt.infer.register(
(np.generic,)
+ tuple(
frozenset(
np.signedinteger.__subclasses__()
+ np.unsignedinteger.__subclasses__() # np.int64, np.uint64, etc.
)
) # we need this because in Python 2 int is a parent of np.integer
)
def infer_numpy_scalar(value):
return dt.dtype(value.dtype)
@dt.infer.register(pd.Timestamp)
def infer_pandas_timestamp(value):
if value.tz is not None:
return dt.Timestamp(timezone=str(value.tz))
else:
return dt.timestamp
@dt.infer.register(np.ndarray)
def infer_array(value):
# TODO(kszucs): infer series
return dt.Array(dt.dtype(value.dtype.name))
@sch.schema.register(pd.Series)
def schema_from_series(s):
return sch.schema(tuple(s.iteritems()))
@sch.infer.register(pd.DataFrame)
def infer_pandas_schema(df, schema=None):
schema = schema if schema is not None else {}
pairs = []
for column_name, pandas_dtype in df.dtypes.iteritems():
if not isinstance(column_name, str):
raise TypeError(
'Column names must be strings to use the pandas backend'
)
if column_name in schema:
ibis_dtype = dt.dtype(schema[column_name])
elif pandas_dtype == np.object_:
inferred_dtype = infer_pandas_dtype(df[column_name], skipna=True)
if inferred_dtype in {'mixed', 'decimal'}:
# TODO: in principle we can handle decimal (added in pandas
# 0.23)
raise TypeError(
'Unable to infer type of column {0!r}. Try instantiating '
'your table from the client with client.table('
"'my_table', schema={{{0!r}: <explicit type>}})".format(
column_name
)
)
ibis_dtype = _inferable_pandas_dtypes[inferred_dtype]
else:
ibis_dtype = dt.dtype(pandas_dtype)
pairs.append((column_name, ibis_dtype))
return sch.schema(pairs)
def ibis_dtype_to_pandas(ibis_dtype):
"""Convert ibis dtype to the pandas / numpy alternative"""
assert isinstance(ibis_dtype, dt.DataType)
if isinstance(ibis_dtype, dt.Timestamp) and ibis_dtype.timezone:
return DatetimeTZDtype('ns', ibis_dtype.timezone)
elif isinstance(ibis_dtype, dt.Interval):
return np.dtype('timedelta64[{}]'.format(ibis_dtype.unit))
elif isinstance(ibis_dtype, dt.Category):
return CategoricalDtype()
elif type(ibis_dtype) in _ibis_dtypes:
return _ibis_dtypes[type(ibis_dtype)]
else:
return np.dtype(np.object_)
def ibis_schema_to_pandas(schema):
return list(zip(schema.names, map(ibis_dtype_to_pandas, schema.types)))
convert = Dispatcher(
'convert',
doc="""\
Convert `column` to the pandas dtype corresponding to `out_dtype`, where the
dtype of `column` is `in_dtype`.
Parameters
----------
in_dtype : Union[np.dtype, pandas_dtype]
The dtype of `column`, used for dispatching
out_dtype : ibis.expr.datatypes.DataType
The requested ibis type of the output
column : pd.Series
The column to convert
Returns
-------
result : pd.Series
The converted column
""",
)
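# Illustrative sketch (not part of ibis itself): `convert` dispatches on the
# concrete input dtype and the requested ibis type, e.g.
#   convert(np.dtype('float64'), dt.int64, pd.Series([1.0, 2.0]))
# matches the (object, dt.DataType, pd.Series) fallback below and is roughly
#   pd.Series([1.0, 2.0]).astype('int64', errors='ignore')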
@convert.register(DatetimeTZDtype, dt.Timestamp, pd.Series)
def convert_datetimetz_to_timestamp(in_dtype, out_dtype, column):
output_timezone = out_dtype.timezone
if output_timezone is not None:
return column.dt.tz_convert(output_timezone)
return column.astype(out_dtype.to_pandas(), errors='ignore')
def convert_timezone(obj, timezone):
"""Convert `obj` to the timezone `timezone`.
Parameters
----------
obj : datetime.date or datetime.datetime
Returns
-------
type(obj)
"""
if timezone is None:
return obj.replace(tzinfo=None)
return pytz.timezone(timezone).localize(obj)
PANDAS_STRING_TYPES = {'string', 'unicode', 'bytes'}
PANDAS_DATE_TYPES = {'datetime', 'datetime64', 'date'}
@convert.register(np.dtype, dt.Timestamp, pd.Series)
def convert_datetime64_to_timestamp(in_dtype, out_dtype, column):
if in_dtype.type == np.datetime64:
return column.astype(out_dtype.to_pandas(), errors='ignore')
try:
series = pd.to_datetime(column, utc=True)
except pd.errors.OutOfBoundsDatetime:
inferred_dtype = infer_pandas_dtype(column, skipna=True)
if inferred_dtype in PANDAS_DATE_TYPES:
# not great, but not really any other option
return column.map(
partial(convert_timezone, timezone=out_dtype.timezone)
)
if inferred_dtype not in PANDAS_STRING_TYPES:
raise TypeError(
(
'Conversion to timestamp not supported for Series of type '
'{!r}'
).format(inferred_dtype)
)
return column.map(dateutil.parser.parse)
else:
utc_dtype = DatetimeTZDtype('ns', 'UTC')
return series.astype(utc_dtype).dt.tz_convert(out_dtype.timezone)
@convert.register(np.dtype, dt.Interval, pd.Series)
def convert_any_to_interval(_, out_dtype, column):
return column.values.astype(out_dtype.to_pandas())
@convert.register(np.dtype, dt.String, pd.Series)
def convert_any_to_string(_, out_dtype, column):
result = column.astype(out_dtype.to_pandas(), errors='ignore')
return result
@convert.register(np.dtype, dt.Boolean, pd.Series)
def convert_boolean_to_series(in_dtype, out_dtype, column):
# XXX: this is a workaround until #1595 can be addressed
in_dtype_type = in_dtype.type
out_dtype_type = out_dtype.to_pandas().type
if in_dtype_type != np.object_ and in_dtype_type != out_dtype_type:
return column.astype(out_dtype_type)
return column
@convert.register(object, dt.DataType, pd.Series)
def convert_any_to_any(_, out_dtype, column):
return column.astype(out_dtype.to_pandas(), errors='ignore')
def ibis_schema_apply_to(schema, df):
"""Applies the Ibis schema to a pandas DataFrame
Parameters
----------
schema : ibis.schema.Schema
df : pandas.DataFrame
Returns
-------
df : pandas.DataFrame
Notes
-----
Mutates `df`
"""
for column, dtype in schema.items():
pandas_dtype = dtype.to_pandas()
col = df[column]
col_dtype = col.dtype
try:
not_equal = pandas_dtype != col_dtype
except TypeError:
# ugh, we can't compare dtypes coming from pandas, assume not equal
not_equal = True
if not_equal or isinstance(dtype, dt.String):
df[column] = convert(col_dtype, dtype, col)
return df
dt.DataType.to_pandas = ibis_dtype_to_pandas
sch.Schema.to_pandas = ibis_schema_to_pandas
sch.Schema.apply_to = ibis_schema_apply_to
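# Illustrative sketch: after these assignments dt.int64.to_pandas() yields
# numpy's int64 dtype, and sch.schema([('a', dt.string)]).apply_to(df) coerces
# df['a'] in place via the `convert` dispatcher above.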
class PandasTable(ops.DatabaseTable):
pass
class PandasClient(client.Client):
dialect = None # defined in ibis.pandas.api
def __init__(self, dictionary):
self.dictionary = dictionary
def table(self, name, schema=None):
df = self.dictionary[name]
schema = sch.infer(df, schema=schema)
return PandasTable(name, schema, self).to_expr()
def execute(self, query, params=None, limit='default', **kwargs):
if limit != 'default':
raise ValueError(
'limit parameter to execute is not yet implemented in the '
'pandas backend'
)
if not isinstance(query, ir.Expr):
raise TypeError(
"`query` has type {!r}, expected ibis.expr.types.Expr".format(
type(query).__name__
)
)
return execute_and_reset(query, params=params, **kwargs)
def compile(self, expr, *args, **kwargs):
"""Compile `expr`.
Notes
-----
For the pandas backend this is a no-op.
"""
return expr
def database(self, name=None):
"""Construct a database called `name`."""
return PandasDatabase(name, self)
def list_tables(self, like=None):
"""List the available tables."""
tables = list(self.dictionary.keys())
if like is not None:
pattern = re.compile(like)
return list(filter(lambda t: pattern.findall(t), tables))
return tables
def load_data(self, table_name, obj, **kwargs):
"""Load data from `obj` into `table_name`.
Parameters
----------
table_name : str
obj : pandas.DataFrame
"""
# kwargs is a catch all for any options required by other backends.
self.dictionary[table_name] = pd.DataFrame(obj)
def create_table(self, table_name, obj=None, schema=None):
"""Create a table."""
if obj is None and schema is None:
raise com.IbisError('Must pass expr or schema')
if obj is not None:
df = pd.DataFrame(obj)
else:
dtypes = ibis_schema_to_pandas(schema)
df = schema.apply_to(
pd.DataFrame(columns=list(map(toolz.first, dtypes)))
)
self.dictionary[table_name] = df
def get_schema(self, table_name, database=None):
"""Return a Schema object for the indicated table and database.
Parameters
----------
table_name : str
May be fully qualified
database : str
Returns
-------
ibis.expr.schema.Schema
"""
return sch.infer(self.dictionary[table_name])
def exists_table(self, name):
"""Determine if the indicated table or view exists.
Parameters
----------
name : str
database : str
Returns
-------
bool
"""
return bool(self.list_tables(like=name))
@property
def version(self) -> str:
"""Return the version of the underlying backend library."""
return parse_version(pd.__version__)
class PandasDatabase(client.Database):
pass
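# Illustrative usage (a minimal sketch; `df` is any DataFrame with string
# column names):
#   client = PandasClient({'t': df})
#   expr = client.table('t')      # ibis table expression backed by df
#   client.execute(expr)          # evaluates back to a pandas DataFrame
#   client.list_tables()          # -> ['t']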
|
bas-t/media_tree
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
1996
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
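# Example invocations (illustrative):
#   perf script -s sctop.py              # all comms, refresh every 3 seconds
#   perf script -s sctop.py firefox 5    # only syscalls from 'firefox', 5s refresh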
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
GhostThrone/django
|
refs/heads/master
|
django/core/handlers/wsgi.py
|
339
|
from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
LimitedStream wraps another stream in order to not allow reading from it
past a specified number of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
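# Illustrative sketch (not part of Django itself): reads never go past `limit`
# bytes of the wrapped stream.
#   stream = LimitedStream(BytesIO(b'abcdef'), 3)
#   stream.read()       # -> b'abc'
#   stream.read()       # -> b''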
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
# be careful to only replace the first slash in the path because of
# http://test/something and http://test//something being different as
# stated in http://www.ietf.org/rfc/rfc2396.txt
self.path = '%s/%s' % (script_name.rstrip('/'),
path_info.replace('/', '', 1))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialized.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
response = environ['wsgi.file_wrapper'](response.file_to_stream)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)] if path_info else script_url
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value.encode(ISO_8859_1) if six.PY3 else value
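# Illustrative example: under Python 3 raw bytes b'/caf\xc3\xa9' arrive in the
# environ as the str '/caf\xc3\xa9' (decoded with ISO-8859-1 per PEP 3333);
# re-encoding with ISO-8859-1 here recovers the original UTF-8 bytes, which
# get_path_info() above then decodes as UTF-8.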
def get_str_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(UTF_8, errors='replace') if six.PY3 else value
|
kool79/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/geometry/backend/__init__.py
|
388
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
geom_backend = getattr(settings, 'GEOMETRY_BACKEND', 'geos')
try:
module = import_module('.%s' % geom_backend, 'django.contrib.gis.geometry.backend')
except ImportError, e:
try:
module = import_module(geom_backend)
except ImportError, e_user:
raise ImproperlyConfigured('Could not import user-defined GEOMETRY_BACKEND '
'"%s".' % geom_backend)
try:
Geometry = module.Geometry
GeometryException = module.GeometryException
except AttributeError:
raise ImproperlyConfigured('Cannot import Geometry from the "%s" '
'geometry backend.' % geom_backend)
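# Illustrative sketch: with settings.GEOMETRY_BACKEND = 'geos' (the default)
# this imports django.contrib.gis.geometry.backend.geos and re-exports its
# Geometry and GeometryException; a dotted path to a user module also works.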
|
gltn/stdm
|
refs/heads/master
|
stdm/third_party/FontTools/fontTools/ttLib/tables/_a_v_a_r.py
|
1
|
from fontTools.misc.py23 import *
from fontTools import ttLib
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
from fontTools.misc.textTools import safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
import array
import struct
import logging
log = logging.getLogger(__name__)
# Apple's documentation of 'avar':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html
AVAR_HEADER_FORMAT = """
> # big endian
majorVersion: H
minorVersion: H
reserved: H
axisCount: H
"""
assert sstruct.calcsize(AVAR_HEADER_FORMAT) == 8, sstruct.calcsize(AVAR_HEADER_FORMAT)
class table__a_v_a_r(DefaultTable.DefaultTable):
dependencies = ["fvar"]
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.segments = {}
def compile(self, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
header = {
"majorVersion": 1,
"minorVersion": 0,
"reserved": 0,
"axisCount": len(axisTags)
}
result = [sstruct.pack(AVAR_HEADER_FORMAT, header)]
for axis in axisTags:
mappings = sorted(self.segments[axis].items())
result.append(struct.pack(">H", len(mappings)))
for key, value in mappings:
fixedKey = fl2fi(key, 14)
fixedValue = fl2fi(value, 14)
result.append(struct.pack(">hh", fixedKey, fixedValue))
return bytesjoin(result)
def decompile(self, data, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
header = {}
headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT)
header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize])
majorVersion = header["majorVersion"]
if majorVersion != 1:
raise TTLibError("unsupported 'avar' version %d" % majorVersion)
pos = headerSize
for axis in axisTags:
segments = self.segments[axis] = {}
numPairs = struct.unpack(">H", data[pos:pos+2])[0]
pos = pos + 2
for _ in range(numPairs):
fromValue, toValue = struct.unpack(">hh", data[pos:pos+4])
segments[fi2fl(fromValue, 14)] = fi2fl(toValue, 14)
pos = pos + 4
def toXML(self, writer, ttFont):
axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
for axis in axisTags:
writer.begintag("segment", axis=axis)
writer.newline()
for key, value in sorted(self.segments[axis].items()):
key = fl2str(key, 14)
value = fl2str(value, 14)
writer.simpletag("mapping", **{"from": key, "to": value})
writer.newline()
writer.endtag("segment")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "segment":
axis = attrs["axis"]
segment = self.segments[axis] = {}
for element in content:
if isinstance(element, tuple):
elementName, elementAttrs, _ = element
if elementName == "mapping":
fromValue = str2fl(elementAttrs["from"], 14)
toValue = str2fl(elementAttrs["to"], 14)
if fromValue in segment:
log.warning("duplicate entry for %s in axis '%s'",
fromValue, axis)
segment[fromValue] = toValue
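# Illustrative sketch (not part of fontTools itself): `segments` maps each fvar
# axis tag to a {fromCoordinate: toCoordinate} dict of normalized values, e.g.
#   ttFont["avar"].segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 0.5: 0.3, 1.0: 1.0}
# compile() then packs every pair as two signed 2.14 fixed-point values.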
|
pschella/scipy
|
refs/heads/master
|
scipy/special/tests/test_round.py
|
20
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import dec
from scipy.special import _test_round
@dec.skipif(not _test_round.have_fenv())
def test_add_round_up():
np.random.seed(1234)
_test_round.test_add_round(10**5, 'up')
@dec.skipif(not _test_round.have_fenv())
def test_add_round_down():
np.random.seed(1234)
_test_round.test_add_round(10**5, 'down')
|
PUNCH-Cyber/stoq-plugins-public
|
refs/heads/master
|
symhash/setup.py
|
1
|
from setuptools import setup, find_packages
setup(
name="symhash",
version="3.0.0",
author="Marcus LaFerrera (@mlaferrera)",
url="https://github.com/PUNCH-Cyber/stoq-plugins-public/v2",
license="Apache License 2.0",
description="Calculate symbol table hashes of a Mach-O executable file",
packages=find_packages(),
include_package_data=True,
)
|
kouaw/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/franceinter.py
|
15
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class FranceInterIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
_TEST = {
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
'file': '793962.mp3',
'md5': '4764932e466e6f6c79c317d2e74f6884',
"info_dict": {
"title": "L’Histoire dans les jeux vidéo",
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')
path = self._search_regex(
r'&urlAOD=(.*?)&startTime', webpage, 'video url')
video_url = 'http://www.franceinter.fr/' + path
return {
'id': video_id,
'formats': [{
'url': video_url,
'vcodec': 'none',
}],
'title': title,
}
|
Edraak/edraak-platform
|
refs/heads/master
|
common/djangoapps/course_modes/admin.py
|
11
|
from django import forms
from django.conf import settings
from django.contrib import admin
from django.http.request import QueryDict
from django.utils.translation import ugettext_lazy as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from pytz import UTC, timezone
from course_modes.models import CourseMode, CourseModeExpirationConfig
# Technically, we shouldn't be doing this, since verify_student is defined
# in LMS, and course_modes is defined in common.
#
# Once we move the responsibility for administering course modes into
# the Course Admin tool, we can remove this dependency and expose
# verification deadlines as a separate Django model admin.
#
# The admin page will work in both LMS and Studio,
# but the test suite for Studio will fail because
# the verification deadline table won't exist.
from lms.djangoapps.verify_student import models as verification_models
from openedx.core.lib.courses import clean_course_id
from util.date_utils import get_time_display
from xmodule.modulestore.django import modulestore
COURSE_MODE_SLUG_CHOICES = [(mode_slug, mode_slug) for mode_slug in settings.COURSE_ENROLLMENT_MODES]
class CourseModeForm(forms.ModelForm):
"""
Admin form for adding a course mode.
"""
class Meta(object):
model = CourseMode
fields = '__all__'
mode_slug = forms.ChoiceField(choices=COURSE_MODE_SLUG_CHOICES, label=_("Mode"))
# The verification deadline is stored outside the course mode in the verify_student app.
# (we used to use the course mode expiration_datetime as both an upgrade and verification deadline).
# In order to make this transition easier, we include the verification deadline as a custom field
# in the course mode admin form. Longer term, we will deprecate the course mode Django admin
# form in favor of an external Course Administration Tool.
verification_deadline = forms.SplitDateTimeField(
label=_("Verification Deadline"),
required=False,
help_text=_(
"OPTIONAL: After this date/time, users will no longer be able to submit photos for verification. "
"This appies ONLY to modes that require verification."
),
widget=admin.widgets.AdminSplitDateTime,
)
def __init__(self, *args, **kwargs):
# If args is a QueryDict, then the ModelForm addition request came in as a POST with a course ID string.
# Change the course ID string to a CourseLocator object by copying the QueryDict to make it mutable.
if len(args) > 0 and 'course' in args[0] and isinstance(args[0], QueryDict):
args_copy = args[0].copy()
args_copy['course'] = CourseKey.from_string(args_copy['course'])
args = [args_copy]
super(CourseModeForm, self).__init__(*args, **kwargs)
try:
if self.data.get('course'):
self.data['course'] = CourseKey.from_string(self.data['course'])
except AttributeError:
# Change the course ID string to a CourseLocator.
# On a POST request, self.data is a QueryDict and is immutable - so this code will fail.
# However, the args copy above before the super() call handles this case.
pass
default_tz = timezone(settings.TIME_ZONE)
if self.instance._expiration_datetime: # pylint: disable=protected-access
# django admin is using default timezone. To avoid time conversion from db to form
# convert the UTC object to naive and then localize with default timezone.
_expiration_datetime = self.instance._expiration_datetime.replace( # pylint: disable=protected-access
tzinfo=None
)
self.initial["_expiration_datetime"] = default_tz.localize(_expiration_datetime)
# Load the verification deadline
# Since this is stored on a model in verify student, we need to load it from there.
# We need to munge the timezone a bit to get Django admin to display it without converting
# it to the user's timezone. We'll make sure we write it back to the database with the timezone
# set to UTC later.
if self.instance.course_id and self.instance.mode_slug in CourseMode.VERIFIED_MODES:
deadline = verification_models.VerificationDeadline.deadline_for_course(self.instance.course_id)
self.initial["verification_deadline"] = (
default_tz.localize(deadline.replace(tzinfo=None))
if deadline is not None else None
)
def clean_course_id(self):
"""
Validate the course id
"""
return clean_course_id(self)
def clean__expiration_datetime(self):
"""
Ensure that the expiration datetime we save uses the UTC timezone.
"""
# django admin saving the date with default timezone to avoid time conversion from form to db
# changes its tzinfo to UTC
if self.cleaned_data.get("_expiration_datetime"):
return self.cleaned_data.get("_expiration_datetime").replace(tzinfo=UTC)
def clean_verification_deadline(self):
"""
Ensure that the verification deadline we save uses the UTC timezone.
"""
if self.cleaned_data.get("verification_deadline"):
return self.cleaned_data.get("verification_deadline").replace(tzinfo=UTC)
def clean(self):
"""
Clean the form fields.
This is the place to perform checks that involve multiple form fields.
"""
cleaned_data = super(CourseModeForm, self).clean()
mode_slug = cleaned_data.get("mode_slug")
upgrade_deadline = cleaned_data.get("_expiration_datetime")
verification_deadline = cleaned_data.get("verification_deadline")
# Allow upgrade deadlines ONLY for the "verified" mode
# This avoids a nasty error condition in which the upgrade deadline is set
# for a professional education course before the enrollment end date.
# When this happens, the course mode expires and students are able to enroll
# in the course for free. To avoid this, we explicitly prevent admins from
# setting an upgrade deadline for any mode except "verified" (which has an upgrade path).
if upgrade_deadline is not None and mode_slug != CourseMode.VERIFIED:
raise forms.ValidationError(
'Only the "verified" mode can have an upgrade deadline. '
'For other modes, please set the enrollment end date in Studio.'
)
# Verification deadlines are allowed only for verified modes
if verification_deadline is not None and mode_slug not in CourseMode.VERIFIED_MODES:
raise forms.ValidationError("Verification deadline can be set only for verified modes.")
# Verification deadline must be after the upgrade deadline,
# if an upgrade deadline is set.
# There are cases in which we might want to set a verification deadline,
# but not an upgrade deadline (for example, a professional education course that requires verification).
if verification_deadline is not None:
if upgrade_deadline is not None and verification_deadline < upgrade_deadline:
raise forms.ValidationError("Verification deadline must be after the upgrade deadline.")
return cleaned_data
def save(self, commit=True):
"""
Save the form data.
"""
# Trigger validation so we can access cleaned data
if self.is_valid():
course = self.cleaned_data.get("course")
verification_deadline = self.cleaned_data.get("verification_deadline")
mode_slug = self.cleaned_data.get("mode_slug")
# Since the verification deadline is stored in a separate model,
# we need to handle saving this ourselves.
# Note that verification deadline can be `None` here if
# the deadline is being disabled.
if course is not None and mode_slug in CourseMode.VERIFIED_MODES:
verification_models.VerificationDeadline.set_deadline(
course.id,
verification_deadline
)
return super(CourseModeForm, self).save(commit=commit)
@admin.register(CourseMode)
class CourseModeAdmin(admin.ModelAdmin):
"""Admin for course modes"""
form = CourseModeForm
fields = (
'course',
'mode_slug',
'mode_display_name',
'min_price',
'currency',
'_expiration_datetime',
'verification_deadline',
'sku',
'bulk_sku'
)
search_fields = ('course__id',)
list_display = (
'id',
'course',
'mode_slug',
'min_price',
'expiration_datetime_custom',
'sku',
'bulk_sku'
)
def expiration_datetime_custom(self, obj):
"""adding custom column to show the expiry_datetime"""
if obj.expiration_datetime:
return get_time_display(obj.expiration_datetime, '%B %d, %Y, %H:%M %p')
# Display a more user-friendly name for the custom expiration datetime field
# in the Django admin list view.
expiration_datetime_custom.short_description = "Upgrade Deadline"
admin.site.register(CourseModeExpirationConfig)
|
sigysmund/platform_external_skia
|
refs/heads/master
|
tools/roll_deps.py
|
68
|
#!/usr/bin/python2
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Skia's Chromium DEPS roll script.
This script:
- searches through the last N Skia git commits to find out the hash that is
associated with the SVN revision number.
- creates a new branch in the Chromium tree, modifies the DEPS file to
point at the given Skia commit, commits, uploads to Rietveld, and
deletes the local copy of the branch.
- creates a whitespace-only commit and uploads that to Rietveld.
- returns the Chromium tree to its previous state.
To specify the location of the git executable, set the GIT_EXECUTABLE
environment variable.
Usage:
%prog -c CHROMIUM_PATH -r REVISION [OPTIONAL_OPTIONS]
"""
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
import git_utils
import misc_utils
DEFAULT_BOTS_LIST = [
'android_clang_dbg',
'android_dbg',
'android_rel',
'cros_daisy',
'linux',
'linux_asan',
'linux_chromeos',
'linux_chromeos_asan',
'linux_chromium_gn_dbg',
'linux_gpu',
'linux_layout',
'linux_layout_rel',
'mac',
'mac_asan',
'mac_gpu',
'mac_layout',
'mac_layout_rel',
'win',
'win_gpu',
'win_layout',
'win_layout_rel',
]
class DepsRollConfig(object):
"""Contains configuration options for this module.
Attributes:
git: (string) The git executable.
chromium_path: (string) path to a local chromium git repository.
save_branches: (boolean) iff false, delete temporary branches.
verbose: (boolean) iff false, suppress the output from git-cl.
search_depth: (int) how far back to look for the revision.
skia_url: (string) Skia's git repository.
self.skip_cl_upload: (boolean)
self.cl_bot_list: (list of strings)
"""
# pylint: disable=I0011,R0903,R0902
def __init__(self, options=None):
self.skia_url = 'https://skia.googlesource.com/skia.git'
self.revision_format = (
'git-svn-id: http://skia.googlecode.com/svn/trunk@%d ')
self.git = git_utils.git_executable()
if not options:
options = DepsRollConfig.GetOptionParser()
# pylint: disable=I0011,E1103
self.verbose = options.verbose
self.vsp = misc_utils.VerboseSubprocess(self.verbose)
self.save_branches = not options.delete_branches
self.search_depth = options.search_depth
self.chromium_path = options.chromium_path
self.skip_cl_upload = options.skip_cl_upload
# Split and remove empty strings from the bot list.
self.cl_bot_list = [bot for bot in options.bots.split(',') if bot]
self.skia_git_checkout_path = options.skia_git_path
self.default_branch_name = 'autogenerated_deps_roll_branch'
self.reviewers_list = ','.join([
# 'rmistry@google.com',
# 'reed@google.com',
# 'bsalomon@google.com',
# 'robertphillips@google.com',
])
self.cc_list = ','.join([
# 'skia-team@google.com',
])
@staticmethod
def GetOptionParser():
# pylint: disable=I0011,C0103
"""Returns an optparse.OptionParser object.
Returns:
An optparse.OptionParser object.
Called by the main() function.
"""
option_parser = optparse.OptionParser(usage=__doc__)
# Anyone using this script on a regular basis should set the
# CHROMIUM_CHECKOUT_PATH environment variable.
option_parser.add_option(
'-c', '--chromium_path', help='Path to local Chromium Git'
' repository checkout, defaults to CHROMIUM_CHECKOUT_PATH'
' if that environment variable is set.',
default=os.environ.get('CHROMIUM_CHECKOUT_PATH'))
option_parser.add_option(
'-r', '--revision', type='int', default=None,
help='The Skia SVN revision number, defaults to top of tree.')
option_parser.add_option(
'-g', '--git_hash', default=None,
help='A partial Skia Git hash. Do not set this and revision.')
# Anyone using this script on a regular basis should set the
# SKIA_GIT_CHECKOUT_PATH environment variable.
option_parser.add_option(
'', '--skia_git_path',
help='Path of a pure-git Skia repository checkout. If empty,'
' a temporary will be cloned. Defaults to SKIA_GIT_CHECKOUT'
'_PATH, if that environment variable is set.',
default=os.environ.get('SKIA_GIT_CHECKOUT_PATH'))
option_parser.add_option(
'', '--search_depth', type='int', default=100,
help='How far back to look for the revision.')
option_parser.add_option(
'', '--delete_branches', help='Delete the temporary branches',
action='store_true', dest='delete_branches', default=False)
option_parser.add_option(
'', '--verbose', help='Do not suppress the output from `git cl`.',
action='store_true', dest='verbose', default=False)
option_parser.add_option(
'', '--skip_cl_upload', help='Skip the cl upload step; useful'
' for testing.',
action='store_true', default=False)
default_bots_help = (
'Comma-separated list of bots, defaults to a list of %d bots.'
' To skip `git cl try`, set this to an empty string.'
% len(DEFAULT_BOTS_LIST))
default_bots = ','.join(DEFAULT_BOTS_LIST)
option_parser.add_option(
'', '--bots', help=default_bots_help, default=default_bots)
return option_parser
class DepsRollError(Exception):
"""Exceptions specific to this module."""
pass
def get_svn_revision(config, commit):
"""Works in both git and git-svn. returns a string."""
svn_format = (
'(git-svn-id: [^@ ]+@|SVN changes up to revision |'
'LKGR w/ DEPS up to revision )(?P<return>[0-9]+)')
svn_revision = misc_utils.ReSearch.search_within_output(
config.verbose, svn_format, None,
[config.git, 'log', '-n', '1', '--format=format:%B', commit])
if not svn_revision:
raise DepsRollError(
'Revision number missing from Chromium origin/master.')
return int(svn_revision)
class SkiaGitCheckout(object):
"""Class to create a temporary skia git checkout, if necessary.
"""
# pylint: disable=I0011,R0903
def __init__(self, config, depth):
self._config = config
self._depth = depth
self._use_temp = None
self._original_cwd = None
def __enter__(self):
config = self._config
git = config.git
skia_dir = None
self._original_cwd = os.getcwd()
if config.skia_git_checkout_path:
if config.skia_git_checkout_path != os.curdir:
skia_dir = config.skia_git_checkout_path
## Update origin/master if needed.
if self._config.verbose:
print '~~$', 'cd', skia_dir
os.chdir(skia_dir)
config.vsp.check_call([git, 'fetch', '-q', 'origin'])
self._use_temp = None
else:
skia_dir = tempfile.mkdtemp(prefix='git_skia_tmp_')
self._use_temp = skia_dir
try:
os.chdir(skia_dir)
config.vsp.check_call(
[git, 'clone', '-q', '--depth=%d' % self._depth,
'--single-branch', config.skia_url, '.'])
except (OSError, subprocess.CalledProcessError) as error:
shutil.rmtree(skia_dir)
raise error
def __exit__(self, etype, value, traceback):
if self._config.skia_git_checkout_path != os.curdir:
if self._config.verbose:
print '~~$', 'cd', self._original_cwd
os.chdir(self._original_cwd)
if self._use_temp:
shutil.rmtree(self._use_temp)
def revision_and_hash(config):
"""Finds revision number and git hash of origin/master in the Skia tree.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
Returns:
A tuple (revision, hash)
revision: (int) SVN revision number.
git_hash: (string) full Git commit hash.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
with SkiaGitCheckout(config, 1):
revision = get_svn_revision(config, 'origin/master')
git_hash = config.vsp.strip_output(
[config.git, 'show-ref', 'origin/master', '--hash'])
if not git_hash:
raise DepsRollError('Git hash can not be found.')
return revision, git_hash
def revision_and_hash_from_revision(config, revision):
"""Finds revision number and git hash of a commit in the Skia tree.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
revision: (int) SVN revision number.
Returns:
A tuple (revision, hash)
revision: (int) SVN revision number.
git_hash: (string) full Git commit hash.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
with SkiaGitCheckout(config, config.search_depth):
revision_regex = config.revision_format % revision
git_hash = config.vsp.strip_output(
[config.git, 'log', '--grep', revision_regex,
'--format=format:%H', 'origin/master'])
if not git_hash:
raise DepsRollError('Git hash can not be found.')
return revision, git_hash
def revision_and_hash_from_partial(config, partial_hash):
"""Returns the SVN revision number and full git hash.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
partial_hash: (string) Partial git commit hash.
Returns:
A tuple (revision, hash)
revision: (int) SVN revision number.
git_hash: (string) full Git commit hash.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
with SkiaGitCheckout(config, config.search_depth):
git_hash = config.vsp.strip_output(
['git', 'log', '-n', '1', '--format=format:%H', partial_hash])
if not git_hash:
raise DepsRollError('Partial Git hash can not be found.')
revision = get_svn_revision(config, git_hash)
return revision, git_hash
def change_skia_deps(revision, git_hash, depspath):
"""Update the DEPS file.
Modify the skia_revision and skia_hash entries in the given DEPS file.
Args:
revision: (int) Skia SVN revision.
git_hash: (string) Skia Git hash.
depspath: (string) path to DEPS file.
"""
temp_file = tempfile.NamedTemporaryFile(delete=False,
prefix='skia_DEPS_ROLL_tmp_')
try:
deps_regex_rev = re.compile('"skia_revision": "[0-9]*",')
deps_regex_hash = re.compile('"skia_hash": "[0-9a-f]*",')
deps_regex_rev_repl = '"skia_revision": "%d",' % revision
deps_regex_hash_repl = '"skia_hash": "%s",' % git_hash
with open(depspath, 'r') as input_stream:
for line in input_stream:
line = deps_regex_rev.sub(deps_regex_rev_repl, line)
line = deps_regex_hash.sub(deps_regex_hash_repl, line)
temp_file.write(line)
finally:
temp_file.close()
shutil.move(temp_file.name, depspath)
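# Illustrative sketch (not part of the original script): change_skia_deps()
# rewrites DEPS entries of the form '"skia_revision": "12000",' and
# '"skia_hash": "<hex>",' in place.  A standalone check of the same revision
# regex, with a made-up new revision:
def _demo_deps_revision_substitution():
    import re
    line = '  "skia_revision": "12000",'
    return re.sub('"skia_revision": "[0-9]*",',
                  '"skia_revision": "%d",' % 12345,
                  line)  # -> '  "skia_revision": "12345",'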
def git_cl_uploader(config, message, file_list):
"""Create a commit in the current git branch; upload via git-cl.
Assumes that you are already on the branch you want to be on.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
message: (string) the commit message, can be multiline.
file_list: (list of strings) list of filenames to pass to `git add`.
Returns:
The output of `git cl issue`, if not config.skip_cl_upload, else ''.
"""
git, vsp = config.git, config.vsp
svn_info = str(get_svn_revision(config, 'HEAD'))
for filename in file_list:
assert os.path.exists(filename)
vsp.check_call([git, 'add', filename])
vsp.check_call([git, 'commit', '-q', '-m', message])
git_cl = [git, 'cl', 'upload', '-f',
'--bypass-hooks', '--bypass-watchlists']
if config.cc_list:
git_cl.append('--cc=%s' % config.cc_list)
if config.reviewers_list:
git_cl.append('--reviewers=%s' % config.reviewers_list)
git_try = [
git, 'cl', 'try', '-m', 'tryserver.chromium', '--revision', svn_info]
git_try.extend([arg for bot in config.cl_bot_list for arg in ('-b', bot)])
branch_name = git_utils.git_branch_name(vsp.verbose)
if config.skip_cl_upload:
space = ' '
print 'You should call:'
print '%scd %s' % (space, os.getcwd())
misc_utils.print_subprocess_args(space, [git, 'checkout', branch_name])
misc_utils.print_subprocess_args(space, git_cl)
if config.cl_bot_list:
misc_utils.print_subprocess_args(space, git_try)
print
return ''
else:
vsp.check_call(git_cl)
issue = vsp.strip_output([git, 'cl', 'issue'])
if config.cl_bot_list:
vsp.check_call(git_try)
return issue
def roll_deps(config, revision, git_hash):
"""Upload changed DEPS and a whitespace change.
    Given the correct git_hash, create two Rietveld issues.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
revision: (int) Skia SVN revision.
git_hash: (string) Skia Git hash.
Returns:
a tuple containing textual description of the two issues.
Raises:
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
git = config.git
with misc_utils.ChangeDir(config.chromium_path, config.verbose):
config.vsp.check_call([git, 'fetch', '-q', 'origin'])
old_revision = misc_utils.ReSearch.search_within_output(
config.verbose, '"skia_revision": "(?P<return>[0-9]+)",', None,
[git, 'show', 'origin/master:DEPS'])
assert old_revision
if revision == int(old_revision):
print 'DEPS is up to date!'
return (None, None)
master_hash = config.vsp.strip_output(
[git, 'show-ref', 'origin/master', '--hash'])
master_revision = get_svn_revision(config, 'origin/master')
        # master_hash[:8] gives each whitespace CL a unique name.
if config.save_branches:
branch = 'control_%s' % master_hash[:8]
else:
branch = None
message = ('whitespace change %s\n\n'
'Chromium base revision: %d / %s\n\n'
'This CL was created by Skia\'s roll_deps.py script.\n'
) % (master_hash[:8], master_revision, master_hash[:8])
with git_utils.ChangeGitBranch(branch, 'origin/master',
config.verbose):
branch = git_utils.git_branch_name(config.vsp.verbose)
with open('build/whitespace_file.txt', 'a') as output_stream:
output_stream.write('\nCONTROL\n')
whitespace_cl = git_cl_uploader(
config, message, ['build/whitespace_file.txt'])
control_url = misc_utils.ReSearch.search_within_string(
whitespace_cl, '(?P<return>https?://[^) ]+)', '?')
if config.save_branches:
whitespace_cl = '%s\n branch: %s' % (whitespace_cl, branch)
if config.save_branches:
branch = 'roll_%d_%s' % (revision, master_hash[:8])
else:
branch = None
message = (
'roll skia DEPS to %d\n\n'
'Chromium base revision: %d / %s\n'
'Old Skia revision: %s\n'
'New Skia revision: %d\n'
'Control CL: %s\n\n'
'This CL was created by Skia\'s roll_deps.py script.\n\n'
'Bypassing commit queue trybots:\n'
'NOTRY=true\n'
% (revision, master_revision, master_hash[:8],
old_revision, revision, control_url))
with git_utils.ChangeGitBranch(branch, 'origin/master',
config.verbose):
branch = git_utils.git_branch_name(config.vsp.verbose)
change_skia_deps(revision, git_hash, 'DEPS')
deps_cl = git_cl_uploader(config, message, ['DEPS'])
if config.save_branches:
deps_cl = '%s\n branch: %s' % (deps_cl, branch)
return deps_cl, whitespace_cl
def find_hash_and_roll_deps(config, revision=None, partial_hash=None):
"""Call find_hash_from_revision() and roll_deps().
The calls to git will be verbose on standard output. After a
successful upload of both issues, print links to the new
codereview issues.
Args:
config: (roll_deps.DepsRollConfig) object containing options.
revision: (int or None) the Skia SVN revision number or None
to use the tip of the tree.
partial_hash: (string or None) a partial pure-git Skia commit
hash. Don't pass both partial_hash and revision.
Raises:
roll_deps.DepsRollError: if the revision can't be found.
OSError: failed to execute git or git-cl.
subprocess.CalledProcessError: git returned unexpected status.
"""
if revision and partial_hash:
raise DepsRollError('Pass revision or partial_hash, not both.')
if partial_hash:
revision, git_hash = revision_and_hash_from_partial(
config, partial_hash)
elif revision:
revision, git_hash = revision_and_hash_from_revision(config, revision)
else:
revision, git_hash = revision_and_hash(config)
print 'revision=%r\nhash=%r\n' % (revision, git_hash)
deps_issue, whitespace_issue = roll_deps(config, revision, git_hash)
if deps_issue and whitespace_issue:
print 'DEPS roll:\n %s\n' % deps_issue
print 'Whitespace change:\n %s\n' % whitespace_issue
else:
print >> sys.stderr, 'No issues created.'
def main(args):
"""main function; see module-level docstring and GetOptionParser help.
Args:
args: sys.argv[1:]-type argument list.
"""
option_parser = DepsRollConfig.GetOptionParser()
options = option_parser.parse_args(args)[0]
if not options.chromium_path:
option_parser.error('Must specify chromium_path.')
if not os.path.isdir(options.chromium_path):
option_parser.error('chromium_path must be a directory.')
if not git_utils.git_executable():
option_parser.error('Invalid git executable.')
config = DepsRollConfig(options)
find_hash_and_roll_deps(config, options.revision, options.git_hash)
if __name__ == '__main__':
main(sys.argv[1:])
|
wanglei828/apollo
|
refs/heads/master
|
cyber/proto/__init__.py
|
3
|
# ****************************************************************************
# Copyright 2018 The Apollo Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
|
thomashaw/SecGen
|
refs/heads/master
|
modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/encodings/mbcs.py
|
860
|
""" Python 'mbcs' Codec for Windows
Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,
which was written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
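# Illustrative usage sketch (not part of the original module); the codec is
# only importable on Windows, where it maps to the ANSI code page:
#   data = u'caf\xe9'.encode('mbcs')
#   assert data.decode('mbcs') == u'caf\xe9'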
|
jdelight/django
|
refs/heads/master
|
django/contrib/auth/handlers/modwsgi.py
|
537
|
from django import db
from django.contrib import auth
from django.utils.encoding import force_bytes
def check_password(environ, username, password):
"""
Authenticates against Django's auth database
mod_wsgi docs specify None, True, False as return value depending
on whether the user exists and authenticates.
"""
UserModel = auth.get_user_model()
# db connection state is managed similarly to the wsgi handler
# as mod_wsgi may call these functions outside of a request/response cycle
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return None
if not user.is_active:
return None
return user.check_password(password)
finally:
db.close_old_connections()
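# Illustrative deployment sketch (not part of this module): mod_wsgi can point
# HTTP Basic auth at this file; the directive names below assume mod_wsgi's
# documented auth hooks and the script path is hypothetical:
#
#   AuthType Basic
#   AuthName "Restricted"
#   AuthBasicProvider wsgi
#   WSGIAuthUserScript /path/to/django/contrib/auth/handlers/modwsgi.py
#   Require valid-user
#
# mod_wsgi then calls check_password(environ, username, password) and treats
# None as "unknown user", True as "authenticated" and False as "bad password".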
def groups_for_user(environ, username):
"""
Authorizes a user based on groups
"""
UserModel = auth.get_user_model()
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return []
if not user.is_active:
return []
return [force_bytes(group.name) for group in user.groups.all()]
finally:
db.close_old_connections()
|
oktopac/tfword2vec
|
refs/heads/master
|
tfword2vec/utils.py
|
1
|
import collections
import random
import numpy as np
import tensorflow as tf
def read_words(fname):
with open(fname, 'r') as fd:
words = tf.compat.as_str(fd.read()).split()
return words
def generate_vocab(words, n=None):
vocab = collections.Counter(words)
return vocab.most_common(n=n)
def save_vocab(vocab, path):
with open(path, 'w') as fd:
fd.write("word\tcount\n")
for word, count in vocab:
fd.write("%s\t%d\n" % (word, count))
fd.close()
def generate_w2i_lookup(vocab):
    assert type(vocab) == list
w2i = {}
for i, (word, count) in enumerate(vocab):
w2i[word] = i
return w2i
def generate_i2w_lookup(vocab):
return [x[0] for x in vocab]
def generate_index_document(w2i, words):
return [w2i[word] for word in words if word in w2i]
def generate_sample(index_document, context_window_size):
for index, center in enumerate(index_document):
context = random.randint(1, context_window_size)
# get a random target before the center word
for target in index_document[max(0, index - context): index]:
yield center, target
        # get a random target after the center word
for target in index_document[index + 1: index + context + 1]:
yield center, target
def get_batch(iterator, batch_size):
""" Group a numerical stream into batches and yield them as Numpy arrays. """
while True:
center_batch = np.zeros(batch_size, dtype=np.int32)
target_batch = np.zeros([batch_size, 1])
for index in range(batch_size):
center_batch[index], target_batch[index] = next(iterator)
yield center_batch, target_batch
def setup_document_generator(fname, outdir, vocab_size, skip_window):
words = read_words(fname)
vocab = generate_vocab(words, vocab_size)
save_vocab(vocab, "%s/vocab.csv" % outdir)
w2i = generate_w2i_lookup(vocab)
i2w = generate_i2w_lookup(vocab)
index_document = generate_index_document(w2i, words)
del words
# Here we create a function that starts a new single generator
single_generator = generate_sample(index_document, skip_window)
return single_generator, i2w
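# Illustrative sketch (not part of the original module): wiring the helpers
# above together on a tiny in-memory corpus, assuming nothing beyond this file.
def _demo_pipeline():
    words = 'the quick brown fox jumps over the lazy dog'.split()
    vocab = generate_vocab(words)
    w2i = generate_w2i_lookup(vocab)
    index_document = generate_index_document(w2i, words)
    samples = generate_sample(index_document, context_window_size=2)
    centers, targets = next(get_batch(samples, batch_size=4))
    return centers.shape, targets.shape  # -> ((4,), (4, 1))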
|
ds-hwang/chromium-crosswalk
|
refs/heads/master
|
build/copy_test_data_ios.py
|
206
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Copies test data files or directories into a given output directory."""
import optparse
import os
import shutil
import sys
class WrongNumberOfArgumentsException(Exception):
pass
def EscapePath(path):
"""Returns a path with spaces escaped."""
return path.replace(" ", "\\ ")
def ListFilesForPath(path):
"""Returns a list of all the files under a given path."""
output = []
# Ignore revision control metadata directories.
if (os.path.basename(path).startswith('.git') or
os.path.basename(path).startswith('.svn')):
return output
# Files get returned without modification.
if not os.path.isdir(path):
output.append(path)
return output
# Directories get recursively expanded.
contents = os.listdir(path)
for item in contents:
full_path = os.path.join(path, item)
output.extend(ListFilesForPath(full_path))
return output
def CalcInputs(inputs):
"""Computes the full list of input files for a set of command-line arguments.
"""
# |inputs| is a list of paths, which may be directories.
output = []
for input in inputs:
output.extend(ListFilesForPath(input))
return output
def CopyFiles(relative_filenames, output_basedir):
"""Copies files to the given output directory."""
for file in relative_filenames:
relative_dirname = os.path.dirname(file)
output_dir = os.path.join(output_basedir, relative_dirname)
output_filename = os.path.join(output_basedir, file)
# In cases where a directory has turned into a file or vice versa, delete it
# before copying it below.
if os.path.exists(output_dir) and not os.path.isdir(output_dir):
os.remove(output_dir)
if os.path.exists(output_filename) and os.path.isdir(output_filename):
shutil.rmtree(output_filename)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
shutil.copy(file, output_filename)
def DoMain(argv):
parser = optparse.OptionParser()
usage = 'Usage: %prog -o <output_dir> [--inputs] [--outputs] <input_files>'
parser.set_usage(usage)
parser.add_option('-o', dest='output_dir')
parser.add_option('--inputs', action='store_true', dest='list_inputs')
parser.add_option('--outputs', action='store_true', dest='list_outputs')
options, arglist = parser.parse_args(argv)
if len(arglist) == 0:
raise WrongNumberOfArgumentsException('<input_files> required.')
files_to_copy = CalcInputs(arglist)
escaped_files = [EscapePath(x) for x in CalcInputs(arglist)]
if options.list_inputs:
return '\n'.join(escaped_files)
if not options.output_dir:
raise WrongNumberOfArgumentsException('-o required.')
if options.list_outputs:
outputs = [os.path.join(options.output_dir, x) for x in escaped_files]
return '\n'.join(outputs)
CopyFiles(files_to_copy, options.output_dir)
return
def main(argv):
try:
result = DoMain(argv[1:])
except WrongNumberOfArgumentsException, e:
print >>sys.stderr, e
return 1
if result:
print result
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
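# Illustrative invocation sketch (not part of the original script); the paths
# are hypothetical:
#   python copy_test_data_ios.py -o out/Debug/test_data testdata/ extra.txt
#   python copy_test_data_ios.py --inputs testdata/
# With --inputs (or --outputs) the script only prints the resolved file list
# instead of copying anything.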
|
yephper/django
|
refs/heads/master
|
django/bin/minicms/DjangoUeditor/commands.py
|
2
|
# coding:utf-8
import urllib
from . import settings as USettings
class UEditorEventHandler(object):
"""用来处理UEditor的事件侦听"""
def on_selectionchange(self):
return ""
def on_contentchange(self):
return ""
def render(self,editorID):
jscode="""
%(editor)s.addListener('%(event)s', function () {
%(event_code)s
});"""
event_codes=[]
        # List every method whose name starts with "on_" and listen for the
        # corresponding UEditor event.
events=filter(lambda x: x[0:3]=="on_", dir(self))
for event in events:
try:
event_code=getattr(self,event)()
if event_code:
event_code=event_code % {"editor":editorID}
event_codes.append(jscode % {"editor":editorID,"event":event[3:],"event_code":event_code})
except:
pass
if len(event_codes)==0:
return ""
else:
return "\n".join(event_codes)
class UEditorCommand(object):
"""
    Adds front-end UI extensions such as buttons and drop-downs.
"""
def __init__(self,**kwargs):
self.uiName=kwargs.pop("uiName","")
self.index=kwargs.pop("index",0)
self.title=kwargs.pop("title",self.uiName)
self.ajax_url=kwargs.pop("ajax_url","")
def render_ui(self,editor):
"""" 创建ueditor的ui扩展对象的js代码,如button,combo等 """
raise NotImplementedError
def render_ajax_command(self):
""""生成通过ajax调用后端命令的前端ajax代码"""
if not self.ajax_url: return ""
return u"""
UE.ajax.request( '%(ajax_url)s', {
data: {
name: 'ueditor'
},
onsuccess: function ( xhr ) {%(ajax_success)s},
onerror: function ( xhr ){ %(ajax_error)s }
});
""" % {
"ajax_url":self.ajax_url,
"ajax_success":self.onExecuteAjaxCommand("success"),
"ajax_error":self.onExecuteAjaxCommand("error")
}
def render_command(self):
"""" 返回注册命令的js定义 """
cmd=self.onExecuteCommand()
ajax_cmd=self.render_ajax_command()
queryvalue_command=self.onExecuteQueryvalueCommand()
cmds=[]
if cmd or ajax_cmd:
cmds.append( u"""execCommand: function() {
%(exec_cmd)s
%(exec_ajax_cmd)s
}
""" % {"exec_cmd":cmd,"exec_ajax_cmd":ajax_cmd},)
if queryvalue_command:
cmds.append(u"""queryCommandValue:function(){
%s
}""" % queryvalue_command)
if len(cmds)>0:
return u"""
editor.registerCommand(uiName, {
%s
});
""" % ",".join(cmds)
else:
return ""
def render(self,editorID):
return u"""
UE.registerUI("%(uiName)s", function(editor, uiName) {
%(registerCommand)s
%(uiObject)s
},%(index)s,"%(editor)s");
""" % {
"registerCommand":self.render_command(),
"uiName":self.uiName,
"uiObject":self.render_ui(editorID),
"index":self.index,
"editor":editorID
}
def onExecuteCommand(self):
""" 返回执行Command时的js代码 """
return ""
def onExecuteAjaxCommand(self,state):
""" 返回执行Command时发起Ajax调用成功与失败的js代码 """
return ""
def onExecuteQueryvalueCommand(self):
"""" 返回执行QueryvalueCommand时的js代码 """
return ""
class UEditorButtonCommand(UEditorCommand):
def __init__(self,**kwargs):
self.icon = kwargs.pop("icon", "")
super(UEditorButtonCommand, self).__init__(**kwargs)
def onClick(self):
""""按钮单击js代码,默认执行uiName命令,默认会调用Command """
return """
editor.execCommand(uiName);
"""
def render_ui(self,editorID):
""" 创建button的js代码: """
return """
var btn = new UE.ui.Button({
name: uiName,
title: "%(title)s",
cssRules: "background-image:url('%(icon)s')!important;",
onclick: function() {
%(onclick)s
}
});
return btn
""" % {
"icon": urllib.basejoin(USettings.gSettings.MEDIA_URL , self.icon),
"onclick": self.onClick(),
"title": self.title
}
class UEditorComboCommand(UEditorCommand):
def __init__(self,**kwargs):
self.items = kwargs.pop("items", [])
self.initValue=kwargs.pop("initValue","")
super(UEditorComboCommand, self).__init__(**kwargs)
def get_items(self):
return self.items
def onSelect(self):
return ""
def render_ui(self,editorID):
""" 创建combo的js代码: """
return """
var combox = new UE.ui.Combox({
editor:editor,
items:%(items)s,
onselect:function (t, index) {
%(onselect)s
},
title:'%(title)s',
initValue:'%(initValue)s'
});
return combox;
""" % {
"title":self.title,
"items":str(self.get_items()),
"onselect": self.onSelect(),
"initValue":self.initValue
}
class UEditorDialogCommand(UEditorCommand):
pass
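# Illustrative sketch (not part of the original module): a hypothetical button
# command built on UEditorButtonCommand; the uiName, title, icon and inserted
# HTML below are made up.
class HelloButtonCommand(UEditorButtonCommand):
    def onClick(self):
        # Insert literal HTML at the cursor instead of re-dispatching uiName.
        return "editor.execCommand('inserthtml', '<b>hello</b>');"
# It would be instantiated like any other command, e.g.
# HelloButtonCommand(uiName='hello', title='Say hello', icon='hello.png').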
|
OCA/purchase-workflow
|
refs/heads/12.0
|
purchase_order_line_price_history/__init__.py
|
3
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import wizards
|
poojavade/Genomics_Docker
|
refs/heads/master
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/bx/seq/__init__.py
|
7
|
"""
Classes for dealing with biological sequences. See `core` for the abstract
sequence classes and `nib` and `qdna` for specifics of various
formats.
"""
from bx.seq.core import *
|
qrkourier/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/__init__.py
|
28
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from ansible.module_utils.six import with_metaclass
from ansible.errors import AnsibleFileNotFound
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['LookupBase']
class LookupBase(with_metaclass(ABCMeta, object)):
def __init__(self, loader=None, templar=None, **kwargs):
self._loader = loader
self._templar = templar
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
def get_basedir(self, variables):
if 'role_path' in variables:
return variables['role_path']
else:
return self._loader.get_basedir()
@staticmethod
def _flatten(terms):
ret = []
for term in terms:
if isinstance(term, (list, tuple)):
ret.extend(term)
else:
ret.append(term)
return ret
@staticmethod
def _combine(a, b):
results = []
for x in a:
for y in b:
results.append(LookupBase._flatten([x, y]))
return results
@staticmethod
def _flatten_hash_to_list(terms):
ret = []
for key in terms:
ret.append({'key': key, 'value': terms[key]})
return ret
@abstractmethod
def run(self, terms, variables=None, **kwargs):
"""
When the playbook specifies a lookup, this method is run. The
arguments to the lookup become the arguments to this method. One
additional keyword argument named ``variables`` is added to the method
call. It contains the variables available to ansible at the time the
lookup is templated. For instance::
"{{ lookup('url', 'https://toshio.fedorapeople.org/one.txt', validate_certs=True) }}"
would end up calling the lookup plugin named url's run method like this::
run(['https://toshio.fedorapeople.org/one.txt'], variables=available_variables, validate_certs=True)
Lookup plugins can be used within playbooks for looping. When this
happens, the first argument is a list containing the terms. Lookup
plugins can also be called from within playbooks to return their
values into a variable or parameter. If the user passes a string in
this case, it is converted into a list.
Errors encountered during execution should be returned by raising
AnsibleError() with a message describing the error.
Any strings returned by this method that could ever contain non-ascii
must be converted into python's unicode type as the strings will be run
through jinja2 which has this requirement. You can use::
from ansible.module_utils._text import to_text
result_string = to_text(result_string)
"""
pass
def find_file_in_search_path(self, myvars, subdir, needle, ignore_missing=False):
'''
Return a file (needle) in the task's expected search path.
'''
if 'ansible_search_path' in myvars:
paths = myvars['ansible_search_path']
else:
paths = [self.get_basedir(myvars)]
result = None
try:
result = self._loader.path_dwim_relative_stack(paths, subdir, needle)
except AnsibleFileNotFound:
if not ignore_missing:
self._display.warning("Unable to find '%s' in expected paths." % needle)
return result
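# Illustrative sketch (not part of Ansible itself): the smallest lookup plugin
# built on LookupBase; the plugin name 'upper' and its file are hypothetical.
#
#   # lookup_plugins/upper.py
#   from ansible.module_utils._text import to_text
#   from ansible.plugins.lookup import LookupBase
#
#   class LookupModule(LookupBase):
#       def run(self, terms, variables=None, **kwargs):
#           # Always return a list of (unicode) results.
#           return [to_text(term).upper() for term in terms]
#
# A playbook would then use it as "{{ lookup('upper', 'skia') }}".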
|
NielsZeilemaker/incubator-airflow
|
refs/heads/master
|
tests/utils/test_dates.py
|
8
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import unittest
from airflow.utils import dates
class Dates(unittest.TestCase):
def test_days_ago(self):
today = datetime.today()
today_midnight = datetime.fromordinal(today.date().toordinal())
self.assertTrue(dates.days_ago(0) == today_midnight)
self.assertTrue(
dates.days_ago(100) == today_midnight + timedelta(days=-100))
self.assertTrue(
dates.days_ago(0, hour=3) == today_midnight + timedelta(hours=3))
self.assertTrue(
dates.days_ago(0, minute=3)
== today_midnight + timedelta(minutes=3))
self.assertTrue(
dates.days_ago(0, second=3)
== today_midnight + timedelta(seconds=3))
self.assertTrue(
dates.days_ago(0, microsecond=3)
== today_midnight + timedelta(microseconds=3))
if __name__ == '__main__':
unittest.main()
|
Ecpy/ecpy
|
refs/heads/master
|
tests/app/errors/test_plugin.py
|
1
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test the ErrorsPlugin.
"""
import pytest
import enaml
from enaml.widgets.api import MultilineField
from exopy.testing.util import handle_dialog, get_window, show_and_close_widget
with enaml.imports():
from enaml.workbench.core.core_manifest import CoreManifest
from enaml.workbench.ui.ui_manifest import UIManifest
from exopy.app.app_manifest import AppManifest
from exopy.app.errors.manifest import ErrorsManifest
from exopy.app.errors.widgets import HierarchicalErrorsDisplay
from exopy.app.packages.manifest import PackagesManifest
APP_ID = 'exopy.app'
ERRORS_ID = 'exopy.app.errors'
@pytest.fixture
def err_workbench(workbench):
"""Create a workbench and register basic manifests.
"""
workbench.register(CoreManifest())
workbench.register(ErrorsManifest())
workbench.register(PackagesManifest())
return workbench
class FailedFormat(object):
def __str__(self):
self.called = 1
raise ValueError()
def __repr__(self):
self.called = 1
raise ValueError()
# =============================================================================
# --- Test plugin -------------------------------------------------------------
# =============================================================================
def test_life_cycle(err_workbench):
"""Test basic behavior of ErrorsPlugin.
"""
plugin = err_workbench.get_plugin(ERRORS_ID)
assert len(plugin.errors) == 4
plugin._errors_handlers.contributions = {}
assert len(plugin.errors) == 0
plugin.stop()
assert not len(plugin.errors)
def test_signal_command_with_unknown(err_workbench, exopy_qtbot):
"""Test the signal command with a stupid kind of error.
"""
core = err_workbench.get_plugin('enaml.workbench.core')
with handle_dialog(exopy_qtbot):
core.invoke_command('exopy.app.errors.signal',
{'kind': 'stupid', 'msg': None})
with handle_dialog(exopy_qtbot):
fail = FailedFormat()
core.invoke_command('exopy.app.errors.signal',
{'kind': 'stupid', 'msg': fail})
assert getattr(fail, 'called', None)
def test_handling_error_in_handlers(err_workbench, exopy_qtbot):
"""Test handling an error occuring in a specilaized handler.
"""
plugin = err_workbench.get_plugin(ERRORS_ID)
def check_dialog(bot, dial):
assert 'error' in dial.errors
assert 'registering' not in dial.errors
with handle_dialog(exopy_qtbot, handler=check_dialog):
plugin.signal('registering')
with handle_dialog(exopy_qtbot, handler=check_dialog):
plugin.signal('registering', msg=FailedFormat())
def test_gathering_mode(err_workbench, exopy_qtbot):
"""Test gathering multiple errors.
"""
core = err_workbench.get_plugin('enaml.workbench.core')
core.invoke_command('exopy.app.errors.enter_error_gathering')
core.invoke_command('exopy.app.errors.signal',
{'kind': 'stupid', 'msg': None})
with pytest.raises(AssertionError):
get_window(exopy_qtbot)
with handle_dialog(exopy_qtbot):
core.invoke_command('exopy.app.errors.exit_error_gathering')
def test_report_command(err_workbench, exopy_qtbot):
"""Test generating an application errors report.
"""
core = err_workbench.get_plugin('enaml.workbench.core')
with handle_dialog(exopy_qtbot):
core.invoke_command('exopy.app.errors.report')
with handle_dialog(exopy_qtbot):
core.invoke_command('exopy.app.errors.report', dict(kind='error'))
with handle_dialog(exopy_qtbot):
core.invoke_command('exopy.app.errors.report', dict(kind='stupid'))
def test_install_excepthook(err_workbench, exopy_qtbot):
"""Test the installation and use of the sys.excepthook.
"""
import sys
old_hook = sys.excepthook
err_workbench.register(UIManifest())
err_workbench.register(AppManifest())
core = err_workbench.get_plugin('enaml.workbench.core')
core.invoke_command('exopy.app.errors.install_excepthook')
new_hook = sys.excepthook
sys.excepthook = old_hook
assert old_hook is not new_hook
try:
raise Exception()
except Exception:
with handle_dialog(exopy_qtbot):
new_hook(*sys.exc_info())
# =============================================================================
# --- Test error handler ------------------------------------------------------
# =============================================================================
def test_reporting_single_error(err_workbench):
"""Check handling a single error.
"""
plugin = err_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['error']
assert handler.handle(err_workbench, {'message': 'test'})
assert 'No message' in handler.handle(err_workbench, {}).text
def test_reporting_multiple_errors(err_workbench):
"""Check handling multiple errors.
"""
plugin = err_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['error']
assert handler.handle(err_workbench, [{'message': 'test'}])
assert 'No message' in handler.handle(err_workbench, {}).text
# =============================================================================
# --- Test registering handler ------------------------------------------------
# =============================================================================
def test_reporting_single_registering_error(err_workbench):
"""Check handling a single registering error.
"""
plugin = err_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['registering']
assert handler.handle(err_workbench, {'id': 'test', 'message': 'test'})
with pytest.raises(Exception):
handler.handle(err_workbench, {})
def test_reporting_multiple_registering_errors(err_workbench):
"""Check handling multiple package errors.
"""
plugin = err_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['registering']
assert handler.handle(err_workbench, [{'id': 'test', 'message': 'test'}])
with pytest.raises(Exception):
handler.handle(err_workbench, {})
# =============================================================================
# --- Test extensions handler -------------------------------------------------
# =============================================================================
def test_handling_single_extension_error(err_workbench):
"""Check handling a single extension error.
"""
plugin = err_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['extensions']
assert handler.handle(err_workbench, {'point': 'test', 'errors': {}})
with pytest.raises(Exception):
handler.handle(err_workbench, {})
def test_handling_multiple_extension_errors(err_workbench):
"""Check handling multiple extension errors.
"""
plugin = err_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['extensions']
assert handler.handle(err_workbench, [{'point': 'test', 'errors': {}}])
with pytest.raises(Exception):
handler.handle(err_workbench, {})
def test_reporting_on_extension_errors(exopy_qtbot, err_workbench):
"""Check reporting extension errors.
"""
plugin = err_workbench.get_plugin('exopy.app.errors')
handler = plugin._errors_handlers.contributions['extensions']
widget = handler.report(err_workbench)
assert isinstance(widget, MultilineField)
show_and_close_widget(exopy_qtbot, widget)
handler.errors = {'test': {'errror': 'msg'}}
widget = handler.report(err_workbench)
assert isinstance(widget, HierarchicalErrorsDisplay)
show_and_close_widget(exopy_qtbot, widget)
# =============================================================================
# --- API import --------------------------------------------------------------
# =============================================================================
def test_api_import():
"""Test importing the api module.
"""
from exopy.app.errors import api
assert api.__all__
|
sanguinariojoe/FreeCAD
|
refs/heads/master
|
src/Mod/Robot/Init.py
|
18
|
# FreeCAD init script of the Robot module
# (c) 2001 Juergen Riegel
#***************************************************************************
#* Copyright (c) 2002 Juergen Riegel <juergen.riegel@web.de> *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************/
|
AndreaCrotti/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/group_by.py
|
172
|
# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' Create inventory groups based on variables '''
### We need to be able to modify the inventory
BYPASS_HOST_LOOP = True
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
        if 'key' not in self._task.args:
return dict(failed=True, msg="the 'key' param is required when using group_by")
group_name = self._task.args.get('key')
group_name = group_name.replace(' ','-')
return dict(changed=True, add_group=group_name)
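# Illustrative playbook sketch (not part of the plugin itself): group_by is
# normally invoked as a task, e.g.
#
#   - name: group hosts by distribution
#     group_by: key="os_{{ ansible_distribution }}"
#
# Each host is then also placed in an inventory group such as "os_Ubuntu";
# spaces in the key are replaced with '-' as in run() above.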
|
michaelld/gnuradio
|
refs/heads/master
|
gr-utils/python/modtool/core/info.py
|
2
|
#
# Copyright 2013, 2018 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" Returns information about a module """
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from ..tools import get_modname
from .base import ModTool, ModToolException
class ModToolInfo(ModTool):
""" Return information about a given module """
name = 'info'
description = 'Return information about a given module.'
def __init__(self, python_readable=False, suggested_dirs=None, **kwargs):
ModTool.__init__(self, **kwargs)
        # Don't call ModTool._validate(), it is too chatty!
self._directory = self.dir
self._python_readable = python_readable
self._suggested_dirs = suggested_dirs
def run(self):
""" Go, go, go! """
mod_info = dict()
mod_info['base_dir'] = self._get_base_dir(self._directory)
if mod_info['base_dir'] is None:
raise ModToolException('{}' if self._python_readable else "No module found.")
os.chdir(mod_info['base_dir'])
mod_info['modname'] = get_modname()
if mod_info['modname'] is None:
raise ModToolException('{}' if self._python_readable else "No module found.")
if self.info['version'] == '36' and (
os.path.isdir(os.path.join('include', mod_info['modname'])) or
os.path.isdir(os.path.join('include', 'gnuradio', mod_info['modname']))
):
self.info['version'] = '37'
mod_info['version'] = self.info['version']
if 'is_component' in list(self.info.keys()) and self.info['is_component']:
mod_info['is_component'] = True
mod_info['incdirs'] = []
mod_incl_dir = os.path.join(mod_info['base_dir'], 'include')
if os.path.isdir(os.path.join(mod_incl_dir, mod_info['modname'])):
mod_info['incdirs'].append(os.path.join(mod_incl_dir, mod_info['modname']))
else:
mod_info['incdirs'].append(mod_incl_dir)
build_dir = self._get_build_dir(mod_info)
if build_dir is not None:
mod_info['build_dir'] = build_dir
mod_info['incdirs'] += self._get_include_dirs(mod_info)
if self._python_readable:
print(str(mod_info))
else:
self._pretty_print(mod_info)
def _get_base_dir(self, start_dir):
""" Figure out the base dir (where the top-level cmake file is) """
base_dir = os.path.abspath(start_dir)
if self._check_directory(base_dir):
return base_dir
else:
(up_dir, this_dir) = os.path.split(base_dir)
if os.path.split(up_dir)[1] == 'include':
up_dir = os.path.split(up_dir)[0]
if self._check_directory(up_dir):
return up_dir
return None
def _get_build_dir(self, mod_info):
""" Figure out the build dir (i.e. where you run 'cmake'). This checks
for a file called CMakeCache.txt, which is created when running cmake.
If that hasn't happened, the build dir cannot be detected, unless it's
called 'build', which is then assumed to be the build dir. """
base_build_dir = mod_info['base_dir']
if 'is_component' in list(mod_info.keys()):
(base_build_dir, rest_dir) = os.path.split(base_build_dir)
has_build_dir = os.path.isdir(os.path.join(base_build_dir, 'build'))
if (has_build_dir and os.path.isfile(os.path.join(base_build_dir, 'CMakeCache.txt'))):
return os.path.join(base_build_dir, 'build')
else:
for (dirpath, dirnames, filenames) in os.walk(base_build_dir):
if 'CMakeCache.txt' in filenames:
return dirpath
if has_build_dir:
return os.path.join(base_build_dir, 'build')
return None
def _get_include_dirs(self, mod_info):
""" Figure out include dirs for the make process. """
inc_dirs = []
path_or_internal = {True: 'INTERNAL',
False: 'PATH'}['is_component' in list(mod_info.keys())]
try:
cmakecache_fid = open(os.path.join(mod_info['build_dir'], 'CMakeCache.txt'))
for line in cmakecache_fid:
if line.find('GNURADIO_RUNTIME_INCLUDE_DIRS:{}'.format(path_or_internal)) != -1:
inc_dirs += line.replace('GNURADIO_RUNTIME_INCLUDE_DIRS:{}='.format(path_or_internal), '').strip().split(';')
except IOError:
pass
if not inc_dirs and self._suggested_dirs is not None:
inc_dirs = [os.path.normpath(path) for path in self._suggested_dirs.split(':') if os.path.isdir(path)]
return inc_dirs
    def _pretty_print(self, mod_info):
""" Output the module info in human-readable format """
index_names = {'base_dir': 'Base directory',
'modname': 'Module name',
'is_component': 'Is GR component',
'build_dir': 'Build directory',
'incdirs': 'Include directories'}
for key in list(mod_info.keys()):
if key == 'version':
print(" API version: {}".format({
'36': 'pre-3.7',
'37': 'post-3.7',
'38': 'post-3.8',
'autofoo': 'Autotools (pre-3.5)'
}[mod_info['version']]))
else:
print('%19s: %s' % (index_names[key], mod_info[key]))
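# Illustrative usage sketch (not part of the original class): ModToolInfo backs
# the "gr_modtool info" command, typically run inside an out-of-tree module,
# e.g.
#   $ gr_modtool info
#   $ gr_modtool info --python-readable
# The flag name here is assumed from the python_readable constructor argument
# above; with it set, run() prints str(mod_info) instead of the pretty table.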
|
orlenko/bccf
|
refs/heads/master
|
src/mezzanine/project_template/deploy/gunicorn.conf.py
|
30
|
import os
bind = "127.0.0.1:%(gunicorn_port)s"
workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1
loglevel = "error"
proc_name = "%(proj_name)s"
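# Illustrative note (not part of the original config): the workers line applies
# the common "2 * CPU cores + 1" rule of thumb, so a 4-core host would start
# (4 * 2) + 1 = 9 gunicorn worker processes.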
|
pchauncey/ansible
|
refs/heads/devel
|
test/units/parsing/utils/test_jsonify.py
|
119
|
# -*- coding: utf-8 -*-
# (c) 2016, James Cammarata <jimi@sngx.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.parsing.utils.jsonify import jsonify
class TestJsonify(unittest.TestCase):
def test_jsonify_simple(self):
self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}')
def test_jsonify_simple_format(self):
res = jsonify(dict(a=1, b=2, c=3), format=True)
cleaned = "".join([x.strip() for x in res.splitlines()])
self.assertEqual(cleaned, '{"a": 1,"b": 2,"c": 3}')
def test_jsonify_unicode(self):
self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}')
def test_jsonify_empty(self):
self.assertEqual(jsonify(None), '{}')
|
Stanford-Online/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/partitions/partitions_service.py
|
9
|
"""
This is a service-like API that assigns tracks which groups users are in for various
user partitions. It uses the user_service key/value store provided by the LMS runtime to
persist the assignments.
"""
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import logging
from openedx.core.djangoapps.request_cache.middleware import request_cached
from xmodule.partitions.partitions import UserPartition, UserPartitionError, ENROLLMENT_TRACK_PARTITION_ID
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
FEATURES = getattr(settings, 'FEATURES', {})
@request_cached
def get_all_partitions_for_course(course, active_only=False):
"""
A method that returns all `UserPartitions` associated with a course, as a List.
This will include the ones defined in course.user_partitions, but it may also
include dynamically included partitions (such as the `EnrollmentTrackUserPartition`).
Args:
course: the course for which user partitions should be returned.
active_only: if `True`, only partitions with `active` set to True will be returned.
Returns:
A List of UserPartitions associated with the course.
"""
all_partitions = course.user_partitions + _get_dynamic_partitions(course)
if active_only:
all_partitions = [partition for partition in all_partitions if partition.active]
return all_partitions
def _get_dynamic_partitions(course):
"""
Return the dynamic user partitions for this course.
If none exists, returns an empty array.
"""
enrollment_partition = _create_enrollment_track_partition(course)
return [enrollment_partition] if enrollment_partition else []
def _create_enrollment_track_partition(course):
"""
Create and return the dynamic enrollment track user partition.
If it cannot be created, None is returned.
"""
if not FEATURES.get('ENABLE_ENROLLMENT_TRACK_USER_PARTITION'):
return None
try:
enrollment_track_scheme = UserPartition.get_scheme("enrollment_track")
except UserPartitionError:
log.warning("No 'enrollment_track' scheme registered, EnrollmentTrackUserPartition will not be created.")
return None
used_ids = set(p.id for p in course.user_partitions)
if ENROLLMENT_TRACK_PARTITION_ID in used_ids:
log.warning(
"Can't add 'enrollment_track' partition, as ID {id} is assigned to {partition} in course {course}.".format(
id=ENROLLMENT_TRACK_PARTITION_ID,
partition=_get_partition_from_id(course.user_partitions, ENROLLMENT_TRACK_PARTITION_ID).name,
course=unicode(course.id)
)
)
return None
partition = enrollment_track_scheme.create_user_partition(
id=ENROLLMENT_TRACK_PARTITION_ID,
name=_(u"Enrollment Track Groups"),
description=_(u"Partition for segmenting users by enrollment track"),
parameters={"course_id": unicode(course.id)}
)
return partition
class PartitionService(object):
"""
This is an XBlock service that returns information about the user partitions associated
with a given course.
"""
def __init__(self, course_id, cache=None):
self._course_id = course_id
self._cache = cache
def get_course(self):
"""
Return the course instance associated with this PartitionService.
This default implementation looks up the course from the modulestore.
"""
return modulestore().get_course(self._course_id)
@property
def course_partitions(self):
"""
Return the set of partitions assigned to self._course_id (both those set directly on the course
through course.user_partitions, and any dynamic partitions that exist). Note: this returns
both active and inactive partitions.
"""
return get_all_partitions_for_course(self.get_course())
def get_user_group_id_for_partition(self, user, user_partition_id):
"""
If the user is already assigned to a group in user_partition_id, return the
group_id.
If not, assign them to one of the groups, persist that decision, and
return the group_id.
Args:
user_partition_id -- an id of a partition that's hopefully in the
runtime.user_partitions list.
Returns:
The id of one of the groups in the specified user_partition_id (as a string).
Raises:
ValueError if the user_partition_id isn't found.
"""
cache_key = "PartitionService.ugidfp.{}.{}.{}".format(
user.id, self._course_id, user_partition_id
)
if self._cache and (cache_key in self._cache):
return self._cache[cache_key]
user_partition = self.get_user_partition(user_partition_id)
if user_partition is None:
raise ValueError(
"Configuration problem! No user_partition with id {0} "
"in course {1}".format(user_partition_id, self._course_id)
)
group = self.get_group(user, user_partition)
group_id = group.id if group else None
if self._cache is not None:
self._cache[cache_key] = group_id
return group_id
def get_user_partition(self, user_partition_id):
"""
Look for a user partition with a matching id in the course's partitions.
Note that this method can return an inactive user partition.
Returns:
A UserPartition, or None if not found.
"""
return _get_partition_from_id(self.course_partitions, user_partition_id)
def get_group(self, user, user_partition, assign=True):
"""
Returns the group from the specified user partition to which the user is assigned.
If the user has not yet been assigned, a group will be chosen for them based upon
the partition's scheme.
"""
return user_partition.scheme.get_group_for_user(
self._course_id, user, user_partition, assign=assign,
)
def _get_partition_from_id(partitions, user_partition_id):
"""
Look for a user partition with a matching id in the provided list of partitions.
Returns:
A UserPartition, or None if not found.
"""
for partition in partitions:
if partition.id == user_partition_id:
return partition
return None
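# Illustrative usage sketch (not part of the original module): typical use from
# an XBlock runtime; the user and course_id values are whatever the runtime
# already holds.
#
#   service = PartitionService(course_id)
#   group_id = service.get_user_group_id_for_partition(
#       user, ENROLLMENT_TRACK_PARTITION_ID)
#   # group_id is the id of the group the user was assigned to, or None if the
#   # partition's scheme declined to assign one.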
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/scrapy/scrapy/tests/test_cmdline/extensions.py
|
19
|
"""A test extension used to check the settings loading order"""
from scrapy.conf import settings
settings.overrides['TEST1'] = "%s + %s" % (settings['TEST1'], 'loaded')
class TestExtension(object):
def __init__(self):
settings.overrides['TEST1'] = "%s + %s" % (settings['TEST1'], 'started')
|
JMMolenaar/cadnano2.5
|
refs/heads/master
|
cadnano/gui/views/sliceview/partitem.py
|
1
|
from cadnano.gui.controllers.itemcontrollers.partitemcontroller import PartItemController
from .emptyhelixitem import EmptyHelixItem
from .virtualhelixitem import VirtualHelixItem
from .activesliceitem import ActiveSliceItem
from . import slicestyles as styles
import cadnano.util as util
from cadnano import getReopen
from PyQt5.QtCore import QPointF, Qt, QRectF, QEvent, pyqtSignal, pyqtSlot, QObject
from PyQt5.QtGui import QBrush, QPainterPath, QPen
from PyQt5.QtWidgets import QGraphicsItem, QGraphicsEllipseItem
_RADIUS = styles.SLICE_HELIX_RADIUS
_DEFAULT_RECT = QRectF(0, 0, 2 * _RADIUS, 2 * _RADIUS)
HIGHLIGHT_WIDTH = styles.SLICE_HELIX_MOD_HILIGHT_WIDTH
DELTA = (HIGHLIGHT_WIDTH - styles.SLICE_HELIX_STROKE_WIDTH)/2.
_HOVER_RECT = _DEFAULT_RECT.adjusted(-DELTA, -DELTA, DELTA, DELTA)
_MOD_PEN = QPen(styles.BLUE_STROKE, HIGHLIGHT_WIDTH)
class PartItem(QGraphicsItem):
_RADIUS = styles.SLICE_HELIX_RADIUS
def __init__(self, model_part, parent=None):
"""
Parent should be either a SliceRootItem, or an AssemblyItem.
Invariant: keys in _empty_helix_hash = range(_nrows) x range(_ncols)
where x is the cartesian product.
Order matters for deselector, probe, and setlattice
"""
super(PartItem, self).__init__(parent)
self._part = model_part
self._controller = PartItemController(self, model_part)
self._active_slice_item = ActiveSliceItem(self, model_part.activeBaseIndex())
self._scaleFactor = self._RADIUS/model_part.radius()
self._empty_helix_hash = {}
self._virtual_helix_hash = {}
self._nrows, self._ncols = 0, 0
self._rect = QRectF(0, 0, 0, 0)
self._initDeselector()
# Cache of VHs that were active as of last call to activeSliceChanged
# If None, all slices will be redrawn and the cache will be filled.
# Connect destructor. This is for removing a part from scenes.
self.probe = self.IntersectionProbe(self)
# initialize the PartItem with an empty set of old coords
self._setLattice([], model_part.generatorFullLattice())
self.setFlag(QGraphicsItem.ItemHasNoContents) # never call paint
self.setZValue(styles.ZPARTITEM)
self._initModifierCircle()
# end def
def _initDeselector(self):
"""
The deselector grabs mouse events that missed a slice and clears the
selection when it gets one.
"""
self.deselector = ds = PartItem.Deselector(self)
ds.setParentItem(self)
ds.setFlag(QGraphicsItem.ItemStacksBehindParent)
ds.setZValue(styles.ZDESELECTOR)
def _initModifierCircle(self):
self._can_show_mod_circ = False
self._mod_circ = m_c = QGraphicsEllipseItem(_HOVER_RECT, self)
m_c.setPen(_MOD_PEN)
m_c.hide()
# end def
### SIGNALS ###
### SLOTS ###
def partActiveVirtualHelixChangedSlot(self, part, virtualHelix):
pass
def partDimensionsChangedSlot(self, sender):
pass
# end def
def partHideSlot(self, sender):
self.hide()
# end def
def partParentChangedSlot(self, sender):
"""docstring for partParentChangedSlot"""
# print "PartItem.partParentChangedSlot"
pass
def partRemovedSlot(self, sender):
"""docstring for partRemovedSlot"""
self._active_slice_item.removed()
self.parentItem().removePartItem(self)
scene = self.scene()
self._virtual_helix_hash = None
for item in list(self._empty_helix_hash.items()):
key, val = item
scene.removeItem(val)
del self._empty_helix_hash[key]
self._empty_helix_hash = None
scene.removeItem(self)
self._part = None
self.probe = None
self._mod_circ = None
self.deselector = None
self._controller.disconnectSignals()
self._controller = None
# end def
def partVirtualHelicesReorderedSlot(self, sender, orderedCoordList):
pass
# end def
def partPreDecoratorSelectedSlot(self, sender, row, col, baseIdx):
"""docstring for partPreDecoratorSelectedSlot"""
vhi = self.getVirtualHelixItemByCoord(row, col)
view = self.window().slice_graphics_view
view.scene_root_item.resetTransform()
view.centerOn(vhi)
view.zoomIn()
mC = self._mod_circ
x,y = self._part.latticeCoordToPositionXY(row, col, self.scaleFactor())
mC.setPos(x,y)
if self._can_show_mod_circ:
mC.show()
# end def
def partVirtualHelixAddedSlot(self, sender, virtual_helix):
vh = virtual_helix
coords = vh.coord()
empty_helix_item = self._empty_helix_hash[coords]
# TODO test to see if self._virtual_helix_hash is necessary
vhi = VirtualHelixItem(vh, empty_helix_item)
self._virtual_helix_hash[coords] = vhi
# end def
def partVirtualHelixRenumberedSlot(self, sender, coord):
pass
# end def
def partVirtualHelixResizedSlot(self, sender, coord):
pass
# end def
def updatePreXoverItemsSlot(self, sender, virtualHelix):
pass
# end def
### ACCESSORS ###
def boundingRect(self):
return self._rect
# end def
def part(self):
return self._part
# end def
def scaleFactor(self):
return self._scaleFactor
# end def
def setPart(self, newPart):
self._part = newPart
# end def
def window(self):
return self.parentItem().window()
# end def
### PRIVATE SUPPORT METHODS ###
def _upperLeftCornerForCoords(self, row, col):
pass # subclass
# end def
def _updateGeometry(self):
self._rect = QRectF(0, 0, *self.part().dimensions())
# end def
def _spawnEmptyHelixItemAt(self, row, column):
helix = EmptyHelixItem(row, column, self)
# helix.setFlag(QGraphicsItem.ItemStacksBehindParent, True)
self._empty_helix_hash[(row, column)] = helix
# end def
def _killHelixItemAt(self, row, column):
s = self._empty_helix_hash[(row, column)]
s.scene().removeItem(s)
del self._empty_helix_hash[(row, column)]
# end def
def _setLattice(self, old_coords, new_coords):
"""A private method used to change the number of rows,
cols in response to a change in the dimensions of the
part represented by the receiver"""
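# Illustrative diff (not from the original source): with
# old_coords = [(0, 0), (0, 1)] and new_coords = [(0, 1), (1, 1)],
# the empty helix item at (0, 0) is killed, one is spawned at (1, 1),
# and (0, 1) is left untouched.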
old_set = set(old_coords)
old_list = list(old_set)
new_set = set(new_coords)
new_list = list(new_set)
for coord in old_list:
if coord not in new_set:
self._killHelixItemAt(*coord)
# end for
for coord in new_list:
if coord not in old_set:
self._spawnEmptyHelixItemAt(*coord)
# end for
# self._updateGeometry(newCols, newRows)
# self.prepareGeometryChange()
# the Deselector copies our rect so it changes too
self.deselector.prepareGeometryChange()
if not getReopen():
self.zoomToFit()
# end def
### PUBLIC SUPPORT METHODS ###
def getVirtualHelixItemByCoord(self, row, column):
if (row, column) in self._empty_helix_hash:
return self._virtual_helix_hash.get((row, column))
else:
return None
# end def
def paint(self, painter, option, widget=None):
pass
# end def
def selectionWillChange(self, newSel):
if self.part() is None:
return
if self.part().selectAllBehavior():
return
for sh in self._empty_helix_hash.values():
sh.setSelected(sh.virtualHelix() in newSel)
# end def
def setModifyState(self, can_show):
"""Hides the modifier circle when the modify state is disabled."""
self._can_show_mod_circ = can_show
if not can_show:
self._mod_circ.hide()
def updateStatusBar(self, statusString):
"""Shows statusString in the MainWindow's status bar."""
pass # disabled for now.
# self.window().statusBar().showMessage(statusString, timeout)
def vhAtCoordsChanged(self, row, col):
self._empty_helix_hash[(row, col)].update()
# end def
def zoomToFit(self):
thescene = self.scene()
theview = thescene.views()[0]
theview.zoomToFit()
# end def
### EVENT HANDLERS ###
def mousePressEvent(self, event):
# self.createOrAddBasesToVirtualHelix()
QGraphicsItem.mousePressEvent(self, event)
# end def
class Deselector(QGraphicsItem):
"""The deselector lives behind all the slices and observes mouse press
events that miss slices, emptying the selection when they do"""
def __init__(self, parent_HGI):
super(PartItem.Deselector, self).__init__()
self.parent_HGI = parent_HGI
def mousePressEvent(self, event):
self.parent_HGI.part().setSelection(())
super(PartItem.Deselector, self).mousePressEvent(event)
def boundingRect(self):
return self.parent_HGI.boundingRect()
def paint(self, painter, option, widget=None):
pass
class IntersectionProbe(QGraphicsItem):
def boundingRect(self):
return QRectF(0, 0, .1, .1)
def paint(self, painter, option, widget=None):
pass
|
mbbx6spp/nixops
|
refs/heads/master
|
nixops/ec2_utils.py
|
7
|
# -*- coding: utf-8 -*-
import os
import boto.ec2
import time
import random
import nixops.util
from boto.exception import EC2ResponseError
from boto.exception import SQSError
from boto.exception import BotoServerError
def fetch_aws_secret_key(access_key_id):
"""Fetch the secret access key corresponding to the given access key ID from the environment or from ~/.ec2-keys"""
secret_access_key = os.environ.get('EC2_SECRET_KEY') or os.environ.get('AWS_SECRET_ACCESS_KEY')
path = os.path.expanduser("~/.ec2-keys")
if os.path.isfile(path):
f = open(path, 'r')
contents = f.read()
f.close()
for l in contents.splitlines():
l = l.split("#")[0] # drop comments
w = l.split()
if len(w) < 2 or len(w) > 3: continue
if len(w) == 3 and w[2] == access_key_id:
access_key_id = w[0]
secret_access_key = w[1]
break
if w[0] == access_key_id:
secret_access_key = w[1]
break
if not secret_access_key:
raise Exception("please set $EC2_SECRET_KEY or $AWS_SECRET_ACCESS_KEY, or add the key for ‘{0}’ to ~/.ec2-keys"
.format(access_key_id))
return (access_key_id, secret_access_key)
def connect(region, access_key_id):
"""Connect to the specified EC2 region using the given access key."""
assert region
(access_key_id, secret_access_key) = fetch_aws_secret_key(access_key_id)
conn = boto.ec2.connect_to_region(
region_name=region, aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key)
if not conn:
raise Exception("invalid EC2 region ‘{0}’".format(region))
return conn
def get_access_key_id():
return os.environ.get('EC2_ACCESS_KEY') or os.environ.get('AWS_ACCESS_KEY_ID')
def retry(f, error_codes=[], logger=None):
"""
Retry function f up to 7 times. If error_codes argument is empty list, retry on all EC2 response errors,
otherwise, only on the specified error codes.
"""
def handle_exception(e):
if i == num_retries or (error_codes != [] and e.error_code not in error_codes):
raise e
if logger is not None:
logger.log("got (possibly transient) EC2 error code ‘{0}’, retrying...".format(e.error_code))
i = 0
num_retries = 7
while i <= num_retries:
i += 1
next_sleep = 5 + random.random() * (2 ** i)
try:
return f()
except EC2ResponseError as e:
handle_exception(e)
except SQSError as e:
handle_exception(e)
except BotoServerError as e:
if e.error_code == "RequestLimitExceeded":
num_retries += 1
else:
handle_exception(e)
except Exception as e:
raise e
time.sleep(next_sleep)
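# Illustrative usage (assumed, not part of the original module): wrap a boto
# call that may hit transient EC2 errors and only retry on request limits.
#
#   conn = connect('us-east-1', get_access_key_id())
#   instances = retry(lambda: conn.get_only_instances(),
#                     error_codes=['RequestLimitExceeded'], logger=None)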
def get_volume_by_id(conn, volume_id, allow_missing=False):
"""Get volume object by volume id."""
try:
volumes = conn.get_all_volumes([volume_id])
if len(volumes) != 1:
raise Exception("unable to find volume ‘{0}’".format(volume_id))
return volumes[0]
except boto.exception.EC2ResponseError as e:
if e.error_code != "InvalidVolume.NotFound": raise
if not allow_missing: raise
return None
def wait_for_volume_available(conn, volume_id, logger, states=['available']):
"""Wait for an EBS volume to become available."""
logger.log_start("waiting for volume ‘{0}’ to become available... ".format(volume_id))
def check_available():
# Allow volume to be missing due to eventual consistency.
volume = get_volume_by_id(conn, volume_id, allow_missing=True)
if volume is None:
return False
logger.log_continue("[{0}] ".format(volume.status))
return volume.status in states
nixops.util.check_wait(check_available, max_tries=90)
logger.log_end('')
|
LowResourceLanguages/hltdi-l3
|
refs/heads/master
|
disambiguatr/nltk/classify/mallet.py
|
5
|
# Natural Language Toolkit: Interface to Mallet Machine Learning Package
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $
"""
A set of functions used to interface with the external U{Mallet
<http://mallet.cs.umass.edu/>} machine learning package. Before
C{mallet} can be used, you should tell NLTK where it can find the
C{mallet} package, using the L{config_mallet()} function. Typical
usage:
>>> import nltk
>>> nltk.config_mallet('.../path/to/mallet')
"""
__docformat__ = 'epytext en'
import os
import os.path
from nltk.internals import find_binary, java
######################################################################
#{ Configuration
######################################################################
_mallet_home = None
_mallet_classpath = None
def config_mallet(mallet_home=None):
"""
Configure NLTK's interface to the C{mallet} machine learning
package.
@param mallet_home: The full path to the C{mallet} directory. If
not specified, then nltk will search the system for a
C{mallet} directory; and if one is not found, it will raise a
C{LookupError} exception.
@type mallet_home: C{string}
"""
global _mallet_home, _mallet_classpath
# We don't actually care about this binary -- we just use it to
# make sure we've found the right directory.
mallethon_bin = find_binary(
'mallet', mallet_home,
env_vars=['MALLET', 'MALLET_HOME'],
binary_names=['mallethon'],
url='http://mallet.cs.umass.edu/')
# Record the location where mallet lives.
bin_dir = os.path.split(mallethon_bin)[0]
_mallet_home = os.path.split(bin_dir)[0]
# Construct a classpath for using mallet.
lib_dir = os.path.join(_mallet_home, 'lib')
if not os.path.isdir(lib_dir):
raise ValueError('While configuring mallet: directory %r '
'not found.' % lib_dir)
_mallet_classpath = ':'.join([os.path.join(lib_dir, filename)
for filename in sorted(os.listdir(lib_dir))
if filename.endswith('.jar')])
def call_mallet(cmd, classpath=None, stdin=None, stdout=None, stderr=None,
blocking=True):
"""
Call L{nltk.internals.java()} with the given command, and with the
classpath modified to include both C{nltk.jar} and all the C{.jar}
files defined by Mallet.
See L{nltk.internals.java()} for parameter and return value
descriptions.
"""
if _mallet_classpath is None:
config_mallet()
# Set up the classpath
if classpath is None:
classpath = _mallet_classpath
else:
classpath += ':' + _mallet_classpath
# Delegate to java()
return java(cmd, classpath, stdin, stdout, stderr, blocking)
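# Illustrative usage (assumed, not part of the NLTK distribution): once
# config_mallet() has located the jars, a Mallet class can be run through
# call_mallet(), which forwards to nltk.internals.java() with the Mallet
# classpath appended.
#
#   (stdout, stderr) = call_mallet(['cc.mallet.classify.tui.Text2Vectors', '--help'])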
|
salamer/django
|
refs/heads/master
|
tests/postgres_tests/array_default_migrations/0002_integerarraymodel_field_2.py
|
377
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('postgres_tests', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='integerarraydefaultmodel',
name='field_2',
field=django.contrib.postgres.fields.ArrayField(models.IntegerField(), default=[], size=None),
preserve_default=False,
),
]
|
jwomeara/PDAL
|
refs/heads/master
|
vendor/gtest-1.7.0/test/gtest_shuffle_test.py
|
3023
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
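# Illustrative example (not part of the original test):
#   GetTestCases(['FooTest.A', 'FooTest.B', 'BarTest.C'])
# returns ['FooTest', 'BarTest'].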
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
|
Demolisty24/AlexaFood-Backend
|
refs/heads/master
|
venv/Lib/encodings/mac_greek.py
|
593
|
""" Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-greek',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xb9' # 0x81 -> SUPERSCRIPT ONE
u'\xb2' # 0x82 -> SUPERSCRIPT TWO
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xb3' # 0x84 -> SUPERSCRIPT THREE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0384' # 0x8B -> GREEK TONOS
u'\xa8' # 0x8C -> DIAERESIS
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xa3' # 0x92 -> POUND SIGN
u'\u2122' # 0x93 -> TRADE MARK SIGN
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u2022' # 0x96 -> BULLET
u'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
u'\u2030' # 0x98 -> PER MILLE SIGN
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xa6' # 0x9B -> BROKEN BAR
u'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
u'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
u'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
u'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
u'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
u'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\xa7' # 0xAC -> SECTION SIGN
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xb0' # 0xAE -> DEGREE SIGN
u'\xb7' # 0xAF -> MIDDLE DOT
u'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
u'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
u'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
u'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
u'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
u'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
u'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
u'\xac' # 0xC2 -> NOT SIGN
u'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
u'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
u'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
u'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2015' # 0xD1 -> HORIZONTAL BAR
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
u'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
u'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03be' # 0xEA -> GREEK SMALL LETTER XI
u'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xED -> GREEK SMALL LETTER MU
u'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
u'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
u'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
u'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
u'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
u'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
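# Illustrative round trip (not part of the generated file): with the codec
# registered by the encodings package, u'\u03b1\u03b2'.encode('mac_greek')
# yields '\xe1\xe2', and '\xe1\xe2'.decode('mac_greek') yields the two Greek
# letters back.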
|
jenalgit/django
|
refs/heads/master
|
tests/bash_completion/tests.py
|
327
|
"""
A series of tests to establish that the command-line bash completion works.
"""
import os
import sys
import unittest
from django.apps import apps
from django.core.management import ManagementUtility
from django.test.utils import captured_stdout
class BashCompletionTests(unittest.TestCase):
"""
Testing the Python level bash completion code.
This requires setting up the environment as if we got passed data
from bash.
"""
def setUp(self):
self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
os.environ['DJANGO_AUTO_COMPLETE'] = '1'
def tearDown(self):
if self.old_DJANGO_AUTO_COMPLETE:
os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE
else:
del os.environ['DJANGO_AUTO_COMPLETE']
def _user_input(self, input_str):
"""
Set the environment and the list of command line arguments.
This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
an array consisting of the individual words in the current command
line, the latter is the index of the current cursor position, so in
case a word is completed and the cursor is placed after a whitespace,
$COMP_CWORD must be incremented by 1:
* 'django-admin start' -> COMP_CWORD=1
* 'django-admin startproject' -> COMP_CWORD=1
* 'django-admin startproject ' -> COMP_CWORD=2
"""
os.environ['COMP_WORDS'] = input_str
idx = len(input_str.split(' ')) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(' ') else idx
os.environ['COMP_CWORD'] = str(comp_cword)
sys.argv = input_str.split()
def _run_autocomplete(self):
util = ManagementUtility(argv=sys.argv)
with captured_stdout() as stdout:
try:
util.autocomplete()
except SystemExit:
pass
return stdout.getvalue().strip().split('\n')
def test_django_admin_py(self):
"django_admin.py will autocomplete option flags"
self._user_input('django-admin sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_manage_py(self):
"manage.py will autocomplete option flags"
self._user_input('manage.py sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_custom_command(self):
"A custom command can autocomplete option flags"
self._user_input('django-admin test_command --l')
output = self._run_autocomplete()
self.assertEqual(output, ['--list'])
def test_subcommands(self):
"Subcommands can be autocompleted"
self._user_input('django-admin sql')
output = self._run_autocomplete()
self.assertEqual(output, ['sqlflush sqlmigrate sqlsequencereset'])
def test_completed_subcommand(self):
"Show option flags in case a subcommand is completed"
self._user_input('django-admin startproject ') # Trailing whitespace
output = self._run_autocomplete()
for item in output:
self.assertTrue(item.startswith('--'))
def test_help(self):
"No errors, just an empty list if there are no autocomplete options"
self._user_input('django-admin help --')
output = self._run_autocomplete()
self.assertEqual(output, [''])
def test_app_completion(self):
"Application names will be autocompleted for an AppCommand"
self._user_input('django-admin sqlmigrate a')
output = self._run_autocomplete()
a_labels = sorted(app_config.label
for app_config in apps.get_app_configs()
if app_config.label.startswith('a'))
self.assertEqual(output, a_labels)
|
vmthunder/nova
|
refs/heads/master
|
nova/tests/objects/test_virtual_interface.py
|
31
|
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova.objects import virtual_interface as vif_obj
from nova.tests.objects import test_objects
fake_vif = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 1,
'address': '00:00:00:00:00:00',
'network_id': 123,
'instance_uuid': 'fake-uuid',
'uuid': 'fake-uuid-2',
}
class _TestVirtualInterface(object):
@staticmethod
def _compare(test, db, obj):
for field, value in db.items():
test.assertEqual(db[field], obj[field])
def test_get_by_id(self):
with mock.patch.object(db, 'virtual_interface_get') as get:
get.return_value = fake_vif
vif = vif_obj.VirtualInterface.get_by_id(self.context, 1)
self._compare(self, fake_vif, vif)
def test_get_by_uuid(self):
with mock.patch.object(db, 'virtual_interface_get_by_uuid') as get:
get.return_value = fake_vif
vif = vif_obj.VirtualInterface.get_by_uuid(self.context,
'fake-uuid-2')
self._compare(self, fake_vif, vif)
def test_get_by_address(self):
with mock.patch.object(db, 'virtual_interface_get_by_address') as get:
get.return_value = fake_vif
vif = vif_obj.VirtualInterface.get_by_address(self.context,
'00:00:00:00:00:00')
self._compare(self, fake_vif, vif)
def test_get_by_instance_and_network(self):
with mock.patch.object(db,
'virtual_interface_get_by_instance_and_network') as get:
get.return_value = fake_vif
vif = vif_obj.VirtualInterface.get_by_instance_and_network(
self.context, 'fake-uuid', 123)
self._compare(self, fake_vif, vif)
def test_create(self):
vif = vif_obj.VirtualInterface()
vif.address = '00:00:00:00:00:00'
vif.network_id = 123
vif.instance_uuid = 'fake-uuid'
vif.uuid = 'fake-uuid-2'
with mock.patch.object(db, 'virtual_interface_create') as create:
create.return_value = fake_vif
vif.create(self.context)
self.assertEqual(self.context, vif._context)
vif._context = None
self._compare(self, fake_vif, vif)
def test_delete_by_instance_uuid(self):
with mock.patch.object(db,
'virtual_interface_delete_by_instance') as delete:
vif_obj.VirtualInterface.delete_by_instance_uuid(self.context,
'fake-uuid')
delete.assert_called_with(self.context, 'fake-uuid')
class TestVirtualInterfaceObject(test_objects._LocalTest,
_TestVirtualInterface):
pass
class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest,
_TestVirtualInterface):
pass
class _TestVirtualInterfaceList(object):
def test_get_all(self):
with mock.patch.object(db, 'virtual_interface_get_all') as get:
get.return_value = [fake_vif]
vifs = vif_obj.VirtualInterfaceList.get_all(self.context)
self.assertEqual(1, len(vifs))
_TestVirtualInterface._compare(self, fake_vif, vifs[0])
def test_get_by_instance_uuid(self):
with mock.patch.object(db, 'virtual_interface_get_by_instance') as get:
get.return_value = [fake_vif]
vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(
self.context, 'fake-uuid')
self.assertEqual(1, len(vifs))
_TestVirtualInterface._compare(self, fake_vif, vifs[0])
class TestVirtualInterfaceList(test_objects._LocalTest,
_TestVirtualInterfaceList):
pass
class TestRemoteVirtualInterfaceList(test_objects._RemoteTest,
_TestVirtualInterfaceList):
pass
|
ak64th/IntQuiz
|
refs/heads/master
|
api.py
|
1
|
# coding=utf-8
from flask import request, url_for, g
from flask_peewee.rest import RestAPI, RestrictOwnerResource, Authentication, RestResource
from app import app
from auth import auth
from models import *
class IntApi(RestAPI):
@property
def registry(self):
return self._registry
class IntAuthentication(Authentication):
def __init__(self, auth, protected_methods=None):
super(IntAuthentication, self).__init__(protected_methods)
self.auth = auth
def authorize(self):
if request.method in self.protected_methods:
return self.auth.get_logged_in_user()
return True
class IntRestResource(RestResource):
paginate_by = 10
def get_request_metadata(self, paginated_query):
var = paginated_query.page_var
request_arguments = request.args.copy()
current_page = paginated_query.get_page()
next = previous = ''
if current_page > 1:
request_arguments[var] = current_page - 1
previous = url_for(self.get_url_name('api_list'), **request_arguments)
if current_page < paginated_query.get_pages():
request_arguments[var] = current_page + 1
next = url_for(self.get_url_name('api_list'), **request_arguments)
return {
'model': self.get_api_name(),
'page': current_page,
'pages': paginated_query.get_pages(),
'total': paginated_query.query.count(),
'previous': previous,
'next': next,
}
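# Example of the returned metadata (illustrative values only):
#   {'model': 'quizbook', 'page': 2, 'pages': 5, 'total': 42,
#    'previous': '/api/v1/quizbook/?page=1',
#    'next': '/api/v1/quizbook/?page=3'}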
class IntOwnerResource(IntRestResource, RestrictOwnerResource):
owner_field = 'user'
def validate_owner(self, user, obj):
return user.admin or user == getattr(obj, self.owner_field)
class IntOnlyViewByOwnerResource(IntOwnerResource):
def restrict_get_query(self, user, query):
if not user.admin:
query = query.where(getattr(self.model, self.owner_field) == g.user)
return query
def process_query(self, query):
query = super(IntOwnerResource, self).process_query(query)
return self.restrict_get_query(g.user, query)
class UserResource(IntRestResource):
exclude = ('password',)
class QuizBookResource(IntOnlyViewByOwnerResource):
include_resources = {'user': UserResource}
class QuestionResource(IntRestResource):
pass
class ActivityResource(IntOnlyViewByOwnerResource):
include_resources = {'book': QuizBookResource, 'user': UserResource}
user_auth = IntAuthentication(auth)
api = IntApi(app, prefix='/api/v1', default_auth=user_auth, name='simple_api')
api.register(User, UserResource, auth=Authentication())
api.register(QuizBook, QuizBookResource)
api.register(Question, QuestionResource)
api.register(Activity, ActivityResource)
|
dantebarba/docker-media-server
|
refs/heads/master
|
plex/Sub-Zero.bundle/Contents/Libraries/Shared/subzero/modification/main.py
|
1
|
# coding=utf-8
import traceback
import re
import pysubs2
import logging
import time
from mods import EMPTY_TAG_PROCESSOR, EmptyEntryError
from registry import registry
from subzero.language import Language
logger = logging.getLogger(__name__)
lowercase_re = re.compile(ur'(?sux)[a-zà-ž]')
class SubtitleModifications(object):
debug = False
language = None
initialized_mods = {}
mods_used = []
only_uppercase = False
f = None
font_style_tag_start = u"{\\"
def __init__(self, debug=False):
self.debug = debug
self.initialized_mods = {}
self.mods_used = []
def load(self, fn=None, content=None, language=None, encoding="utf-8"):
"""
:param encoding: used for decoding the content when fn is given, not used in case content is given
:param language: babelfish.Language language of the subtitle
:param fn: filename
:param content: unicode
:return:
"""
if language:
self.language = Language.rebuild(language, forced=False)
self.initialized_mods = {}
try:
if fn:
self.f = pysubs2.load(fn, encoding=encoding)
elif content:
self.f = pysubs2.SSAFile.from_string(content)
except (IOError,
UnicodeDecodeError,
pysubs2.exceptions.UnknownFPSError,
pysubs2.exceptions.UnknownFormatIdentifierError,
pysubs2.exceptions.FormatAutodetectionError):
if fn:
logger.exception("Couldn't load subtitle: %s: %s", fn, traceback.format_exc())
elif content:
logger.exception("Couldn't load subtitle: %s", traceback.format_exc())
return bool(self.f)
@classmethod
def parse_identifier(cls, identifier):
# simple identifier
if identifier in registry.mods:
return identifier, {}
# identifier with params; identifier(param=value)
split_args = identifier[identifier.find("(")+1:-1].split(",")
args = dict((key, value) for key, value in [sub.split("=") for sub in split_args])
return identifier[:identifier.find("(")], args
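# Illustrative parse (hypothetical mod identifier, not from the original
# source): parse_identifier("color(name=red)") returns ("color", {"name": "red"}),
# while a bare identifier already known to the registry is returned unchanged
# with empty args.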
@classmethod
def get_mod_class(cls, identifier):
identifier, args = cls.parse_identifier(identifier)
return registry.mods[identifier]
@classmethod
def get_mod_signature(cls, identifier, **kwargs):
return cls.get_mod_class(identifier).get_signature(**kwargs)
def prepare_mods(self, *mods):
parsed_mods = [(SubtitleModifications.parse_identifier(mod), mod) for mod in mods]
final_mods = {}
line_mods = []
non_line_mods = []
used_mods = []
mods_merged = {}
mods_merged_log = {}
for mod_data, orig_identifier in parsed_mods:
identifier, args = mod_data
if identifier not in registry.mods:
logger.error("Mod %s not loaded", identifier)
continue
mod_cls = registry.mods[identifier]
# exclusive mod, kill old, use newest
if identifier in final_mods and mod_cls.exclusive:
final_mods.pop(identifier)
# language-specific mod, check validity
if mod_cls.languages and self.language not in mod_cls.languages:
if self.debug:
logger.debug("Skipping %s, because %r is not a valid language for this mod",
identifier, self.language)
continue
if mod_cls.only_uppercase and not self.only_uppercase:
if self.debug:
logger.debug("Skipping %s, because the subtitle isn't all uppercase", identifier)
continue
# merge args of duplicate mods if possible
elif mod_cls.args_mergeable and identifier in mods_merged:
mods_merged[identifier] = mod_cls.merge_args(mods_merged[identifier], args)
mods_merged_log[identifier]["identifiers"].append(orig_identifier)
continue
if mod_cls.args_mergeable:
mods_merged[identifier] = mod_cls.merge_args(args, {})
mods_merged_log[identifier] = {"identifiers": [orig_identifier], "final_identifier": orig_identifier}
used_mods.append("%s_ORIG_POSITION" % identifier)
continue
final_mods[identifier] = args
used_mods.append(orig_identifier)
# finalize merged mods into final and used mods
for identifier, args in mods_merged.iteritems():
pos_preserve_index = used_mods.index("%s_ORIG_POSITION" % identifier)
# clear empty mods after merging
if not any(args.values()):
if self.debug:
logger.debug("Skipping %s, empty args", identifier)
if pos_preserve_index > -1:
used_mods.pop(pos_preserve_index)
mods_merged_log.pop(identifier)
continue
# clear empty args
final_mod_args = dict(filter(lambda (k, v): bool(v), args.iteritems()))
_data = SubtitleModifications.get_mod_signature(identifier, **final_mod_args)
if _data == mods_merged_log[identifier]["final_identifier"]:
mods_merged_log.pop(identifier)
else:
mods_merged_log[identifier]["final_identifier"] = _data
if pos_preserve_index > -1:
used_mods[pos_preserve_index] = _data
else:
# should never happen
used_mods.append(_data)
final_mods[identifier] = args
if self.debug:
for identifier, data in mods_merged_log.iteritems():
logger.debug("Merged %s to %s", data["identifiers"], data["final_identifier"])
# separate all mods into line and non-line mods
for identifier, args in final_mods.iteritems():
mod_cls = registry.mods[identifier]
if mod_cls.modifies_whole_file:
non_line_mods.append((identifier, args))
else:
line_mods.append((mod_cls.order, identifier, args))
# initialize the mods
if identifier not in self.initialized_mods:
self.initialized_mods[identifier] = mod_cls(self)
return line_mods, non_line_mods, used_mods
def detect_uppercase(self):
entries_used = 0
for entry in self.f:
entry_used = False
for sub in entry.text.strip().split("\N"):
# skip HI bracket entries, those might actually be lowercase
sub = sub.strip()
for processor in registry.mods["remove_HI"].processors[:4]:
sub = processor.process(sub)
if sub.strip():
if lowercase_re.search(sub):
return False
entry_used = True
else:
# skip full entry
break
if entry_used:
entries_used += 1
if entries_used == 40:
break
return True
def modify(self, *mods):
new_entries = []
start = time.time()
self.only_uppercase = self.detect_uppercase()
if self.only_uppercase and self.debug:
logger.debug("Full-uppercase subtitle found")
line_mods, non_line_mods, mods_used = self.prepare_mods(*mods)
self.mods_used = mods_used
# apply non-last file mods
if non_line_mods:
non_line_mods_start = time.time()
self.apply_non_line_mods(non_line_mods)
if self.debug:
logger.debug("Non-Line mods took %ss", time.time() - non_line_mods_start)
# sort line mods
line_mods.sort(key=lambda x: (x is None, x))
# apply line mods
if line_mods:
line_mods_start = time.time()
self.apply_line_mods(new_entries, line_mods)
if self.debug:
logger.debug("Line mods took %ss", time.time() - line_mods_start)
if new_entries:
self.f.events = new_entries
# apply last file mods
if non_line_mods:
non_line_mods_start = time.time()
self.apply_non_line_mods(non_line_mods, only_last=True)
if self.debug:
logger.debug("Final Non-Line mods took %ss", time.time() - non_line_mods_start)
if self.debug:
logger.debug("Subtitle Modification took %ss", time.time() - start)
logger.debug("Mods applied: %s" % self.mods_used)
def apply_non_line_mods(self, mods, only_last=False):
for identifier, args in mods:
mod = self.initialized_mods[identifier]
if (not only_last and not mod.apply_last) or (only_last and mod.apply_last):
if self.debug:
logger.debug("Applying %s", identifier)
mod.modify(None, debug=self.debug, parent=self, **args)
def apply_line_mods(self, new_entries, mods):
for index, entry in enumerate(self.f, 1):
applied_mods = []
lines = []
line_count = 0
start_tags = []
end_tags = []
t = entry.text.strip()
if not t:
if self.debug:
logger.debug(u"Skipping empty line: %s", index)
continue
skip_entry = False
for line in t.split(ur"\N"):
# don't bother the mods with surrounding tags
old_line = line
line = line.strip()
skip_line = False
line_count += 1
if not line:
continue
# clean {\X0} tags before processing
# fixme: handle nested tags?
start_tag = u""
end_tag = u""
if line.startswith(self.font_style_tag_start):
start_tag = line[:5]
line = line[5:]
if line[-5:-3] == self.font_style_tag_start:
end_tag = line[-5:]
line = line[:-5]
last_procs_mods = []
# fixme: this double loop is ugly
for order, identifier, args in mods:
mod = self.initialized_mods[identifier]
try:
line = mod.modify(line.strip(), entry=entry.text, debug=self.debug, parent=self, index=index,
**args)
except EmptyEntryError:
if self.debug:
logger.debug(u"%d: %s: %r -> ''", index, identifier, entry.text)
skip_entry = True
break
if not line:
if self.debug:
logger.debug(u"%d: %s: %r -> ''", index, identifier, old_line)
skip_line = True
break
applied_mods.append(identifier)
if mod.last_processors:
last_procs_mods.append([identifier, args])
if skip_entry:
lines = []
break
if skip_line:
continue
for identifier, args in last_procs_mods:
mod = self.initialized_mods[identifier]
try:
line = mod.modify(line.strip(), entry=entry.text, debug=self.debug, parent=self, index=index,
procs=["last_process"], **args)
except EmptyEntryError:
if self.debug:
logger.debug(u"%d: %s: %r -> ''", index, identifier, entry.text)
skip_entry = True
break
if not line:
if self.debug:
logger.debug(u"%d: %s: %r -> ''", index, identifier, old_line)
skip_line = True
break
if skip_entry:
lines = []
break
if skip_line:
continue
if start_tag:
start_tags.append(start_tag)
if end_tag:
end_tags.append(end_tag)
# append new line and clean possibly newly added empty tags
cleaned_line = EMPTY_TAG_PROCESSOR.process(start_tag + line + end_tag, debug=self.debug).strip()
if cleaned_line:
# we may have a single closing tag, if so, try appending it to the previous line
if len(cleaned_line) == 5 and cleaned_line.startswith("{\\") and cleaned_line.endswith("0}"):
if lines:
prev_line = lines.pop()
lines.append(prev_line + cleaned_line)
continue
lines.append(cleaned_line)
else:
if self.debug:
logger.debug(u"%d: Ditching now empty line (%r)", index, line)
if not lines:
# don't bother logging when the entry only had one line
if self.debug and line_count > 1:
logger.debug(u"%d: %r -> ''", index, entry.text)
continue
new_text = ur"\N".join(lines)
# cheap man's approach to avoid open tags
add_start_tags = []
add_end_tags = []
if len(start_tags) != len(end_tags):
for tag in start_tags:
end_tag = tag.replace("1", "0")
if end_tag not in end_tags and new_text.count(tag) > new_text.count(end_tag):
add_end_tags.append(end_tag)
for tag in end_tags:
start_tag = tag.replace("0", "1")
if start_tag not in start_tags and new_text.count(tag) > new_text.count(start_tag):
add_start_tags.append(start_tag)
if add_end_tags or add_start_tags:
entry.text = u"".join(add_start_tags) + new_text + u"".join(add_end_tags)
if self.debug:
logger.debug(u"Fixing tags: %s (%r -> %r)", str(add_start_tags+add_end_tags), new_text,
entry.text)
else:
entry.text = new_text
else:
entry.text = new_text
new_entries.append(entry)
SubMod = SubtitleModifications
|
JPalmerio/GRB_population_code
|
refs/heads/master
|
grbpop/basic_example.py
|
1
|
from cosmology import init_cosmology
from GRB_population import create_GRB_population_from
import io_grb_pop as io
import numpy as np
import logging
import sys
log = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
datefmt='%H:%M:%S')
logging.getLogger('matplotlib').setLevel(logging.WARNING)
# Define the paths used by the code
paths_to_dir, paths_to_files = io.generate_paths(conf_fname='config_basic_example.yml',
param_fname='parameters_simple_example.yml',
init_dir=None)
# Read the input files
config, params, instruments, samples, obs_constraints = io.read_init_files(paths_to_files)
# Code calculates which samples, instruments, and constraints to include
incl_samples, incl_instruments, incl_constraints = io.create_config(config,
samples,
instruments,
obs_constraints)
# Initialize the cosmology
cosmo = init_cosmology(paths_to_dir['cosmo'])
# Generate the GRB population
np.random.seed(0)
gp = create_GRB_population_from(Nb_GRBs=config['Nb_GRBs'],
cosmo=cosmo,
params=params,
incl_samples=incl_samples,
incl_instruments=incl_instruments,
incl_constraints=incl_constraints,
output_dir=paths_to_dir['output'])
|
jmighion/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/aws/direct_connect.py
|
14
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Direct Connect modules.
"""
import traceback
try:
import botocore
except ImportError:
pass
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class DirectConnectError(Exception):
def __init__(self, msg, last_traceback=None, response=None):
response = {} if response is None else response
self.msg = msg
self.last_traceback = last_traceback
self.response = camel_dict_to_snake_dict(response)
def delete_connection(client, connection_id):
try:
client.delete_connection(connectionId=connection_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
last_traceback=traceback.format_exc(),
response=e.response)
def associate_connection_and_lag(client, connection_id, lag_id):
try:
client.associate_connection_with_lag(connectionId=connection_id,
lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
" with link aggregation group {1}.".format(connection_id, lag_id),
last_traceback=traceback.format_exc(),
response=e.response)
def disassociate_connection_and_lag(client, connection_id, lag_id):
try:
client.disassociate_connection_from_lag(connectionId=connection_id,
lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
" from link aggregation group {1}.".format(connection_id, lag_id),
last_traceback=traceback.format_exc(),
response=e.response)
def delete_virtual_interface(client, virtual_interface):
try:
client.delete_virtual_interface(virtualInterfaceId=virtual_interface)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
last_traceback=traceback.format_exc(),
response=e.response)
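# Illustrative usage sketch, not part of the original module: a consuming
# Ansible module would typically catch DirectConnectError and convert it into
# a module failure. The function and parameter names below are hypothetical.
def _example_ensure_connection_absent(module, client, connection_id):
    try:
        delete_connection(client, connection_id)
    except DirectConnectError as e:
        # e.response is already snake_cased by the exception constructor.
        module.fail_json(msg=e.msg, exception=e.last_traceback, **e.response)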
|
FRC-Team-3140/north-american-happiness
|
refs/heads/master
|
lib/python2.7/site-packages/flask/_compat.py
|
783
|
# -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_to_string = _identity
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
    # __init__ come back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
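# Illustrative example, not part of the original file: with_metaclass lets the
# same class statement work on Python 2 and 3 without leaving the temporary
# helper class in the MRO. The Meta metaclass below is hypothetical.
if __name__ == '__main__':
    class Meta(type):
        def __new__(mcs, name, bases, d):
            d.setdefault('tag', name.lower())
            return type.__new__(mcs, name, bases, d)
    class Tagged(with_metaclass(Meta, object)):
        pass
    assert Tagged.tag == 'tagged'
    assert Tagged.__mro__ == (Tagged, object)  # no 'temporary_class' left behind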
|
eeshangarg/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/internet/qtreactor.py
|
69
|
# -*- test-case-name: twisted.internet.test.test_qtreactor -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
try:
# 'import qtreactor' would have imported this file instead of the
# top-level qtreactor. __import__ does the right thing
# (kids, don't repeat this at home)
install = __import__('qtreactor').install
except ImportError:
from twisted.plugins.twisted_qtstub import errorMessage
raise ImportError(errorMessage)
else:
import warnings
warnings.warn("Please use qtreactor instead of twisted.internet.qtreactor",
category=DeprecationWarning)
__all__ = ['install']
|
Kazade/NeHe-Website
|
refs/heads/master
|
autoload/models.py
|
133
|
# Load the siteconf module
from django.conf import settings
from django.utils.importlib import import_module
SITECONF_MODULE = getattr(settings, 'AUTOLOAD_SITECONF', settings.ROOT_URLCONF)
import_module(SITECONF_MODULE)
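# Illustrative, hypothetical configuration (not part of the original file): with
#     AUTOLOAD_SITECONF = 'mysite.siteconf'
# in settings.py, 'mysite.siteconf' is imported here; if the setting is absent,
# the ROOT_URLCONF module is imported instead.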
|
openstack/python-troveclient
|
refs/heads/master
|
troveclient/tests/osc/v1/test_database_quota.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient.osc.v1 import database_quota
from troveclient.tests.osc.v1 import fakes
class TestQuota(fakes.TestDatabasev1):
fake_quota = fakes.FakeQuota()
def setUp(self):
super(TestQuota, self).setUp()
self.mock_client = self.app.client_manager.database
self.quota_client = self.app.client_manager.database.quota
class TestQuotaShow(TestQuota):
columns = database_quota.ShowDatabaseQuota.columns
values = [('instances', 2, 1, 10),
('backups', 4, 3, 50),
('volumes', 6, 5, 40)]
def setUp(self):
super(TestQuotaShow, self).setUp()
self.cmd = database_quota.ShowDatabaseQuota(self.app, None)
self.data = self.fake_quota.get_quotas()
self.quota_client.show.return_value = self.data
def test_show_quotas(self):
args = ['tenant_id']
parsed_args = self.check_parser(self.cmd, args, [])
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(self.columns, columns)
self.assertEqual(self.values, data)
class TestQuotaUpdate(TestQuota):
def setUp(self):
super(TestQuotaUpdate, self).setUp()
self.cmd = database_quota.UpdateDatabaseQuota(self.app, None)
self.data = self.fake_quota.fake_instances_quota
self.quota_client.update.return_value = self.data
def test_update_quota(self):
args = ['tenant_id', 'instances', '51']
parsed_args = self.check_parser(self.cmd, args, [])
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(('instances',), columns)
self.assertEqual((51,), data)
|
nmrao/robotframework
|
refs/heads/master
|
src/robot/model/message.py
|
16
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import html_escape, setter
from .itemlist import ItemList
from .modelobject import ModelObject
class Message(ModelObject):
"""A message outputted during the test execution.
The message can be a log message triggered by a keyword, or a warning
or an error occurred during the test execution.
"""
__slots__ = ['message', 'level', 'html', 'timestamp', 'parent', '_sort_key']
def __init__(self, message='', level='INFO', html=False, timestamp=None,
parent=None):
#: The message content as a string.
self.message = message
#: Severity of the message. Either ``TRACE``, ``INFO``,
#: ``WARN``, ``DEBUG`` or ``FAIL``/``ERROR``.
self.level = level
#: ``True`` if the content is in HTML, ``False`` otherwise.
self.html = html
#: Timestamp in format ``%Y%m%d %H:%M:%S.%f``.
self.timestamp = timestamp
self._sort_key = -1
#: The object this message was triggered by.
self.parent = parent
@setter
def parent(self, parent):
if parent and parent is not getattr(self, 'parent', None):
self._sort_key = getattr(parent, '_child_sort_key', -1)
return parent
@property
def html_message(self):
"""Returns the message content as HTML."""
return self.message if self.html else html_escape(self.message)
def visit(self, visitor):
visitor.visit_message(self)
def __unicode__(self):
return self.message
class Messages(ItemList):
__slots__ = []
def __init__(self, message_class=Message, parent=None, messages=None):
ItemList.__init__(self, message_class, {'parent': parent}, messages)
def __setitem__(self, index, item):
old = self[index]
ItemList.__setitem__(self, index, item)
self[index]._sort_key = old._sort_key
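# Illustrative example, not part of the original file: html_message escapes
# plain-text content but returns HTML content verbatim.
if __name__ == '__main__':
    plain = Message('x < y', level='INFO')
    markup = Message('<b>bold</b>', level='WARN', html=True)
    # plain.html_message escapes the '<' (e.g. 'x &lt; y'), while
    # markup.html_message is returned unchanged because html=True.
    assert markup.html_message == '<b>bold</b>'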
|
40223226/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/SDL.py
|
603
|
from browser import document
SDL_INIT_VIDEO=0
SDL_GL_DOUBLEBUFFER=1
SDL_GL_DEPTH_SIZE=2
SDL_DOUBLEBUF=3
SDL_ANYFORMAT=4
SDL_ACTIVEEVENT=5
SDL_ALLEVENTS=5
SDL_KEYDOWN=6
SDL_KEYUP=7
SDL_MOUSEMOTION=8
SDL_MOUSEBUTTONDOWN=9
SDL_MOUSEBUTTONUP=10
SDL_JOYAXISMOTION=11
SDL_JOYBALLMOTION=12
SDL_JOYHATMOTION=13
SDL_JOYBUTTONUP=14
SDL_JOYBUTTONDOWN=15
SDL_QUIT=16
SDL_SYSWMEVENT=17
SDL_VIDEORESIZE=18
SDL_VIDEOEXPOSE=19
SDL_NOEVENT=20
SDL_GETEVENT=21
SDL_OPENGL=False
def SDL_WasInit(var):
return True
_attrs={}
_wm={}
def SDL_PeepEvents(num, event, mask):
pass
def SDL_GL_SetAttribute(variable, value):
_attrs[variable]=value
def SDL_GL_GetAttribute(variable):
    return _attrs.get(variable, None)
def SDL_GL_SetVideoMode(width, height, depth, flags):
pass
def SDL_WM_SetCaption(title, icontitle):
_wm['title']=title
_wm['icontitle']=icontitle
def SDL_PumpEvents():
pass
def SDL_SetVideoMode(width, height, depth, flags):
pass
def SDL_SetColorKey(surface, key, value):
pass
def SDL_WM_GetCaption():
return _wm.get('title', ''), _wm.get('icontitle', '')
def SDL_UpdateRect(screen, x1, y1, x2, y2):
screen.canvas.style.width=screen.canvas.style.width
def SDL_UpdateRects(screen, rects):
for _rect in rects:
        SDL_UpdateRect(screen, _rect.x, _rect.y, _rect.w, _rect.h)
def SDL_GetVideoSurface():
return _Screen
def SDL_GetVideoInfo():
return
def SDL_VideoModeOK(width, height, depth, flags):
pass
def SDL_SetPalette(surface, sdl_var, colors, flag):
pass
class Screen:
def __init__(self):
self.flags=0
@property
def canvas(self):
return document.get(selector='canvas')[0]
_Screen=Screen()
class SDL_Rect:
def __init__(self, x, y, w, h):
self.x=x
self.y=y
self.w=w
self.h=h
def SDL_Flip(screen):
pass
|
mavit/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_permission.py
|
41
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_permission
short_description: Module to manage permissions of users/groups in oVirt/RHV
version_added: "2.3"
author:
- Ondra Machacek (@machacekondra)
description:
- Module to manage permissions of users/groups in oVirt/RHV.
options:
role:
description:
- Name of the role to be assigned to user/group on specific object.
default: UserRole
state:
description:
- Should the permission be present/absent.
choices: [ absent, present ]
default: present
object_id:
description:
- ID of the object where the permissions should be managed.
object_name:
description:
- Name of the object where the permissions should be managed.
object_type:
description:
- The object where the permissions should be managed.
choices:
- cluster
- cpu_profile
- data_center
- disk
- disk_profile
- host
- network
- storage_domain
- system
- template
- vm
- vm_pool
- vnic_profile
default: vm
user_name:
description:
            - Username of the user to manage. In most LDAPs it's the I(uid) of the user,
              but in Active Directory you must specify the I(UPN) of the user.
            - Note that if the user does not exist in the system, this module will fail;
              you should ensure the user exists by using the M(ovirt_users) module.
group_name:
description:
- Name of the group to manage.
            - Note that if the group does not exist in the system, this module will fail;
              you should ensure the group exists by using the M(ovirt_groups) module.
authz_name:
description:
- Authorization provider of the user/group.
required: true
aliases: [ domain ]
namespace:
description:
- Namespace of the authorization provider, where user/group resides.
quota_name:
description:
- Name of the quota to assign permission. Works only with C(object_type) I(data_center).
version_added: "2.7"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Add user user1 from authorization provider example.com-authz
ovirt_permission:
user_name: user1
authz_name: example.com-authz
object_type: vm
object_name: myvm
role: UserVmManager
- name: Remove permission from user
ovirt_permission:
state: absent
user_name: user1
authz_name: example.com-authz
object_type: cluster
object_name: mycluster
role: ClusterAdmin
- name: Assign QuotaConsumer role to user
ovirt_permissions:
state: present
user_name: user1
authz_name: example.com-authz
object_type: data_center
object_name: mydatacenter
quota_name: myquota
role: QuotaConsumer
'''
RETURN = '''
id:
description: ID of the permission which is managed
returned: On success if permission is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
permission:
description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
returned: On success if permission is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
follow_link,
get_link_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
get_id_by_name
)
def _objects_service(connection, object_type):
if object_type == 'system':
return connection.system_service()
return getattr(
connection.system_service(),
'%ss_service' % object_type,
None,
)()
def _object_service(connection, module):
object_type = module.params['object_type']
objects_service = _objects_service(connection, object_type)
if object_type == 'system':
return objects_service
object_id = module.params['object_id']
if object_id is None:
sdk_object = search_by_name(objects_service, module.params['object_name'])
if sdk_object is None:
raise Exception(
"'%s' object '%s' was not found." % (
module.params['object_type'],
module.params['object_name']
)
)
object_id = sdk_object.id
object_service = objects_service.service(object_id)
if module.params['quota_name'] and object_type == 'data_center':
quotas_service = object_service.quotas_service()
return quotas_service.quota_service(get_id_by_name(quotas_service, module.params['quota_name']))
return object_service
def _permission(module, permissions_service, connection):
for permission in permissions_service.list():
user = follow_link(connection, permission.user)
if (
equal(module.params['user_name'], user.principal if user else None) and
equal(module.params['group_name'], get_link_name(connection, permission.group)) and
equal(module.params['role'], get_link_name(connection, permission.role))
):
return permission
class PermissionsModule(BaseModule):
def _user(self):
user = search_by_attributes(
self._connection.system_service().users_service(),
usrname="{name}@{authz_name}".format(
name=self._module.params['user_name'],
authz_name=self._module.params['authz_name'],
),
)
if user is None:
raise Exception("User '%s' was not found." % self._module.params['user_name'])
return user
def _group(self):
groups = self._connection.system_service().groups_service().list(
search="name={name}".format(
name=self._module.params['group_name'],
)
)
# If found more groups, filter them by namespace and authz name:
# (filtering here, as oVirt/RHV backend doesn't support it)
if len(groups) > 1:
groups = [
g for g in groups if (
equal(self._module.params['namespace'], g.namespace) and
equal(self._module.params['authz_name'], g.domain.name)
)
]
if not groups:
raise Exception("Group '%s' was not found." % self._module.params['group_name'])
return groups[0]
def build_entity(self):
entity = self._group() if self._module.params['group_name'] else self._user()
return otypes.Permission(
user=otypes.User(
id=entity.id
) if self._module.params['user_name'] else None,
group=otypes.Group(
id=entity.id
) if self._module.params['group_name'] else None,
role=otypes.Role(
name=self._module.params['role']
),
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(type='str', default='present', choices=['absent', 'present']),
role=dict(type='str', default='UserRole'),
object_type=dict(type='str', default='vm',
choices=[
'cluster',
'cpu_profile',
'data_center',
'disk',
'disk_profile',
'host',
'network',
'storage_domain',
'system',
'template',
'vm',
'vm_pool',
'vnic_profile',
]),
authz_name=dict(type='str', required=True, aliases=['domain']),
object_id=dict(type='str'),
object_name=dict(type='str'),
user_name=dict(type='str'),
group_name=dict(type='str'),
namespace=dict(type='str'),
quota_name=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
if (module.params['object_name'] is None and module.params['object_id'] is None) and module.params['object_type'] != 'system':
module.fail_json(msg='"object_name" or "object_id" is required')
if module.params['user_name'] is None and module.params['group_name'] is None:
module.fail_json(msg='"user_name" or "group_name" is required')
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
permissions_service = _object_service(connection, module).permissions_service()
permissions_module = PermissionsModule(
connection=connection,
module=module,
service=permissions_service,
)
permission = _permission(module, permissions_service, connection)
state = module.params['state']
if state == 'present':
ret = permissions_module.create(entity=permission)
elif state == 'absent':
ret = permissions_module.remove(entity=permission)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
translate/pootle
|
refs/heads/master
|
pootle/apps/pootle_app/migrations/0011_directory_tp.py
|
8
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-04 07:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pootle_translationproject', '0003_realpath_can_be_none'),
('pootle_app', '0010_obsolete_path_idx'),
]
operations = [
migrations.AddField(
model_name='directory',
name='tp',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dirs', to='pootle_translationproject.TranslationProject'),
),
]
|
tlatzko/spmcluster
|
refs/heads/master
|
.tox/2.7-nocov/lib/python2.7/site-packages/pip/utils/build.py
|
899
|
from __future__ import absolute_import
import os.path
import tempfile
from pip.utils import rmtree
class BuildDirectory(object):
def __init__(self, name=None, delete=None):
# If we were not given an explicit directory, and we were not given an
# explicit delete option, then we'll default to deleting.
if name is None and delete is None:
delete = True
if name is None:
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-"))
# If we were not given an explicit directory, and we were not given
# an explicit delete option, then we'll default to deleting.
if delete is None:
delete = True
self.name = name
self.delete = delete
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self.delete:
rmtree(self.name)
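# Illustrative usage sketch, not part of the original file: BuildDirectory is a
# context manager that yields the directory path and removes it on exit when
# delete is true (the default when the directory was created automatically).
if __name__ == '__main__':
    with BuildDirectory() as build_dir:
        open(os.path.join(build_dir, 'marker.txt'), 'w').close()
    # build_dir has been removed at this point because delete defaulted to True.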
|
Kakadu/xcp-xapi
|
refs/heads/master
|
ocaml/idl/binding_sanity_checks/unpoolify.py
|
34
|
#!/usr/bin/env python
import XenAPI
import sanitychecklib
#log in to the master
print "logging in to ",sanitychecklib.server
session=sanitychecklib.getsession()
sx=session.xenapi
#find the secondary host by name
secondaryserver_list=sx.host.get_by_name_label(sanitychecklib.secondaryserver)
if len(secondaryserver_list)==1:
secondaryserver=secondaryserver_list[0]
#eject it from the pool
print "ejecting", sanitychecklib.secondaryserver, "from the pool"
sx.pool.eject(secondaryserver)
else:
print "there is no host", sanitychecklib.secondaryserver, "in the pool associated with", sanitychecklib.server
#the eject operation takes ages.....
#log out
session.logout()
|
hellarafa/ART405-REPFINDER-Project2
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/input.py
|
578
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
'executable',
'shared_library',
'loadable_module',
'mac_kernel_extension',
]
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
# If section ends in one of the '=+?!' characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section and section[-1:] in '=+?!':
section = section[:-1]
if section in path_sections:
return True
  # Sections matching the regexp '_(dir|file|path)s?$' are also
# considered PathSections. Using manual string matching since that
# is much faster than the regexp and this can be called hundreds of
# thousands of times so micro performance matters.
if "_" in section:
tail = section[-6:]
if tail[-1] == 's':
tail = tail[:-1]
if tail[-5:] in ('_file', '_path'):
return True
return tail[-4:] == '_dir'
return False
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
# toplevel,
# qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
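# For illustration (not part of the original file): a minimal build file body
# accepted by CheckedEval is a plain dict/list literal such as
#   {
#     'targets': [
#       {'target_name': 'hello', 'type': 'executable', 'sources': ['hello.cc']},
#     ],
#   }
# Repeating a key within the same dict raises GypError via CheckNode.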
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
"': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
# Open the build file for read ('r') with universal-newlines mode ('U')
    # to make sure platform-specific newlines ('\r\n' or '\r') are converted to '\n',
    # which would otherwise cause eval() to fail.
build_file_contents = open(build_file_path, 'rU').read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
includes, check):
includes_list = []
if includes != None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
if not 'included' in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, None, False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if type(v) is dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
None, check)
elif type(v) is list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
for item in sublist:
if type(item) is dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
None, check)
elif type(item) is list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = gyp.simple_copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if type(condition) is list:
for condition_dict in condition[1:]:
if type(condition_dict) is dict:
ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
# The 'target_build_files' key is only set when loading target build files in
# the non-parallel code path, where LoadTargetBuildFile is called
# recursively. In the parallel code path, we don't need to check whether the
# |build_file_path| has already been loaded, because the 'scheduled' set in
# ParallelState guarantees that we never load the same |build_file_path|
# twice.
if 'target_build_files' in data:
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = gyp.simple_copy.deepcopy(
build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, variables,
includes, depth, check,
generator_input_info):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
SetGeneratorGlobals(generator_input_info)
result = LoadTargetBuildFile(build_file_path, per_process_data,
per_process_aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
# We can safely pop the build_file_data from per_process_data because it
# will never be referenced by this process again, so we don't need to keep
# it in the cache.
build_file_data = per_process_data.pop(build_file_path)
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
build_file_data,
dependencies)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return None
except Exception, e:
print >>sys.stderr, 'Exception:', e
print >>sys.stderr, traceback.format_exc()
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The number of parallel calls outstanding; decremented when a response
# was received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, build_file_data0, dependencies0) = result
self.data[build_file_path0] = build_file_data0
self.data['target_build_files'].add(build_file_path0)
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
# Make copies of the build_files argument that we can modify while working.
parallel_state.dependencies = list(build_files)
parallel_state.scheduled = set(build_files)
parallel_state.pending = 0
parallel_state.data = data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
variables, includes, depth, check, generator_input_info),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
parallel_state.pool.close()
parallel_state.pool.join()
parallel_state.pool = None
if parallel_state.error:
sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
if type(string) is str:
# This function is called a lot so for maximum performance, avoid
# involving regexps which would otherwise make the code much
# shorter. Regexps would need twice the time of this function.
if string:
if string == "0":
return True
if string[0] == "-":
string = string[1:]
if not string:
return False
if '1' <= string[0] <= '9':
return string.isdigit()
return False
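# For illustration (not part of the original file): IsStrCanonicalInt('42') and
# IsStrCanonicalInt('-7') return True, while IsStrCanonicalInt('042'),
# IsStrCanonicalInt('4.2') and IsStrCanonicalInt(42) (not a str) return False.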
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
r'\((?P<is_array>\s*\[?)'
r'(?P<content>.*?)(\]?)\))')
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) is list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
# Admittedly, this is different than the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = gyp.simple_copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
# expand_to_list is true if an @ variant is used. In that case,
# the expansion should result in a list. Note that the caller
# is to be expecting a list in return, and not all callers do
# because not all are working in list context. Also, for list
# expansions, there can be no other text besides the variable
# expansion in the input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '' and not file_list:
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) is list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
if os.path.isabs(replacement):
raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
if not generator_filelist_paths:
path = os.path.join(build_file_dir, replacement)
else:
if os.path.isabs(build_file_dir):
toplevel = generator_filelist_paths['toplevel']
rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
else:
rel_build_file_dir = build_file_dir
qualified_out_dir = generator_filelist_paths['qualified_out_dir']
path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
gyp.common.EnsureDirExists(path)
replacement = gyp.common.RelativePath(path, build_file_dir)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once. The cache key contains the command to be
# run as well as the directory to run it from, to account for commands
# that depend on their current directory.
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = (str(contents), build_file_dir)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
if build_file_dir: # build_file_dir may be None (see above).
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
assert replacement != None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
try:
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
except Exception, e:
raise GypError("%s while executing command '%s' in %s" %
(e, contents, build_file))
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d while in %s." %
(contents, p.returncode, build_file))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
contents,build_file_dir)
replacement = cached_value
else:
if not contents in variables:
if contents[-1] in ['!', '/']:
# In order to allow cross-compiles (nacl) to happen more naturally,
# we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if type(replacement) is list:
for item in replacement:
if not contents[-1] == '/' and type(item) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif type(replacement) not in (str, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if type(replacement) is list:
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if type(replacement) is list:
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
if output == input:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Found only identity matches on %r, avoiding infinite "
"recursion.",
output)
else:
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if type(output) is list:
if output and type(output[0]) is list:
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if type(output) is list:
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
"""Returns the dict that should be used or None if the result was
that nothing should be used."""
if type(condition) is not list:
raise GypError(conditions_key + ' must be a list')
if len(condition) < 2:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be at least length 2, not ' + str(len(condition)))
i = 0
result = None
while i < len(condition):
cond_expr = condition[i]
true_dict = condition[i + 1]
if type(true_dict) is not dict:
raise GypError('{} {} must be followed by a dictionary, not {}'.format(
conditions_key, cond_expr, type(true_dict)))
if len(condition) > i + 2 and type(condition[i + 2]) is dict:
false_dict = condition[i + 2]
i = i + 3
if i != len(condition):
raise GypError('{} {} has {} unexpected trailing items'.format(
conditions_key, cond_expr, len(condition) - i))
else:
false_dict = None
i = i + 2
if result == None:
result = EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file)
return result
def EvalSingleCondition(
cond_expr, true_dict, false_dict, phase, variables, build_file):
"""Returns true_dict if cond_expr evaluates to true, and false_dict
otherwise."""
  # Do expansions on the condition itself. Since the condition can naturally
# contain variable references without needing to resort to GYP expansion
# syntax, this is of dubious value for variables, but someone might want to
# use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if type(cond_expr_expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + cond_expr_expanded.__class__.__name__)
try:
if cond_expr_expanded in cached_conditions_asts:
ast_code = cached_conditions_asts[cond_expr_expanded]
else:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
cached_conditions_asts[cond_expr_expanded] = ast_code
if eval(ast_code, {'__builtins__': None}, variables):
return true_dict
return false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
if not conditions_key in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
merge_dict = EvalCondition(condition, conditions_key, phase, variables,
build_file)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
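  # For example (hypothetical): a dict containing 'target_name': 'foo' would
  # make the automatic variable '_target_name' available with the value 'foo'.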
for key, value in the_dict.iteritems():
if type(value) in (str, int, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
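  # For example (hypothetical): 'variables': {'use_foo%': 0} sets use_foo to 0
  # only if use_foo is not already present in |variables|, whereas a plain
  # 'use_foo': 0 would overwrite any earlier value.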
for key, value in the_dict.get('variables', {}).iteritems():
if type(value) not in (str, int, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and type(value) is str:
expanded = ExpandVariables(value, phase, variables, build_file)
if type(expanded) not in (str, int):
raise ValueError(
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key)
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or type(value) is str:
continue
if type(value) is dict:
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif type(value) is list:
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif type(value) is not int:
raise TypeError('Unknown type ' + value.__class__.__name__ + \
' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if type(item) is dict:
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif type(item) is list:
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif type(item) is str:
expanded = ExpandVariables(item, phase, variables, build_file)
if type(expanded) in (str, int):
the_list[index] = expanded
elif type(expanded) is list:
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
elif type(item) is not int:
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
"""Remove dependencies having the 'link_dependency' attribute from the 'none'
targets."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if target_dict.get('type', None) == 'none':
if targets[t].get('variables', {}).get('link_dependency', 0):
target_dict[dependency_key] = \
Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def __repr__(self):
return '<DependencyGraphNode: %r>' % self.ref
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = OrderedSet()
# in_degree_zeros is the list of DependencyGraphNodes that have no
# dependencies not in flat_list. Initially, it is a copy of the children
# of this node, because when the graph was built, nodes with no
# dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
# Nodes in in_degree_zeros have no dependencies not in flat_list, so they
# can be appended to flat_list. Take these nodes out of in_degree_zeros
# as work progresses, so that the next node to process from the list can
# always be accessed at a consistent position.
node = in_degree_zeros.pop()
flat_list.add(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
# TODO: We want to check through the
# node_dependent.dependencies list but if it's long and we
# always start at the beginning, then we get O(n^2) behaviour.
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list. There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return list(flat_list)
def FindCycles(self):
"""
Returns a list of cycles in the graph, where each cycle is its own list.
"""
results = []
visited = set()
def Visit(node, path):
for child in node.dependents:
if child in path:
results.append([child] + path[:path.index(child) + 1])
elif not child in visited:
visited.add(child)
Visit(child, [child] + path)
visited.add(self)
Visit(self, [self])
return results
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
This method is not terribly useful on its own, it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
    if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns an OrderedSet of all of a target's dependencies, recursively."""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
if dependency.ref is None:
continue
if dependency.ref not in dependencies:
dependency.DeepDependencies(dependencies)
dependencies.add(dependency.ref)
return dependencies
def _LinkDependenciesInternal(self, targets, include_shared_libraries,
dependencies=None, initial=True):
"""Returns an OrderedSet of dependency targets that are linked
into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
If |include_shared_libraries| is False, the resulting dependencies will not
include shared_library targets that are linked into this target.
"""
if dependencies is None:
# Using a list to get ordered output and a set to do fast "is it
# already added" checks.
dependencies = OrderedSet()
# Check for None, corresponding to the root node.
if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
dependencies.add(self.ref)
return dependencies
# Executables, mac kernel extensions and loadable modules are already fully
# and finally linked. Nothing else can be a link dependency of them, there
# can only be dependencies in the sense that a dependent target might run
# an executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module',
'mac_kernel_extension'):
return dependencies
# Shared libraries are already fully linked. They should only be included
# in |dependencies| when adjusting static library dependencies (in order to
# link against the shared_library's import lib), but should not be included
# in |dependencies| when propagating link_settings.
# The |include_shared_libraries| flag controls which of these two cases we
# are handling.
if (not initial and target_type == 'shared_library' and
not include_shared_libraries):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.add(self.ref)
if initial or not is_linkable:
      # If this is a subsequent target and it's linkable, don't look any
      # further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
      # target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency._LinkDependenciesInternal(targets,
include_shared_libraries,
dependencies, False)
return dependencies
def DependenciesForLinkSettings(self, targets):
"""
Returns a list of dependency targets whose link_settings should be merged
into this target.
"""
# TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
# link_settings are propagated. So for now, we will allow it, unless the
# 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
# False. Once chrome is fixed, we can remove this flag.
include_shared_libraries = \
targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
return self._LinkDependenciesInternal(targets, include_shared_libraries)
def DependenciesToLinkAgainst(self, targets):
"""
Returns a list of dependency targets that are linked into this target.
"""
return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(targets):
if not root_node.dependents:
# If all targets have dependencies, add the first target as a dependent
# of root_node so that the cycle can be discovered from root_node.
target = targets.keys()[0]
target_node = dependency_nodes[target]
target_node.dependencies.append(root_node)
root_node.dependents.append(target_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
if not build_file in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
if not root_node.dependents:
# If all files have dependencies, add the first file as a dependent
# of root_node so that the cycle can be discovered from root_node.
file_node = dependency_nodes.values()[0]
file_node.dependencies.append(root_node)
root_node.dependents.append(file_node)
cycles = []
for cycle in root_node.FindCycles():
paths = [node.ref for node in cycle]
cycles.append('Cycle: %s' % ' -> '.join(paths))
raise DependencyGraphNode.CircularException(
'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = \
dependency_nodes[target].DependenciesForLinkSettings(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
if not key in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
if not 'dependencies' in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
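      # For example (hypothetical): if static library A lists static library B
      # in 'dependencies' purely for linking, the A -> B entry is dropped here;
      # if B instead generates a header through an action and sets
      # 'hard_dependency': 1, the entry is kept.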
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = \
dependency_nodes[target].DependenciesToLinkAgainst(targets)
for dependency in link_dependencies:
if dependency == target:
continue
if not 'dependencies' in target_dict:
target_dict['dependencies'] = []
if not dependency in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
#
# "/' Used when a value is quoted. If these are present, then we
# check the second character instead.
#
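  # For example (hypothetical): an |item| of 'src/util.cc' coming from
  # third_party/foo/foo.gyp and merged into base/base.gyp would become
  # '../third_party/foo/src/util.cc'.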
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if type(item) in (str, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not (type(item) is str and item.startswith('-')):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif type(item) is dict:
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif type(item) is list:
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError(
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__)
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if type(v) in (str, int):
if type(to[k]) not in (str, int):
bad_merge = True
elif type(v) is not type(to[k]):
bad_merge = True
if bad_merge:
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k)
if type(v) in (str, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif type(v) is dict:
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif type(v) is list:
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
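      #
      # For example (hypothetical): merging {'defines=': ['A']} replaces any
      # existing 'defines' list in |to|, {'defines+': ['A']} prepends to it,
      # {'defines?': ['A']} only sets it if 'defines' is absent, and a plain
      # {'defines': ['A']} appends.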
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
elif type(to[list_base]) is not list:
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError(
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')')
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
# MergeLists can recurse back into MergeDicts, although this will be
# to make copies of dicts (with paths fixed), there will be no
# subsequent dict "merging" once entering a list because lists are
# always replaced, appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError(
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for (i, config) in target_dict['configurations'].iteritems()
if not config.get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
merged_configurations = {}
configs = target_dict['configurations']
for (configuration, old_configuration_dict) in configs.iteritems():
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = {}
for (key, target_val) in target_dict.iteritems():
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
merged_configurations[configuration] = new_configuration_dict
# Put the new configurations back into the target dict as a configuration.
for configuration in merged_configurations.keys():
target_dict['configurations'][configuration] = (
merged_configurations[configuration])
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
main list, which in this example, would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if type(value) is not list:
raise ValueError(name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__)
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if type(the_dict[list_key]) is not list:
value = the_dict[list_key]
raise ValueError(name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation])
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
        if action == 'exclude':
          # Items matching this regex will have their action set to 0
          # (exclude).
          action_value = 0
        elif action == 'include':
          # Items matching this regex will have their action set to 1
          # (include).
          action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError('Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key)
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
' must not be present prior '
' to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if type(value) is dict:
ProcessListFiltersInDict(key, value)
elif type(value) is list:
ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if type(item) is dict:
ProcessListFiltersInDict(name, item)
elif type(item) is list:
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'mac_kernel_extension', 'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check):
if not duplicate_basename_check:
return
if target_dict.get('type', None) != 'static_library':
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'libtool on Mac cannot handle that. Use '
'--no-duplicate-basename-check to disable this validation.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension.startswith('.'):
rule_extension = rule_extension[1:]
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == rule_extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if type(run_as) is not dict:
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if type(action) is not list:
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and type(working_directory) is not str:
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and type(environment) is not dict:
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
  # Use items instead of iteritems because the dict may be modified during
  # iteration, and there's no need to revisit reinserted keys and their
  # associated values.
for k, v in the_dict.items():
if type(v) is int:
v = str(v)
the_dict[k] = v
elif type(v) is dict:
TurnIntIntoStrInDict(v)
elif type(v) is list:
TurnIntIntoStrInList(v)
if type(k) is int:
del the_dict[k]
the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if type(item) is int:
the_list[index] = str(item)
elif type(item) is dict:
TurnIntIntoStrInDict(item)
elif type(item) is list:
TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
data):
"""Return only the targets that are deep dependencies of |root_targets|."""
qualified_root_targets = []
for target in root_targets:
target = target.strip()
qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
if not qualified_targets:
raise GypError("Could not find target %s" % target)
qualified_root_targets.extend(qualified_targets)
wanted_targets = {}
for target in qualified_root_targets:
wanted_targets[target] = targets[target]
for dependency in dependency_nodes[target].DeepDependencies():
wanted_targets[dependency] = targets[dependency]
wanted_flat_list = [t for t in flat_list if t in wanted_targets]
# Prune unwanted targets from each build_file's data dict.
for build_file in data['target_build_files']:
if not 'targets' in data[build_file]:
continue
new_targets = []
for target in data[build_file]['targets']:
qualified_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if qualified_name in wanted_targets:
new_targets.append(target)
data[build_file]['targets'] = new_targets
return wanted_targets, wanted_flat_list


def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.

  Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp


def SetGeneratorGlobals(generator_input_info):
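  """Initializes the module-level generator settings used while loading."""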
# Set up path_sections and non_configuration_keys with the default data plus
# the generator-specific data.
global path_sections
path_sections = set(base_path_sections)
path_sections.update(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
global generator_filelist_paths
generator_filelist_paths = generator_input_info['generator_filelist_paths']


def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, duplicate_basename_check, parallel, root_targets):
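  """Loads and fully processes the given build files for a generator.

  Returns [flat_list, targets, data]: the flattened list of qualified target
  names, the dict mapping those names to their target dicts, and the |data|
  dict described in the NOTE below.
  """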
SetGeneratorGlobals(generator_input_info)
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_files = set(map(os.path.normpath, build_files))
if parallel:
LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
check, generator_input_info)
else:
aux_data = {}
for build_file in build_files:
try:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise

  # Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)

  # Remove all dependencies marked as 'link_dependency' from the targets of
# type 'none'.
RemoveLinkDependenciesFromNoneTargets(targets)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)

  if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
# Remove, from |targets| and |flat_list|, the targets that are not deep
# dependencies of the targets specified in |root_targets|.
targets, flat_list = PruneUnwantedTargets(
targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
ValidateSourcesInTarget(target, target_dict, build_file,
duplicate_basename_check)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]