| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
import numpy as np
from milk.supervised.base import supervised_model
class fast_classifier(object):
def __init__(self):
pass
def set_option(self, _k, _v):
pass
def train(self, features, labels, **kwargs):
examples = {}
for f,lab in zip(features, labels):
if lab not in examples:
examples[lab] = f
return fast_model(examples)
class fast_model(supervised_model):
def __init__(self, examples):
self.examples = examples
assert len(self.examples)
def apply(self, f):
best = None
best_val = +np.inf
for k,v in self.examples.items():
d = v-f
dist = np.dot(d,d)
if dist < best_val:
best = k
best_val = dist
return best
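# --- Editorial usage sketch (illustrative, not part of the original file) ---
# fast_classifier keeps the first feature vector it sees for each label, and fast_model.apply
# returns the label whose stored exemplar has the smallest squared Euclidean distance to the query:
#
#   import numpy as np
#   learner = fast_classifier()
#   model = learner.train(np.array([[0., 0.], [5., 5.]]), [0, 1])
#   assert model.apply(np.array([0.5, 0.5])) == 0
#   assert model.apply(np.array([4.0, 4.5])) == 1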
|
pombredanne/milk
|
milk/tests/fast_classifier.py
|
Python
|
mit
| 821
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-mintxfee=0.00001", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
self.log.info('Check that mempoolminfee is minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.01}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0) # return to automatic fee selection
txFS = self.nodes[0].signrawtransaction(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
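# Editorial note: each of the three batches above pays a higher fee rate than the last, so the
# 5 MB mempool (-maxmempool=5) fills up with better-paying transactions; the low-fee transaction
# funded earlier is evicted and 'mempoolminfee' rises above the default 'minrelaytxfee', which is
# what the assertions below check.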
self.log.info('The tx should be evicted by now')
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0) #confirmation should still be 0
self.log.info('Check that mempoolminfee is larger than minrelaytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
if __name__ == '__main__':
MempoolLimitTest().main()
|
yavwa/Shilling
|
test/functional/mempool_limit.py
|
Python
|
mit
| 2,527
|
__all__ = ['E']
import operator
import sys
import threading
import numpy
# Declare a double type that does not exist in Python space
double = numpy.double
# The default kind for undeclared variables
default_kind = 'double'
type_to_kind = {bool: 'bool', int: 'int', long: 'long', float: 'float',
double: 'double', complex: 'complex', str: 'str'}
kind_to_type = {'bool': bool, 'int': int, 'long': long, 'float': float,
'double': double, 'complex': complex, 'str': str}
kind_rank = ['bool', 'int', 'long', 'float', 'double', 'complex', 'none']
from numexpr import interpreter
class Expression(object):
def __init__(self):
object.__init__(self)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return VariableNode(name, default_kind)
E = Expression()
class Context(threading.local):
initialized = False
def __init__(self, dict_):
if self.initialized:
raise SystemError('__init__ called too many times')
self.initialized = True
self.__dict__.update(dict_)
def get(self, value, default):
return self.__dict__.get(value, default)
def get_current_context(self):
return self.__dict__
def set_new_context(self, dict_):
self.__dict__.update(dict_)
# This will be called each time the local object is used in a separate thread
_context = Context({})
def get_optimization():
return _context.get('optimization', 'none')
# helper functions for creating __magic__ methods
def ophelper(f):
def func(*args):
args = list(args)
for i, x in enumerate(args):
if isConstant(x):
args[i] = x = ConstantNode(x)
if not isinstance(x, ExpressionNode):
raise TypeError("unsupported object type: %s" % (type(x),))
return f(*args)
func.__name__ = f.__name__
func.__doc__ = f.__doc__
func.__dict__.update(f.__dict__)
return func
def allConstantNodes(args):
"returns True if args are all ConstantNodes."
for x in args:
if not isinstance(x, ConstantNode):
return False
return True
def isConstant(ex):
"Returns True if ex is a constant scalar of an allowed type."
return isinstance(ex, (bool, int, long, float, double, complex, str))
def commonKind(nodes):
node_kinds = [node.astKind for node in nodes]
str_count = node_kinds.count('str')
if 0 < str_count < len(node_kinds): # some args are strings, but not all
raise TypeError("strings can only be operated with strings")
if str_count > 0: # if there are some, all of them must be
return 'str'
n = -1
for x in nodes:
n = max(n, kind_rank.index(x.astKind))
return kind_rank[n]
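# Editorial example: commonKind([int_node, double_node]) returns 'double' (the highest-ranked
# kind present), while mixing 'str' nodes with numeric nodes raises TypeError.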
max_int32 = 2147483647
min_int32 = -max_int32 - 1
def bestConstantType(x):
if isinstance(x, str): # ``numpy.string_`` is a subclass of ``str``
return str
# ``long`` objects are kept as is to allow the user to force
# promotion of results by using long constants, e.g. by operating
# a 32-bit array with a long (64-bit) constant.
if isinstance(x, (long, numpy.int64)):
return long
# ``double`` objects are kept as is to allow the user to force
# promotion of results by using double constants, e.g. by operating
# a float (32-bit) array with a double (64-bit) constant.
if isinstance(x, (double)):
return double
# Numeric conversion to boolean values is not tried because
# ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be
# interpreted as booleans when ``False`` and ``True`` are already
# supported.
if isinstance(x, (bool, numpy.bool_)):
return bool
# ``long`` is not explicitly needed since ``int`` automatically
# returns longs when needed (since Python 2.3).
# The duality of float and double in Python avoids that we have to list
# ``double`` too.
for converter in int, float, complex:
try:
y = converter(x)
except StandardError, err:
continue
if x == y:
# Constants needing more than 32 bits are always
# considered ``long``, *regardless of the platform*, so we
# can clearly tell 32- and 64-bit constants apart.
if converter is int and not (min_int32 <= x <= max_int32):
return long
return converter
def getKind(x):
converter = bestConstantType(x)
return type_to_kind[converter]
def binop(opname, reversed=False, kind=None):
# Getting the named method from self (after reversal) does not
# always work (e.g. int constants do not have a __lt__ method).
opfunc = getattr(operator, "__%s__" % opname)
@ophelper
def operation(self, other):
if reversed:
self, other = other, self
if allConstantNodes([self, other]):
return ConstantNode(opfunc(self.value, other.value))
else:
return OpNode(opname, (self, other), kind=kind)
return operation
def func(func, minkind=None, maxkind=None):
@ophelper
def function(*args):
if allConstantNodes(args):
return ConstantNode(func(*[x.value for x in args]))
kind = commonKind(args)
if kind in ('int', 'long'):
# Exception for following NumPy casting rules
kind = 'double'
else:
# Apply regular casting rules
if minkind and kind_rank.index(minkind) > kind_rank.index(kind):
kind = minkind
if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind):
kind = maxkind
return FuncNode(func.__name__, args, kind)
return function
@ophelper
def where_func(a, b, c):
if isinstance(a, ConstantNode):
raise ValueError("too many dimensions")
if allConstantNodes([a,b,c]):
return ConstantNode(numpy.where(a, b, c))
return FuncNode('where', [a,b,c])
def encode_axis(axis):
if isinstance(axis, ConstantNode):
axis = axis.value
if axis is None:
axis = interpreter.allaxes
else:
if axis < 0:
axis = interpreter.maxdims - axis
if axis > 254:
raise ValueError("cannot encode axis")
return RawNode(axis)
def sum_func(a, axis=-1):
axis = encode_axis(axis)
if isinstance(a, ConstantNode):
return a
if isinstance(a, (bool, int, long, float, double, complex)):
a = ConstantNode(a)
return FuncNode('sum', [a, axis], kind=a.astKind)
def prod_func(a, axis=-1):
axis = encode_axis(axis)
if isinstance(a, (bool, int, long, float, double, complex)):
a = ConstantNode(a)
if isinstance(a, ConstantNode):
return a
return FuncNode('prod', [a, axis], kind=a.astKind)
@ophelper
def div_op(a, b):
if get_optimization() in ('moderate', 'aggressive'):
if (isinstance(b, ConstantNode) and
(a.astKind == b.astKind) and
a.astKind in ('float', 'double', 'complex')):
return OpNode('mul', [a, ConstantNode(1./b.value)])
return OpNode('div', [a,b])
@ophelper
def pow_op(a, b):
if allConstantNodes([a,b]):
return ConstantNode(a**b)
if isinstance(b, ConstantNode):
x = b.value
if get_optimization() == 'aggressive':
RANGE = 50 # Approximate break even point with pow(x,y)
# Optimize all integral and half integral powers in [-RANGE, RANGE]
# Note: for complex numbers RANGE could be larger.
if (int(2*x) == 2*x) and (-RANGE <= abs(x) <= RANGE):
n = int(abs(x))
ishalfpower = int(abs(2*x)) % 2
def multiply(x, y):
if x is None: return y
return OpNode('mul', [x, y])
r = None
p = a
mask = 1
while True:
if (n & mask):
r = multiply(r, p)
mask <<= 1
if mask > n:
break
p = OpNode('mul', [p,p])
if ishalfpower:
kind = commonKind([a])
if kind in ('int', 'long'): kind = 'double'
r = multiply(r, OpNode('sqrt', [a], kind))
if r is None:
r = OpNode('ones_like', [a])
if x < 0:
r = OpNode('div', [ConstantNode(1), r])
return r
if get_optimization() in ('moderate', 'aggressive'):
if x == -1:
return OpNode('div', [ConstantNode(1),a])
if x == 0:
return FuncNode('ones_like', [a])
if x == 0.5:
kind = a.astKind
if kind in ('int', 'long'): kind = 'double'
return FuncNode('sqrt', [a], kind=kind)
if x == 1:
return a
if x == 2:
return OpNode('mul', [a,a])
return OpNode('pow', [a,b])
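# --- Editorial worked example (not part of the original file) ---
# With get_optimization() == 'aggressive', a constant integral power such as a**5 is expanded by
# binary exponentiation instead of emitting a 'pow' opcode: n = 5 = 0b101, so the loop folds in
# p = a for bit 0 and p = (a*a)*(a*a) for bit 2, yielding
#   OpNode('mul', [a, OpNode('mul', [a2, a2])])   where a2 = OpNode('mul', [a, a]).
# Half-integral powers (e.g. a**2.5) additionally multiply by OpNode('sqrt', [a], kind), and
# negative powers wrap the result as OpNode('div', [ConstantNode(1), r]).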
# The functions and the minimum and maximum types accepted
functions = {
'copy' : func(numpy.copy),
'ones_like' : func(numpy.ones_like),
'sqrt' : func(numpy.sqrt, 'float'),
'sin' : func(numpy.sin, 'float'),
'cos' : func(numpy.cos, 'float'),
'tan' : func(numpy.tan, 'float'),
'arcsin' : func(numpy.arcsin, 'float'),
'arccos' : func(numpy.arccos, 'float'),
'arctan' : func(numpy.arctan, 'float'),
'sinh' : func(numpy.sinh, 'float'),
'cosh' : func(numpy.cosh, 'float'),
'tanh' : func(numpy.tanh, 'float'),
'arcsinh' : func(numpy.arcsinh, 'float'),
'arccosh' : func(numpy.arccosh, 'float'),
'arctanh' : func(numpy.arctanh, 'float'),
'fmod' : func(numpy.fmod, 'float'),
'arctan2' : func(numpy.arctan2, 'float'),
'log' : func(numpy.log, 'float'),
'log1p' : func(numpy.log1p, 'float'),
'log10' : func(numpy.log10, 'float'),
'exp' : func(numpy.exp, 'float'),
'expm1' : func(numpy.expm1, 'float'),
'abs': func(numpy.absolute, 'float'),
'where' : where_func,
'real' : func(numpy.real, 'double', 'double'),
'imag' : func(numpy.imag, 'double', 'double'),
'complex' : func(complex, 'complex'),
'sum' : sum_func,
'prod' : prod_func,
}
class ExpressionNode(object):
"""An object that represents a generic number object.
This implements the number special methods so that we can keep
track of how this object has been used.
"""
astType = 'generic'
def __init__(self, value=None, kind=None, children=None):
object.__init__(self)
self.value = value
if kind is None:
kind = 'none'
self.astKind = kind
if children is None:
self.children = ()
else:
self.children = tuple(children)
def get_real(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).real)
return OpNode('real', (self,), 'double')
real = property(get_real)
def get_imag(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).imag)
return OpNode('imag', (self,), 'double')
imag = property(get_imag)
def __str__(self):
return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value,
self.astKind, self.children)
def __repr__(self):
return self.__str__()
def __neg__(self):
return OpNode('neg', (self,))
def __invert__(self):
return OpNode('invert', (self,))
def __pos__(self):
return self
__add__ = __radd__ = binop('add')
__sub__ = binop('sub')
__rsub__ = binop('sub', reversed=True)
__mul__ = __rmul__ = binop('mul')
__div__ = div_op
__rdiv__ = binop('div', reversed=True)
__pow__ = pow_op
__rpow__ = binop('pow', reversed=True)
__mod__ = binop('mod')
__rmod__ = binop('mod', reversed=True)
# boolean operations
__and__ = binop('and', kind='bool')
__or__ = binop('or', kind='bool')
__gt__ = binop('gt', kind='bool')
__ge__ = binop('ge', kind='bool')
__eq__ = binop('eq', kind='bool')
__ne__ = binop('ne', kind='bool')
__lt__ = binop('gt', reversed=True, kind='bool')
__le__ = binop('ge', reversed=True, kind='bool')
class LeafNode(ExpressionNode):
leafNode = True
class VariableNode(LeafNode):
astType = 'variable'
def __init__(self, value=None, kind=None, children=None):
LeafNode.__init__(self, value=value, kind=kind)
class RawNode(object):
"""Used to pass raw integers to interpreter.
For instance, for selecting what function to use in func1.
Purposely don't inherit from ExpressionNode, since we don't want
this to be used for anything but being walked.
"""
astType = 'raw'
astKind = 'none'
def __init__(self, value):
self.value = value
self.children = ()
def __str__(self):
return 'RawNode(%s)' % (self.value,)
__repr__ = __str__
class ConstantNode(LeafNode):
astType = 'constant'
def __init__(self, value=None, children=None):
kind = getKind(value)
# Python float constants are double precision by default
if kind == 'float':
kind = 'double'
LeafNode.__init__(self, value=value, kind=kind)
def __neg__(self):
return ConstantNode(-self.value)
def __invert__(self):
return ConstantNode(~self.value)
class OpNode(ExpressionNode):
astType = 'op'
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
ExpressionNode.__init__(self, value=opcode, kind=kind, children=args)
class FuncNode(OpNode):
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
OpNode.__init__(self, opcode, args, kind)
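# --- Editorial usage sketch (illustrative, not part of the original file) ---
# Attribute access on E produces VariableNode objects and the operator overloads build the AST
# that numexpr later compiles, e.g.:
#
#   ex = (E.a + 1) * E.b
#   # ex is OpNode('mul', (OpNode('add', (VariableNode('a'), ConstantNode(1))), VariableNode('b')))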
|
erdc-cm/numexpr
|
numexpr/expressions.py
|
Python
|
mit
| 14,057
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import json
import logging
import traceback
import time
from google.appengine.api import app_identity, mail, capabilities
from google.appengine.runtime import DeadlineExceededError
from tekton.gae.middleware import Middleware
from tekton.router import PathNotFound
def get_apis_statuses(e):
if not isinstance(e, DeadlineExceededError):
return {}
t1 = time.time()
statuses = {
'blobstore': capabilities.CapabilitySet('blobstore').is_enabled(),
'datastore_v3': capabilities.CapabilitySet('datastore_v3').is_enabled(),
'datastore_v3_write': capabilities.CapabilitySet('datastore_v3', ['write']).is_enabled(),
'images': capabilities.CapabilitySet('images').is_enabled(),
'mail': capabilities.CapabilitySet('mail').is_enabled(),
'memcache': capabilities.CapabilitySet('memcache').is_enabled(),
'taskqueue': capabilities.CapabilitySet('taskqueue').is_enabled(),
'urlfetch': capabilities.CapabilitySet('urlfetch').is_enabled(),
}
t2 = time.time()
statuses['time'] = t2 - t1
return statuses
def send_error_to_admins(settings, exception, handler, render, template):
tb = traceback.format_exc()
errmsg = exception.message
logging.error(errmsg)
logging.error(tb)
handler.response.write(render(template))
appid = app_identity.get_application_id()
subject = 'ERROR in %s: [%s] %s' % (appid, handler.request.path, errmsg)
body = """
------------- request ------------
%s
----------------------------------
------------- GET params ---------
%s
----------------------------------
----------- POST params ----------
%s
----------------------------------
----------- traceback ------------
%s
----------------------------------
""" % (handler.request, handler.request.GET, handler.request.POST, tb)
body += 'API statuses = ' + json.dumps(get_apis_statuses(exception), indent=4)
mail.send_mail_to_admins(sender=settings.SENDER_EMAIL,
subject=subject,
body=body)
class EmailMiddleware(Middleware):
def handle_error(self, exception):
import settings # workaround. See https://github.com/renzon/zenwarch/issues/3
if isinstance(exception, PathNotFound):
self.handler.response.set_status(404)
send_error_to_admins(settings, exception, self.handler, self.dependencies['_render'],
settings.TEMPLATE_404_ERROR)
else:
self.handler.response.set_status(400)
send_error_to_admins(settings, exception, self.handler, self.dependencies['_render'],
settings.TEMPLATE_400_ERROR)
|
andersonsilvade/5semscript
|
tekton/gae/middleware/email_errors.py
|
Python
|
mit
| 2,781
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Michael Krause ( http://krause-software.com/ ).
# You are free to use this code under the MIT license:
# http://opensource.org/licenses/MIT
"""Show some histograms for a directory a Xcode project files."""
from __future__ import print_function
import sys
import argparse
from os.path import abspath, dirname, join
import multiprocessing
from collections import defaultdict, Counter
import codecs
# Set up the Python path so we find the xcodeprojer module in the parent directory
# relative to this file.
sys.path.insert(1, dirname(dirname(abspath(__file__))))
import utils
import xcodeprojer
from xcodeprojer import bytestr, unistr
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
unichr = chr
try:
NARROW_BUILD = len(unichr(0x1f300)) == 2
except ValueError:
NARROW_BUILD = True
DEFAULT_FIRSTNAMES = 200
user_hash = xcodeprojer.UniqueXcodeIDGenerator.user_hash
emojis = []
def here():
return dirname(abspath(__file__))
def rel(filename):
return join(here(), filename)
def write(s, end='\n'):
s = unistr(s) + unistr(end)
s = s.encode('utf-8')
if PY2:
sys.stdout.write(s)
else:
sys.stdout.buffer.write(s)
def writeline():
write('\n')
def uniord(s):
"""ord that works on surrogate pairs.
"""
try:
return ord(s)
except TypeError:
pass
if len(s) != 2:
raise
return 0x10000 + ((ord(s[0]) - 0xd800) << 10) | (ord(s[1]) - 0xdc00)
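# Editorial example: on a narrow build u'\U0001F300' is stored as the surrogate pair
# u'\ud83c\udf00', and uniord recovers 0x10000 + ((0xd83c - 0xd800) << 10) | (0xdf00 - 0xdc00)
# == 0x1F300.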
def iterchars(text):
if not NARROW_BUILD:
for c in text:
yield c
idx = 0
while idx < len(text):
c = text[idx]
if ord(c) >= 0x100:
# When we are running on a narrow Python build
# we have to deal with surrogate pairs ourselves.
if ((0xD800 < ord(c) <= 0xDBFF)
and (idx < len(text) - 1)
and (0xDC00 < ord(text[idx + 1]) <= 0xDFFF)):
c = text[idx:idx+2]
# Skip the other half of the lead and trail surrogate
idx += 1
idx += 1
yield c
def build_emoji_table():
with codecs.open(rel('emojis.txt'), 'r', encoding='utf-8') as f:
text = f.read()
uniques = set()
for c in iterchars(text):
# Only use unicode chars >= 0x100 (emoji etc.)
if len(c) >= 2 or ord(c) >= 0x100:
if c not in uniques:
emojis.append(c)
uniques.add(c)
def print_emoji_table():
per_line = 32
for i in range(len(emojis)):
if i % per_line == 0:
write("%3d" % i, end=' ')
write(emojis[i], end=' ')
if i % per_line == per_line - 1:
writeline()
writeline()
def print_emoji_histo(histo):
all_users = set()
for year, users in histo.items():
all_users.update(users)
all_users = sorted(all_users)
num_users = len(all_users)
for year, users in histo.items():
chars = [str(year), ' ']
for i in range(num_users):
if all_users[i] in users:
c = emojis[all_users[i]] + ' '
else:
c = ' '
chars.append(c)
write(''.join(chars))
write('\n')
def print_histo(histo, utcoffset=0):
maximum = max(histo.values())
max_display = 60
for k in sorted(histo):
if utcoffset != 0:
localhour = (k - utcoffset) % 24
else:
localhour = k
v = histo.get(localhour, 0)
stars = '*' * int(v * max_display / float(maximum))
write("%3d %5d %s" % (k, v, stars))
writeline()
def gidtable(filename):
with open(filename, 'rb') as f:
xcodeproj = f.read()
root, parseinfo = xcodeprojer.parse(xcodeproj)
if root is not None:
unparser = xcodeprojer.Unparser(root)
# We don't need the parse tree, only access to the gidcomments
# that are built during the unparse.
_ = unparser.unparse(root, projectname=xcodeprojer.projectname_for_path(filename))
gidcomments = unparser.gidcomments
c = '.'
else:
gidcomments = {}
c = 'X'
sys.stdout.write(c)
sys.stdout.flush()
return filename, gidcomments
def histogram(args, utcoffset=0):
if args.emoji or args.emojitable:
write("Please be patient when your computer is caching emoji fonts for you. This might take a minute.\n")
build_emoji_table()
if args.emojitable:
print_emoji_table()
return
path = args.directory
histo_year = Counter()
histo_hour = Counter()
users_per_year = defaultdict(set)
pool = multiprocessing.Pool(initializer=utils.per_process_init)
filenames = xcodeprojer.find_projectfiles(path)
results = []
write("Looking for Xcode ids in project files...")
sys.stdout.flush()
for idx, filename in enumerate(filenames):
results.append(pool.apply_async(gidtable, [filename]))
if args.max_files is not None and idx + 1 >= args.max_files:
break
pool.close()
try:
for asyncresult in results:
filename, gids = asyncresult.get()
for gid in gids:
fields = xcodeprojer.gidfields(gids, gid)
refdate = fields['date']
dt = xcodeprojer.datetime_from_utc(refdate)
histo_hour[dt.hour] += 1
year = dt.year
if args.startyear <= year <= args.endyear:
histo_year[year] += 1
users_per_year[year].add(fields['user'])
except (KeyboardInterrupt, GeneratorExit):
pool.terminate()
finally:
pool.join()
writeline()
write("At which hours are new Xcode ids created (UTC time offset: %d)" % args.utcoffset)
print_histo(histo_hour, utcoffset=utcoffset)
write("In which years were the Xcode ids created (we only look at %s-%s)" % (args.startyear, args.endyear))
print_histo(histo_year)
write("Estimated number of users creating new Xcode ids by year")
user_histo = {k: len(v) for (k, v) in users_per_year.items()}
print_histo(user_histo)
writeline()
write("The following is a list of names that might be completely unrelated to the examined Xcode projects.")
write("For something for tangible replace firstnames.txt with your own list.")
writeline()
max_firstnames_limited = print_names(args, users_per_year, emoji=args.emoji)
if args.emoji:
write("Looking for Xcode ids in project files...")
print_emoji_histo(users_per_year)
if max_firstnames_limited and args.max_firstnames is None:
write("The number of first names to consider was limited to %d, this can be changed with --max-firstnames" % max_firstnames_limited)
def print_names(args, users_per_year, emoji=False):
userhashes = defaultdict(list)
max_firstnames = args.max_firstnames
if max_firstnames is None:
max_firstnames = DEFAULT_FIRSTNAMES
max_firstnames_limited = None
with codecs.open(rel('firstnames.txt'), 'r', encoding='utf-8') as f:
firstnames = f.read().splitlines()
for idx, name in enumerate(firstnames):
if idx >= max_firstnames:
max_firstnames_limited = max_firstnames
break
userhashes[user_hash(name)].append(name)
for year, hashes in sorted(users_per_year.items()):
write(str(year), end=' ')
for h in sorted(hashes):
candidates = userhashes[h]
if candidates:
if emoji:
symbol = emojis[h] + ' '
else:
symbol = ''
write(' (%s' % symbol + ' | '.join(candidates) + ')', end=' ')
writeline()
return max_firstnames_limited
def main():
parser = argparse.ArgumentParser(description='Show some histograms for a directory of Xcode project files.')
parser.add_argument('-u', '--utcoffset', type=int, default=-8, metavar='UTCOFFSET', help='UTC time offset, e.g. "-8" for California')
parser.add_argument('--startyear', type=int, default=2006)
parser.add_argument('--endyear', type=int, default=2014)
parser.add_argument('-n', '--max-files', action='store', type=int, default=None, help='maximum number of files to process')
parser.add_argument('--max-firstnames', action='store', type=int, default=None, help='maximum number of first names to consider')
parser.add_argument('--emoji', action='store_true', help='add emoji characters to userhashes')
parser.add_argument('--emojitable', action='store_true', help='only print the emoji table')
parser.add_argument('--profile', action='store_true', help='run everything through the profiler')
parser.add_argument('directory', help='directory with Xcode project files')
args = parser.parse_args()
if args.profile:
write('Profiling...')
utils.profile('call_command(args)', locals(), globals())
else:
call_command(args)
def call_command(args):
histogram(args, utcoffset=args.utcoffset)
if __name__ == '__main__':
main()
|
mikr/xcodeprojer
|
examples/gidhistograms.py
|
Python
|
mit
| 9,342
|
# coding=utf-8
from __init__ import *
from dao.dbArticle import SolutionArticle
from dao.dbTag import Tag
def generate_tags(data):
tag_list = []
for tag in data:
if tag == '':
continue
has_tag = Tag.query.filter(Tag.name == tag).first()
if not has_tag:
new_tag = Tag(tag)
new_tag.save()
tag_list.append(new_tag)
else:
tag_list.append(has_tag)
return tag_list
def post(form, user, is_draft):
has = SolutionArticle.query.filter(SolutionArticle.id == form.sid.data).first()
tags = generate_tags(form.tags.data)
content_list = form.content.data.split('<-more->')
list_len = len(content_list)
if list_len > 2:
raise Exception(u'The <-more-> tag is used more times than allowed')
if has and has.user != user and user.is_admin == 0:
raise Exception(u'You do not have permission to modify this article')
if not has:
has = SolutionArticle(form.title.data,user)
else:
has.title = form.title.data
has.last_update_time = datetime.now()
if list_len == 1 :
has.md_shortcut = content_list[0]
has.md_content = ""
elif content_list[0].strip() == "" :
has.md_shortcut = content_list[1]
has.md_content = ""
else:
has.md_shortcut = content_list[0]
has.md_content = content_list[1]
oj = form.problem_oj_name.data
pid = form.problem_pid.data
has.is_top = form.is_top.data
has.is_draft = is_draft
has.problem_oj_name = oj
has.problem_pid = pid
has.tags = tags
has.save()
def filter_query(query_type=None, keyword=''):
if query_type == 'title' and keyword != '':
query = SolutionArticle.query.filter(SolutionArticle.title.like('%' + keyword + '%'))
elif query_type == 'tag' and keyword != '':
tag_row = Tag.query.filter(Tag.name==keyword).first()
query = tag_row.solutions if tag_row else None
else:
query = SolutionArticle.query
return query
def get_list(offset=0, limit=20, user=None, query_type=None, keyword=''):
if not user:
query = filter_query(query_type, keyword)
return query.filter(SolutionArticle.is_draft==0).\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
offset(offset).limit(limit).all() if query else []
elif user.is_admin:
return SolutionArticle.query.\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
offset(offset).limit(limit).all()
elif user.is_coach:
return SolutionArticle.query.join(SolutionArticle.user)\
.filter(User.school==user.school, User.rights < 4).\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
offset(offset).limit(limit).all()
else:
return SolutionArticle.query.filter(SolutionArticle.user==user).\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
offset(offset).limit(limit).all()
def get_count(user=None, query_type=None, keyword=''):
if not user:
query = filter_query(query_type, keyword)
return query.filter(SolutionArticle.is_draft==0).\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
count() if query else 0
elif user.is_admin:
return SolutionArticle.query.\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
count()
elif user.is_coach:
return SolutionArticle.query.join(SolutionArticle.user)\
.filter(User.school==user.school, User.rights < 4).\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
count()
else:
return SolutionArticle.query.filter(SolutionArticle.user==user).\
order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc()).\
count()
def get_recent(limit=5):
return get_list(0, limit)
def get_by_id(sid):
return SolutionArticle.query.filter(SolutionArticle.id == sid).first_or_404()
def delete_by_id(sid):
SolutionArticle.query.filter(SolutionArticle.id == sid).with_lockmode('update').delete()
db.session.commit()
def get_archive():
archive = db.session\
.query(SolutionArticle.last_update_time, SolutionArticle.title, SolutionArticle.url, SolutionArticle.is_top)\
.filter(SolutionArticle.is_draft==0)\
.order_by(SolutionArticle.is_top.desc(),SolutionArticle.last_update_time.desc())\
.all()
archives = dict()
for article in archive:
year = article.last_update_time.year
if year not in archives:
archives[year] = []
archives[year].append(article)
return archives
def get_archive_by_tag(tag):
tag_row = Tag.query.filter(Tag.name==tag).first()
if not tag_row:
return None
archive = tag_row.solutions\
.filter(SolutionArticle.is_draft==0)\
.order_by(SolutionArticle.is_top.desc(), SolutionArticle.last_update_time.desc())\
.all()
archives = dict()
for article in archive:
year = article.last_update_time.year
if year not in archives:
archives[year] = []
archives[year].append(article)
return archives
def get_all_tags():
tags_row = Tag.query.filter(Tag.solutions!=None).all()
tags = []
for tag in tags_row:
if tag.solutions.filter(SolutionArticle.is_draft==0).count():
tags.append(tag)
return tags
def related_submits(article, offset=0, limit=10):
if article.problem_oj_name == '' or article.problem_pid == '':
return []
query = Submit.query.filter(Submit.oj_name==article.problem_oj_name,Submit.pro_id==article.problem_pid)\
#filter(or_(Submit.result == 'OK', Submit.result == 'Accepted')).all()
return query.offset(offset).limit(limit).all()
def related_submits_count(article):
if article.problem_oj_name == '' or article.problem_pid == '':
return 0
query = Submit.query.filter(Submit.oj_name==article.problem_oj_name,Submit.pro_id==article.problem_pid)\
#filter(or_(Submit.result == 'OK', Submit.result == 'Accepted')).all()
return query.count()
|
Raynxxx/CUIT-ACM-Website
|
server/article_server.py
|
Python
|
mit
| 6,348
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/armor/ithorian_defender/shared_ith_armor_s01_bicep_r.iff"
result.attribute_template_id = 0
result.stfName("wearables_name","ith_armor_s01_bicep_r")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/wearables/armor/ithorian_defender/shared_ith_armor_s01_bicep_r.py
|
Python
|
mit
| 494
|
# include for aggregation
from django.db.models import Case, IntegerField, Sum, Value, When
from django.db.models import CharField
# ------------------------------------------
# imports needed for the functional view
from rest_framework.response import Response
# ------------------------------------------
# ------------------------------------------
# generics class to make writing endpoints easier
from rest_framework import generics
# ------------------------------------------
# main pieces from our DRF app that need to be linked
from . import models
from . import serializers
from . import filters
# ------------------------------------------
LA_Bureaus = ['MF']
EO_Bureaus = ['MY', 'PA', 'PS', 'PW', 'PU', 'AU']
class ListOcrb(generics.ListAPIView):
"""
Operating and Capital Requirements by Bureau (OCRB).
Note: Parameter values are compared case-insensitively.
"""
serializer_class = serializers.OcrbSerializer
filter_class = filters.OcrbFilter
def get_queryset(self):
return models.OCRB.objects.order_by('-fiscal_year', 'budget_type', 'service_area', 'bureau', 'budget_category')
class OcrbSummary(generics.ListAPIView):
"""
Summarize Budget for Operating and Capital Requirements by Service Area and Bureau
"""
serializer_class = serializers.OcrbSumSerializer
filter_class = filters.OcrbSummaryFilter
def get_queryset(self):
return models.OCRB.objects.values('fiscal_year', 'service_area', 'bureau')\
.annotate(bureau_total=Sum('amount'))\
.order_by('fiscal_year', 'service_area', 'bureau')
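# Editorial note: calling .values(...) before .annotate(...) makes Django GROUP BY the listed
# columns, so bureau_total is SUM(amount) per (fiscal_year, service_area, bureau) row.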
class ListKpm(generics.ListAPIView):
"""
Key Performance Measures (KPM).
Note: Parameter values are compared case-insensitively.
"""
queryset = models.KPM.objects.all()
serializer_class = serializers.KpmSerializer
filter_class = filters.KpmFilter
class ListBudgetHistory(generics.ListAPIView):
"""
Historical Operating and Capital Requirements by Service Area and Bureau
Note: Parameter values are compared case-insensitively.
"""
serializer_class = serializers.BudgetHistorySerializer
filter_class = filters.BudgetHistoryFilter
def get_queryset(self):
return models.BudgetHistory.objects.order_by('fiscal_year', 'bureau_name', 'accounting_object_name', 'functional_area_name')
class HistorySummaryByBureau(generics.ListAPIView):
"""
Summary of Historical Operating and Capital Requirements by Service Area and Bureau
"""
serializer_class = serializers.HistorySummaryBureauSerializer
filter_class = filters.HistoryBureauFilter
def get_queryset(self):
"""
Append the calculated service area based on business logic.
(Some bureaus are in service areas not reflected by the data)
"""
qs = models.BudgetHistory.objects.all()
qs = qs.values('fiscal_year', 'service_area_code', 'bureau_code', 'bureau_name').annotate(
sa_calced=Case(
When(bureau_code__in = LA_Bureaus, then = Value('LA')),
When(bureau_code__in = EO_Bureaus, then = Value('EO')),
default = 'service_area_code',
output_field = CharField()
),
amount=Sum('amount'))
qs = qs.order_by('fiscal_year', 'service_area_code', 'bureau_code', 'bureau_name')
return qs
class HistorySummaryByServiceArea(generics.ListAPIView):
"""
Summary of BudgetHistory by Service Area.
"""
serializer_class = serializers.HistorySummaryByServiceAreaSerializer
filter_class = filters.HistoryServiceAreaFilter
def get_queryset(self):
"""
Calculate service area based on business logic.
(Some bureaus are in service areas not reflected by the data)
"""
qs = models.BudgetHistory.objects.all()
qs = qs.values('fiscal_year', ).annotate(
sa_calced=Case(
When(bureau_code__in = LA_Bureaus, then = Value('LA')),
When(bureau_code__in = EO_Bureaus, then = Value('EO')),
default = 'service_area_code',
output_field = CharField()
),
amount=Sum('amount'),
)
qs = qs.order_by('fiscal_year', 'sa_calced')
return qs
class HistorySummaryByServiceAreaObjectCode(generics.ListAPIView):
"""
Summary of Historical Operating and Capital Requirements by Service Area and Object Code
"""
serializer_class = serializers.HistorySummaryByServiceAreaObjectCodeSerializer
filter_class = filters.HistoryObjectCode
def get_queryset(self):
qs = models.BudgetHistory.objects.all()
qs = qs.values('fiscal_year', 'service_area_code', 'object_code').annotate(amount=Sum('amount'))
qs = qs.order_by('fiscal_year', 'service_area_code', 'object_code')
return qs
class ListLookupCode(generics.ListAPIView):
"""
Code reference table for Budget History.
Note: Parameter values are compared case-insensitively.
"""
serializer_class = serializers.LookupCodeSerializer
filter_class = filters.LookupCodeFilter
def get_queryset(self):
return models.LookupCode.objects.all()
|
hackoregon/team-budget
|
budget_proj/budget_app/views.py
|
Python
|
mit
| 5,220
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Font(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.yaxis.title"
_path_str = "layout.yaxis.title.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.yaxis.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.yaxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.yaxis.title.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
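# --- Editorial usage sketch (illustrative, not part of the generated file) ---
# The property path mirrors the class path, so the same font can be set with nested dicts,
# which plotly coerces into this class:
#
#   import plotly.graph_objects as go
#   fig = go.Figure(layout=dict(yaxis=dict(title=dict(text="y", font=dict(family="Arial", size=14, color="#444")))))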
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/layout/yaxis/title/_font.py
|
Python
|
mit
| 8,542
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/food/shared_drink_aludium_pu36.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/draft_schematic/food/shared_drink_aludium_pu36.py
|
Python
|
mit
| 452
|
"""Fetch Massacheusetts Department of Transportation feeds.
MassDOT supplies the feeds for MA not covered by MBTA (Boston's transit authority).
http://www.massdot.state.ma.us/DevelopersData.aspx
"""
import logging
from FeedSource import FeedSource
BASE_URL = 'http://www.massdot.state.ma.us/Portals/0/docs/developers/'
LOG = logging.getLogger(__name__)
class Massdot(FeedSource):
"""Fetch MassDOT (MA, non-Boston) feeds."""
def __init__(self):
super(Massdot, self).__init__()
berkshire_url = '%sbrta_google_transit.zip' % BASE_URL
brockton_url = '%sbat_google_transit.zip' % BASE_URL
cape_ann_url = '%scata_google_transit.zip' % BASE_URL
cape_cod_url = '%sccrta_google_transit.zip' % BASE_URL
franklin_url = '%sfrta_google_transit.zip' % BASE_URL
attleboro_url = '%sgatra_google_transit.zip' % BASE_URL
lowell_url = '%slrta_google_transit.zip' % BASE_URL
merrimack_url = '%smvrta_google_transit.zip' % BASE_URL
metrowest_url = '%smwrta_google_transit.zip' % BASE_URL
montachusett_url = '%smart_google_transit.zip' % BASE_URL
nantucket_url = '%snrta_google_transit.zip' % BASE_URL
pioneer_valley_url = 'http://www.pvta.com/g_trans/google_transit.zip'
southeastern_url = '%ssrta_google_transit.zip' % BASE_URL
vineyard_url = '%svta_google_transit.zip' % BASE_URL
worchester_url = '%swrta_google_transit.zip' % BASE_URL
ma_ferry_url = '%sferries_google_transit.zip' % BASE_URL
# private bus services; these feeds tend to have validation issues
bloom_url = '%sBloom_google_transit.zip' % BASE_URL
boston_express_url = '%sboston_express_google_transit.zip' % BASE_URL
coach_bus_url = '%scoach_google_transit.zip' % BASE_URL
dattco_url = '%sdattco_google_transit.zip' % BASE_URL
peter_pan_url = '%speter_pan_google_transit.zip' % BASE_URL
plymouth_brockton_railway_url = '%sPB_google_transit.zip' % BASE_URL
yankee_url = '%syankee_google_transit.zip' % BASE_URL
self.urls = {
'berkshire.zip': berkshire_url,
'brockton.zip': brockton_url,
'cape_ann.zip': cape_ann_url,
'cape_cod.zip': cape_cod_url,
'franklin.zip': franklin_url,
'attleboro.zip': attleboro_url,
'lowell.zip': lowell_url,
'merrimack.zip': merrimack_url,
'metrowest.zip': metrowest_url,
'montachusett.zip': montachusett_url,
'nantucket.zip': nantucket_url,
'pioneer_valley.zip': pioneer_valley_url,
'southeastern_ma.zip': southeastern_url,
'vineyard_ma.zip': vineyard_url,
'worchester.zip': worchester_url,
'ma_ferries.zip': ma_ferry_url,
'bloom_ma.zip': bloom_url,
'boston_express.zip': boston_express_url,
'coach_bus_ma.zip': coach_bus_url,
'dattco_ma.zip': dattco_url,
'peter_pan_ma.zip': peter_pan_url,
'plymouth_brockton_rail.zip': plymouth_brockton_railway_url,
'yankee_ma.zip': yankee_url
}
|
azavea/gtfs-feed-fetcher
|
feed_sources/Massdot.py
|
Python
|
mit
| 3,164
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_individual.py
Test the individual class.
Created by Måns Magnusson on 2013-03-07.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
import sys
import os
from ped_parser import individual
class TestIndividual(object):
"""Test class for testing how the individual class behave"""
def setup_class(self):
"""Setup a simple family with family id 1, sick daughter id 1, healthy father id 2, healthy mother id 3"""
self.daughter = individual.Individual(
ind='1',
family='1',
mother='3',
father='2',
sex=2,
phenotype=2
)
self.father = individual.Individual(
ind='2',
family='1',
mother='0',
father='0',
sex=1,
phenotype=1
)
self.mother = individual.Individual(
ind='3',
family='1',
mother='0',
father='0',
sex=2,
phenotype=1
)
self.random_individual = individual.Individual(ind='0')
def test_daughter(self):
"""Test if the information about the daughter comes out correctly."""
assert self.daughter.affected
assert self.daughter.has_parents
assert self.daughter.sex == 2
def test_father(self):
"""Test if the information about the father comes out correctly."""
assert not self.father.affected
assert not self.father.has_parents
assert self.father.sex == 1
def test_mother(self):
"""Test if the information about the mother comes out correctly."""
assert not self.mother.affected
assert not self.mother.has_parents
assert self.mother.sex == 2
def test_random_individual(self):
"""Test if the information about the father comes out correctly."""
assert not self.random_individual.affected
assert not self.random_individual.has_parents
assert self.random_individual.sex == 0
def main():
pass
if __name__ == '__main__':
main()
|
willtownes/ped_parser
|
tests/test_individual.py
|
Python
|
mit
| 2,845
|
# This is an example of popping a packet from the Emotiv class's packet queue
# and printing the gyro x and y values to the console.
from emokit.emotiv import Emotiv
import platform
if platform.system() == "Windows":
import socket # Needed to prevent gevent crashing on Windows. (surfly / gevent issue #459)
import gevent
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
is_running = True
def evt_main(ring_buf):
global is_running  # ensure the finally clause updates the module-level flag rather than a local
headset = Emotiv()
gevent.spawn(headset.setup)
gevent.sleep(0)
pos = 0
try:
while True:
packet = headset.dequeue()
print packet.gyro_x, packet.gyro_y
ring_buf[pos] = packet.gyro_x
if pos % 4 == 0:
yield ring_buf
pos = (pos + 1) % 1024
gevent.sleep(0)
except KeyboardInterrupt:
headset.close()
finally:
is_running = False
headset.close()
x = np.linspace(0, 1023, 1024)
test_buf = np.zeros(1024)
fig, ax = plt.subplots()
line, = ax.plot(x, test_buf)
plt.axis([0, 1024, -100, 100])
def evt_wrapper():
def gen():
return evt_main(test_buf)
return gen
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
def animate(rb):
print "Animation!"
print rb
line.set_ydata(rb)
return line,
def counter():
i = 0
while is_running:
yield i
i = i + 1
ani = animation.FuncAnimation(fig, animate, evt_wrapper(), init_func=init, interval=20, blit=True)
plt.show()
# gevent.Greenlet.spawn(evt_main, test_buf)
while True:
gevent.sleep(0)
|
cactorium/UCFBrainStuff
|
seniordesign/emokit/gyro_plot.py
|
Python
|
mit
| 1,625
|
"Messages used to internally control thesplog settings."
from thespian.actors import ActorSystemMessage
class SetLogging(ActorSystemMessage):
def __init__(self, threshold, useLogging, useFile):
self.threshold = threshold
self.useLogging = useLogging
self.useFile = useFile
|
kquick/Thespian
|
thespian/system/messages/logcontrol.py
|
Python
|
mit
| 308
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listsincelast RPC."""
from test_framework.test_framework import StarwelsTestFramework
from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error
class ListSinceBlockTest (StarwelsTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def run_test(self):
self.nodes[2].generate(101)
self.sync_all()
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
def test_no_blockhash(self):
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
self.sync_all()
txs = self.nodes[0].listtransactions()
assert_array_result(txs, {"txid": txid}, {
"category": "receive",
"amount": 1,
"blockhash": blockhash,
"confirmations": 1,
})
assert_equal(
self.nodes[0].listsinceblock(),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
assert_equal(
self.nodes[0].listsinceblock(""),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
def test_invalid_blockhash(self):
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"0000000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"invalid-hex")
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
ab0
/ \
aa1 [tx0] bb1
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
# Split network into two
self.split_network()
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
self.log.info('lastblockhash=%s' % (lastblockhash))
self.sync_all([self.nodes[:2], self.nodes[2:]])
self.join_network()
# listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
found = False
for tx in lsbres['transactions']:
if tx['txid'] == senttx:
found = True
break
assert found
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
blocks as part of a reorg.
ab0
/ \
aa1 [tx1] bb1 [tx2]
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Problematic case:
1. User 1 receives USDH in tx1 from utxo1 in block aa1.
2. User 2 receives USDH in tx2 from utxo1 (same) in block bb1
3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain.
5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
invalidated.
Currently the solution to this is to detect that a reorg'd block is
asked for in listsinceblock, and to iterate back over existing blocks up
until the fork point, and to include all transactions that relate to the
node wallet.
'''
self.sync_all()
# Split network into two
self.split_network()
# share utxo between nodes[1] and nodes[2]
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
privkey = self.nodes[2].dumpprivkey(utxo['address'])
self.nodes[1].importprivkey(privkey)
# send from nodes[1] using utxo to nodes[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipientDict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[1].getnewaddress(): change,
}
utxoDicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
txid1 = self.nodes[1].sendrawtransaction(
self.nodes[1].signrawtransaction(
self.nodes[1].createrawtransaction(utxoDicts, recipientDict))['hex'])
# send from nodes[2] using utxo to nodes[3]
recipientDict2 = {
self.nodes[3].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
self.nodes[2].sendrawtransaction(
self.nodes[2].signrawtransaction(
self.nodes[2].createrawtransaction(utxoDicts, recipientDict2))['hex'])
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(4)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"
# listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# but it should not include 'removed' if include_removed=false
lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
assert 'removed' not in lsbres2
def test_double_send(self):
'''
This tests the case where the same transaction is submitted twice on two
separate blocks as part of a reorg. The former will vanish and the
latter will appear as the true transaction (with confirmations dropping
as a result).
ab0
/ \
aa1 [tx1] bb1
| |
aa2 bb2
| |
aa3 bb3 [tx1]
|
bb4
Asserted:
1. tx1 is listed in listsinceblock.
2. It is included in 'removed' as it was removed, even though it is now
present in a different block.
3. It is listed with a confirmations count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
self.sync_all()
# Split network into two
self.split_network()
# create and sign a transaction
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipientDict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
utxoDicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
signedtxres = self.nodes[2].signrawtransaction(
self.nodes[2].createrawtransaction(utxoDicts, recipientDict))
assert signedtxres['complete']
signedtx = signedtxres['hex']
# send from nodes[1]; this will end up in aa1
txid1 = self.nodes[1].sendrawtransaction(signedtx)
# generate bb1-bb2 on right side
self.nodes[2].generate(2)
# send from nodes[2]; this will end up in bb3
txid2 = self.nodes[2].sendrawtransaction(signedtx)
assert_equal(txid1, txid2)
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(2)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
self.nodes[0].gettransaction(txid1)
# listsinceblock(lastblockhash) should now include txid1 in transactions
# as well as in removed
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# find transaction and ensure confirmations is valid
for tx in lsbres['transactions']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
# the same check for the removed array; confirmations should STILL be 2
for tx in lsbres['removed']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
if __name__ == '__main__':
ListSinceBlockTest().main()
| starwels/starwels | test/functional/wallet_listsinceblock.py | Python | mit | 9,614 |
from nose.tools import *
from localimport import localimport
import os
import sys
modules_dir = os.path.join(os.path.dirname(__file__), 'modules')
def test_localimport_with_autodisable():
sys.path.append(modules_dir)
import another_module as mod_a
try:
with localimport('modules') as _imp:
import some_module
import another_module as mod_b
assert 'some_module' in sys.modules
assert sys.modules['another_module'] is mod_b
assert 'some_module' not in sys.modules
assert sys.modules['another_module'] is mod_a
assert mod_a is not mod_b
finally:
sys.path.remove(modules_dir)
del sys.modules['another_module']
def test_localimport_without_autodisable():
sys.path.append(modules_dir)
import another_module as mod_a
try:
with localimport('modules', do_autodisable=False) as _imp:
import some_module
import another_module as mod_b
assert 'some_module' in sys.modules
assert sys.modules['another_module'] is mod_b
assert mod_a is mod_b
assert 'some_module' not in sys.modules
assert sys.modules['another_module'] is mod_a
finally:
sys.path.remove(modules_dir)
del sys.modules['another_module']
def test_localimport_parent_dir():
with localimport('.', parent_dir=modules_dir) as _imp:
import some_module
assert 'some_module' not in sys.modules
assert 'another_module' not in sys.modules
def test_localimport_curdir():
with localimport('.') as _imp:
import some_module
assert 'some_module' not in sys.modules
assert 'another_module' not in sys.modules
def test_discover():
with localimport('.') as _imp:
assert_equals(sorted(x.name for x in _imp.discover()), ['another_module', 'some_module', 'test_localimport'])
with localimport('modules') as _imp:
assert_equals(sorted(x.name for x in _imp.discover()), ['another_module', 'some_module'])
| NiklasRosenstein/localimport | tests/test_localimport.py | Python | mit | 1,885 |
import demistomock as demisto
incident = demisto.incidents()
data = {
"Type": 17,
"ContentsFormat": "pie",
"Contents": {
"stats": [
{
"data": [
int(incident[0].get('CustomFields', {}).get('xdrhighseverityalertcount', 0))
],
"groups": None,
"name": "high",
"label": "incident.severity.high",
"color": "rgb(255, 23, 68)"
},
{
"data": [
int(incident[0].get('CustomFields', {}).get('xdrmediumseverityalertcount', 0))
],
"groups": None,
"name": "medium",
"label": "incident.severity.medium",
"color": "rgb(255, 144, 0)"
},
{
"data": [
int(incident[0].get('CustomFields', {}).get('xdrlowseverityalertcount', 0))
],
"groups": None,
"name": "low",
"label": "incident.severity.low",
"color": "rgb(0, 205, 51)"
},
],
"params": {
"layout": "horizontal"
}
}
}
demisto.results(data)
| demisto/content | Packs/CortexXDR/Scripts/EntryWidgetPieAlertsXDR/EntryWidgetPieAlertsXDR.py | Python | mit | 1,252 |
from lib import incrementer
print(incrementer(2))
| pauleveritt/pycharm_polyglot | src/modules/app.py | Python | mit | 49 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interactive widgets for the Jupyter notebook.
Provide simple interactive controls in the notebook.
Each Widget corresponds to an object in Python and Javascript,
with controls on the page.
To put a Widget on the page, you can display it with IPython's display machinery::
from ipywidgets import IntSlider
from IPython.display import display
slider = IntSlider(min=1, max=10)
display(slider)
Moving the slider will change the value. Most Widgets have a current value,
accessible as a `value` attribute.
"""
import os
from IPython import get_ipython
from ._version import version_info, __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__
from .widgets import *
from traitlets import link, dlink
def load_ipython_extension(ip):
"""Set up IPython to work with widgets"""
if not hasattr(ip, 'kernel'):
return
register_comm_target(ip.kernel)
def register_comm_target(kernel=None):
"""Register the jupyter.widget comm target"""
if kernel is None:
kernel = get_ipython().kernel
kernel.comm_manager.register_target('jupyter.widget', Widget.handle_comm_opened)
# deprecated alias
handle_kernel = register_comm_target
def _handle_ipython():
"""Register with the comm target at import if running in IPython"""
ip = get_ipython()
if ip is None:
return
load_ipython_extension(ip)
_handle_ipython()
| sserrot/champion_relationships | venv/Lib/site-packages/ipywidgets/__init__.py | Python | mit | 1,536 |
"""
Scrapy settings for RecipesScraper project.
"""
# Names
BOT_NAME = 'RecipesScraper'
SPIDER_MODULES = ['RecipesScraper.spiders']
NEWSPIDER_MODULE = 'RecipesScraper.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Configure item pipelines
ITEM_PIPELINES = {
'RecipesScraper.pipelines.JsonPipeline': 300,
}
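# A hypothetical sketch of what a pipeline such as the JsonPipeline referenced above
# usually looks like (the project's real implementation lives in
# RecipesScraper/pipelines.py and may differ). Scrapy calls process_item() for every
# scraped item once the class is listed in ITEM_PIPELINES.
#
#   import json
#
#   class JsonPipeline(object):
#       def open_spider(self, spider):
#           self.file = open('recipes.jl', 'w')  # hypothetical output path
#
#       def process_item(self, item, spider):
#           self.file.write(json.dumps(dict(item)) + '\n')
#           return item
#
#       def close_spider(self, spider):
#           self.file.close()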
# Enable and configure the AutoThrottle extension (disabled by default)
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 3
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
| brandonmburroughs/food2vec | dat/RecipesScraper/RecipesScraper/settings.py | Python | mit | 864 |
#!/usr/bin/python
# Script to download the City table
import MySQLdb
file_ = open('city.csv', 'w')
file_.write ('city_id,city,country_id\n')
db = MySQLdb.connect( user='etudiants',
passwd='etudiants_1',
host='192.168.99.100',
db='sakila')
cur = db.cursor()
cur.execute("SELECT * FROM city")
for row in cur.fetchall():
file_.write(str(row[0])+','+ row[1]+','+ str(row[2])+'\n')
db.close()
file_.close()
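# A minimal alternative sketch (not part of the original script), assuming the same
# sakila connection details: the csv module quotes fields that contain commas, which
# the plain string concatenation above does not handle.
#
#   import csv
#   import MySQLdb
#
#   db = MySQLdb.connect(user='etudiants', passwd='etudiants_1',
#                        host='192.168.99.100', db='sakila')
#   cur = db.cursor()
#   cur.execute("SELECT city_id, city, country_id FROM city")
#   with open('city.csv', 'w') as f:
#       writer = csv.writer(f)
#       writer.writerow(['city_id', 'city', 'country_id'])
#       writer.writerows(cur.fetchall())
#   db.close()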
| setrar/INF1069 | C.PYTHON/mysql.py | Python | mit | 475 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ObservationTools documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 30 14:32:48 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ObservationTools'
copyright = '2017, IA'
author = 'IA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ObservationToolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ObservationTools.tex', 'ObservationTools Documentation',
'IA', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'observationtools', 'ObservationTools Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ObservationTools', 'ObservationTools Documentation',
author, 'ObservationTools', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| iastro-pt/ObservationTools | docs/source/conf.py | Python | mit | 5,035 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/event_perk/shared_fire_pit_deed.iff"
result.attribute_template_id = 2
result.stfName("event_perk","fire_pit_name")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| anhstudios/swganh | data/scripts/templates/object/tangible/deed/event_perk/shared_fire_pit_deed.py | Python | mit | 456 |
#!/usr/bin/env python
"""
Given an int n, return True if it is within 10 of 100 or 200. Note: abs(num) computes the absolute value of a number.
near_hundred(93) == True
near_hundred(90) == True
near_hundred(89) == False
"""
def near_hundred(n):
return 190 <= abs(n) <= 210 or 90 <= abs(n) <= 110
def test_function():
assert near_hundred(93) == True
assert near_hundred(90) == True
assert near_hundred(89) == False
assert near_hundred(110) == True
assert near_hundred(111) == False
assert near_hundred(121) == False
assert near_hundred(0) == False
assert near_hundred(5) == False
assert near_hundred(191) == True
assert near_hundred(189) == False
assert near_hundred(190) == True
assert near_hundred(200) == True
assert near_hundred(210) == True
assert near_hundred(211) == False
assert near_hundred(290) == False
if __name__ == '__main__':
test_function()
| marshallhumble/Euler_Groovy | Coding_Bat/Python/Warmup_1/near_hundred.py | Python | mit | 932 |
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'django_netjsongraph.db',
}
}
SECRET_KEY = 'fn)t*+$)ugeyip6-#txyy$5wf2ervc0d2n#h)qb)y5@ly$t*@w'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'openwisp_utils.admin_theme',
'django_netjsongraph',
'django.contrib.admin',
# rest framework
'rest_framework',
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'openwisp_utils.staticfiles.DependencyFinder',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TIME_ZONE = 'Europe/Rome'
LANGUAGE_CODE = 'en-gb'
USE_TZ = True
USE_I18N = False
USE_L10N = False
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse',},
'require_debug_true': {'()': 'django.utils.log.RequireDebugTrue',},
},
'formatters': {
'simple': {'format': '[%(levelname)s] %(message)s'},
'verbose': {
'format': '\n\n[%(levelname)s %(asctime)s] module: %(module)s, process: %(process)d, thread: %(thread)d\n%(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['require_debug_true'],
'formatter': 'simple',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
},
        'main_log': {
            'level': 'ERROR',
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'verbose',
            'filename': os.path.join(BASE_DIR, 'error.log'),
            'maxBytes': 5242880,  # 5 MiB
            'backupCount': 3,
        },
},
'root': {'level': 'INFO', 'handlers': ['main_log', 'console', 'mail_admins'],},
'loggers': {'py.warnings': {'handlers': ['console'],}},
}
TEST_RUNNER = "django_netjsongraph.tests.utils.LoggingDisabledTestRunner"
# local settings must be imported before test runner otherwise they'll be ignored
try:
from local_settings import *
except ImportError:
pass
| interop-dev/django-netjsongraph | tests/settings.py | Python | mit | 3,592 |
#!/usr/local/bin/python
import os
# import ycm_core
# return the filename in the path without extension
def findFileName(path, ext):
name = ''
for projFile in os.listdir(path):
# cocoapods will generate _Pods.xcodeproj as well
if projFile.endswith(ext) and not projFile.startswith('_Pods'):
name= projFile[:-len(ext):]
return name
# WARNING!! No / in the end
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def findProjectName(working_directory):
projectName = findFileName(working_directory, '.xcodeproj')
if len(projectName) <= 0:
# cocoapod projects
projectName = findFileName(working_directory, '.podspec')
return projectName
flags = [
# TODO: find the correct cache path automatically
'-D__IPHONE_OS_VERSION_MIN_REQUIRED=80000',
'-miphoneos-version-min=9.3',
'-arch', 'arm64',
'-fblocks',
'-fmodules',
'-fobjc-arc',
'-fobjc-exceptions',
'-fexceptions',
'-isystem',
'/Library/Developer/CommandLineTools/usr/include/c++/v1', # for c++ headers <string>, <iostream> definition
'-x',
'objective-c',
'-Wno-#pragma-messages',
'-Wno-#warnings',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks',
# '-I/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks/Foundation.framework/Headers',
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
# '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1'
# '-I/Library/Developer/CommandLineTools/usr/include',
#custom definition, include subfolders
'-ProductFrameworkInclude', # include the framework in the products(in derivedData) folder
'-I./Example/'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
'-ISUB./Pod/Classes', # old cocoapods directory
'-ISUB./'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
# use headers in framework instead
#'-ISUB./Example/Pods', # new cocoapods directory
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/Kiwi/',
# '-include',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
'-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk'
# '-fencode-extended-block-signature', #libclang may report error on this
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include', # let IncludeClangInXCToolChain handle it
# include-pch will make YouCompleteMe show 'no errors founded'
# '-include-pch',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
# modules failed trials
# '-fmodule-implementation-of',
# '-fimplicit-module-maps',
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/CocoaLumberjack',
# '-Wnon-modular-include-in-framework-module',
]
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# if os.path.exists( compilation_database_folder ):
# database = ycm_core.CompilationDatabase( compilation_database_folder )
# else:
# we don't use compilation database
database = None
def Subdirectories(directory):
res = []
for path, subdirs, files in os.walk(directory):
for name in subdirs:
item = os.path.join(path, name)
res.append(item)
return res
def sorted_ls(path):
mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=mtime))
def IncludeClangInXCToolChain(flags, working_directory):
if not working_directory:
return list( flags )
new_flags = list(flags)
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include',
path = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/'
clangPath = sorted_ls(path)[::-1] # newest file first
includePath = ''
if (len(clangPath) > 0):
includePath = os.path.join('', *[path, clangPath[0], 'include'])
new_flags.append('-I'+includePath)
return new_flags
def FindDerivedDataPath( derivedDataPath, projectName ):
simulatorPaths = ['Build/Intermediates/CodeCoverage/Products/Debug-iphonesimulator/', # if you enable CodeCoverage, the framework of test target will be put in coverage folder, strange
'Build/Products/Debug-iphonesimulator/']
# search ~/Library/Developer/Xcode/DerivedData/ to find <project_name>-dliwlpgcvwijijcdxarawwtrfuuh
derivedPath = sorted_ls(derivedDataPath)[::-1] # newest file first
for productPath in derivedPath:
if productPath.lower().startswith( projectName.lower() ):
for simulatorPath in simulatorPaths:
projectPath = os.path.join('', *[derivedDataPath, productPath, simulatorPath])
if (len(projectPath) > 0) and os.path.exists(projectPath):
return projectPath # the lastest product is what we want (really?)
return ''
def IncludeFlagsOfFrameworkHeaders( flags, working_directory ):
if not working_directory:
return flags
new_flags = []
path_flag = '-ProductFrameworkInclude'
derivedDataPath = os.path.expanduser('~/Library/Developer/Xcode/DerivedData/')
# find the project name
projectName = findProjectName(working_directory)
if len(projectName) <= 0:
return flags
# add all frameworks in the /Build/Products/Debug-iphonesimulator/xxx/xxx.framework
for flag in flags:
if not flag.startswith( path_flag ):
new_flags.append(flag)
continue
projectPath = FindDerivedDataPath( derivedDataPath, projectName )
if (len(projectPath) <= 0) or not os.path.exists(projectPath):
continue
# iterate through all frameworks folders /Debug-iphonesimulator/xxx/xxx.framework
for frameworkFolder in os.listdir(projectPath):
frameworkPath = os.path.join('', projectPath, frameworkFolder)
if not os.path.isdir(frameworkPath):
continue
# framwork folder '-F/Debug-iphonesimulator/<framework-name>'
# solve <Kiwi/KiwiConfigurations.h> not found problem
new_flags.append('-F'+frameworkPath)
# the framework name might be different than folder name
# we need to iterate all frameworks
for frameworkFile in os.listdir(frameworkPath):
if frameworkFile.endswith('framework'):
# include headers '-I/Debug-iphonesimulator/xxx/yyy.framework/Headers'
# allow you to use #import "Kiwi.h". NOT REQUIRED, but I am too lazy to change existing codes
new_flags.append('-I' + os.path.join('', frameworkPath, frameworkFile,'Headers'))
return new_flags
def IncludeFlagsOfSubdirectory( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_include_subdir = False
path_flags = [ '-ISUB']
for flag in flags:
# include the directory of flag as well
new_flag = [flag.replace('-ISUB', '-I')]
if make_next_include_subdir:
make_next_include_subdir = False
for subdir in Subdirectories(os.path.join(working_directory, flag)):
new_flag.append('-I')
new_flag.append(subdir)
for path_flag in path_flags:
if flag == path_flag:
make_next_include_subdir = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
for subdir in Subdirectories(os.path.join(working_directory, path)):
new_flag.append('-I' + subdir)
break
new_flags =new_flags + new_flag
return new_flags
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
#add include subfolders as well
flags = IncludeFlagsOfSubdirectory( flags, working_directory )
#include framework header in derivedData/.../Products
flags = IncludeFlagsOfFrameworkHeaders( flags, working_directory )
#include libclang header in xctoolchain
flags = IncludeClangInXCToolChain( flags, working_directory )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
import time
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
# try:
# final_flags.remove( '-stdlib=libc++' )
# except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
# update .clang for chromatica every 5min TODO: very dirty
chromatica_file = DirectoryOfThisScript() + '/.clang'
if (not os.path.exists(chromatica_file)) or (time.time() - os.stat(chromatica_file).st_mtime > 5*60):
parsed_flags = IncludeFlagsOfSubdirectory( final_flags, DirectoryOfThisScript() )
escaped = [flag for flag in parsed_flags if " " not in flag] # chromatica doesn't handle space in flag
f = open(chromatica_file, 'w') # truncate the current file
f.write('flags='+' '.join(escaped))
f.close()
return {
'flags': final_flags,
'do_cache': True
}
# if __name__ == "__main__":
# print (FlagsForFile(""))
# flags = [
# '-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
# '-x',
# 'objective-c',
# '-ProductFrameworkInclude',
# '-ProductFrameworkInclude',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-ISUB./Pods/Headers/Public',
# '-MMD',
# ]
# print IncludeClangInXCToolChain(flags, DirectoryOfThisScript())
# print IncludeFlagsOfFrameworkHeaders( flags, DirectoryOfThisScript() )
# # res = subdirectory( DirectoryOfThisScript())
# flags = [
# '-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
# '-x',
# 'objective-c',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-ISUB./Pods/Headers/Public',
# '-MMD',
# ]
# print (IncludeFlagsOfSubdirectory( flags, DirectoryOfThisScript() ))
# res = IncludeFlagsOfSubdirectory( flags, DirectoryOfThisScript() )
# escaped = []
# for flag in res:
# if " " not in flag:
# escaped.append(flag)
# print ' '.join(escaped)
| haifengkao/ReactiveCache | .ycm_extra_conf.py | Python | mit | 13,022 |
# Program to make a simple calculator that can add, subtract, multiply and divide using functions
# define functions
def add(x, y):
"""This function adds two numbers"""
return x + y
def subtract(x, y):
"""This function subtracts two numbers"""
return x - y
def multiply(x, y):
"""This function multiplies two numbers"""
return x * y
def divide(x, y):
"""This function divides two numbers"""
return x / y
# take input from the user
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
choice = input("Enter choice(1/2/3/4):")
num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
if choice == '1':
print(num1,"+",num2,"=", add(num1,num2))
elif choice == '2':
print(num1,"-",num2,"=", subtract(num1,num2))
elif choice == '3':
print(num1,"*",num2,"=", multiply(num1,num2))
elif choice == '4':
print(num1,"/",num2,"=", divide(num1,num2))
else:
print("Invalid input")
| janusnic/21v-python | unit_02/calc/1.py | Python | mit | 991 |
from __future__ import absolute_import
import Cookie
import copy
import threading
import time
import urllib
import urlparse
from email.utils import parsedate_tz, formatdate, mktime_tz
import netlib
from netlib import http, tcp, odict, utils
from netlib.http import cookies
from .tcp import TCPHandler
from .primitives import KILL, ProtocolHandler, Flow, Error
from ..proxy.connection import ServerConnection
from .. import encoding, utils, controller, stateobject, proxy
HDR_FORM_URLENCODED = "application/x-www-form-urlencoded"
HDR_FORM_MULTIPART = "multipart/form-data"
CONTENT_MISSING = 0
class KillSignal(Exception):
pass
def send_connect_request(conn, host, port, update_state=True):
upstream_request = HTTPRequest(
"authority",
"CONNECT",
None,
host,
port,
None,
(1, 1),
odict.ODictCaseless(),
""
)
conn.send(upstream_request.assemble())
resp = HTTPResponse.from_stream(conn.rfile, upstream_request.method)
if resp.code != 200:
raise proxy.ProxyError(resp.code,
"Cannot establish SSL " +
"connection with upstream proxy: \r\n" +
str(resp.assemble()))
if update_state:
conn.state.append(("http", {
"state": "connect",
"host": host,
"port": port}
))
return resp
class decoded(object):
"""
A context manager that decodes a request or response, and then
re-encodes it with the same encoding after execution of the block.
Example:
with decoded(request):
request.content = request.content.replace("foo", "bar")
"""
def __init__(self, o):
self.o = o
ce = o.headers.get_first("content-encoding")
if ce in encoding.ENCODINGS:
self.ce = ce
else:
self.ce = None
def __enter__(self):
if self.ce:
self.o.decode()
def __exit__(self, type, value, tb):
if self.ce:
self.o.encode(self.ce)
class HTTPMessage(stateobject.StateObject):
"""
Base class for HTTPRequest and HTTPResponse
"""
def __init__(self, httpversion, headers, content, timestamp_start=None,
timestamp_end=None):
self.httpversion = httpversion
self.headers = headers
"""@type: odict.ODictCaseless"""
self.content = content
self.timestamp_start = timestamp_start
self.timestamp_end = timestamp_end
_stateobject_attributes = dict(
httpversion=tuple,
headers=odict.ODictCaseless,
content=str,
timestamp_start=float,
timestamp_end=float
)
_stateobject_long_attributes = {"content"}
def get_state(self, short=False):
ret = super(HTTPMessage, self).get_state(short)
if short:
if self.content:
ret["contentLength"] = len(self.content)
elif self.content == CONTENT_MISSING:
ret["contentLength"] = None
else:
ret["contentLength"] = 0
return ret
def get_decoded_content(self):
"""
Returns the decoded content based on the current Content-Encoding
header.
        Doesn't change the message itself or its headers.
"""
ce = self.headers.get_first("content-encoding")
if not self.content or ce not in encoding.ENCODINGS:
return self.content
return encoding.decode(ce, self.content)
def decode(self):
"""
Decodes content based on the current Content-Encoding header, then
removes the header. If there is no Content-Encoding header, no
action is taken.
Returns True if decoding succeeded, False otherwise.
"""
ce = self.headers.get_first("content-encoding")
if not self.content or ce not in encoding.ENCODINGS:
return False
data = encoding.decode(ce, self.content)
if data is None:
return False
self.content = data
del self.headers["content-encoding"]
return True
def encode(self, e):
"""
Encodes content with the encoding e, where e is "gzip", "deflate"
or "identity".
"""
# FIXME: Error if there's an existing encoding header?
self.content = encoding.encode(e, self.content)
self.headers["content-encoding"] = [e]
def size(self, **kwargs):
"""
Size in bytes of a fully rendered message, including headers and
HTTP lead-in.
"""
hl = len(self._assemble_head(**kwargs))
if self.content:
return hl + len(self.content)
else:
return hl
def copy(self):
c = copy.copy(self)
c.headers = self.headers.copy()
return c
def replace(self, pattern, repl, *args, **kwargs):
"""
Replaces a regular expression pattern with repl in both the headers
and the body of the message. Encoded content will be decoded
before replacement, and re-encoded afterwards.
Returns the number of replacements made.
"""
with decoded(self):
self.content, c = utils.safe_subn(
pattern, repl, self.content, *args, **kwargs
)
c += self.headers.replace(pattern, repl, *args, **kwargs)
return c
def _assemble_first_line(self):
"""
Returns the assembled request/response line
"""
raise NotImplementedError() # pragma: nocover
def _assemble_headers(self):
"""
Returns the assembled headers
"""
raise NotImplementedError() # pragma: nocover
def _assemble_head(self):
"""
Returns the assembled request/response line plus headers
"""
raise NotImplementedError() # pragma: nocover
def assemble(self):
"""
Returns the assembled request/response
"""
raise NotImplementedError() # pragma: nocover
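# Illustrative usage sketch (not part of the original module), assuming `resp` is an
# HTTPResponse whose body arrived gzip-encoded:
#
#   resp.decode()                        # inflate the body, drop Content-Encoding
#   resp.replace("http://", "https://")  # regex replace in headers and body
#   resp.encode("gzip")                  # re-compress and restore the header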
class HTTPRequest(HTTPMessage):
"""
An HTTP request.
Exposes the following attributes:
method: HTTP method
scheme: URL scheme (http/https)
    host: Target hostname of the request. This is not necessarily the
    directly upstream server (which could be another proxy), but it's always
the target server we want to reach at the end. This attribute is either
inferred from the request itself (absolute-form, authority-form) or from
the connection metadata (e.g. the host in reverse proxy mode).
port: Destination port
path: Path portion of the URL (not present in authority-form)
httpversion: HTTP version tuple, e.g. (1,1)
headers: odict.ODictCaseless object
content: Content of the request, None, or CONTENT_MISSING if there
is content associated, but not present. CONTENT_MISSING evaluates
to False to make checking for the presence of content natural.
form_in: The request form which mitmproxy has received. The following
values are possible:
- relative (GET /index.html, OPTIONS *) (covers origin form and
asterisk form)
- absolute (GET http://example.com:80/index.html)
- authority-form (CONNECT example.com:443)
Details: http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-25#section-5.3
form_out: The request form which mitmproxy will send out to the
destination
timestamp_start: Timestamp indicating when request transmission started
timestamp_end: Timestamp indicating when request transmission ended
"""
def __init__(
self,
form_in,
method,
scheme,
host,
port,
path,
httpversion,
headers,
content,
timestamp_start=None,
timestamp_end=None,
form_out=None
):
assert isinstance(headers, odict.ODictCaseless) or not headers
HTTPMessage.__init__(
self,
httpversion,
headers,
content,
timestamp_start,
timestamp_end
)
self.form_in = form_in
self.method = method
self.scheme = scheme
self.host = host
self.port = port
self.path = path
self.httpversion = httpversion
self.form_out = form_out or form_in
# Have this request's cookies been modified by sticky cookies or auth?
self.stickycookie = False
self.stickyauth = False
# Is this request replayed?
self.is_replay = False
_stateobject_attributes = HTTPMessage._stateobject_attributes.copy()
_stateobject_attributes.update(
form_in=str,
method=str,
scheme=str,
host=str,
port=int,
path=str,
form_out=str,
is_replay=bool
)
@property
def body(self):
return self.content
@classmethod
def from_state(cls, state):
f = cls(
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None)
f.load_state(state)
return f
def __repr__(self):
return "<HTTPRequest: {0}>".format(
self._assemble_first_line(self.form_in)[:-9]
)
@classmethod
def from_stream(
cls,
rfile,
include_body=True,
body_size_limit=None,
wfile=None):
"""
Parse an HTTP request from a file stream
Args:
rfile (file): Input file to read from
include_body (bool): Read response body as well
body_size_limit (bool): Maximum body size
            wfile (file): If specified, HTTP Expect headers are handled automatically
                by writing an HTTP 100 CONTINUE response to the stream.
Returns:
HTTPRequest: The HTTP request
Raises:
HttpError: If the input is invalid.
"""
timestamp_start, timestamp_end = None, None
timestamp_start = utils.timestamp()
if hasattr(rfile, "reset_timestamps"):
rfile.reset_timestamps()
protocol = http.http1.HTTP1Protocol(rfile=rfile, wfile=wfile)
req = protocol.read_request(
include_body = include_body,
body_size_limit = body_size_limit,
)
if hasattr(rfile, "first_byte_timestamp"):
# more accurate timestamp_start
timestamp_start = rfile.first_byte_timestamp
timestamp_end = utils.timestamp()
return HTTPRequest(
req.form_in,
req.method,
req.scheme,
req.host,
req.port,
req.path,
req.httpversion,
req.headers,
req.body,
timestamp_start,
timestamp_end
)
def _assemble_first_line(self, form=None):
form = form or self.form_out
if form == "relative":
request_line = '%s %s HTTP/%s.%s' % (
self.method, self.path, self.httpversion[0], self.httpversion[1]
)
elif form == "authority":
request_line = '%s %s:%s HTTP/%s.%s' % (
self.method, self.host, self.port, self.httpversion[0],
self.httpversion[1]
)
elif form == "absolute":
request_line = '%s %s://%s:%s%s HTTP/%s.%s' % (
self.method, self.scheme, self.host,
self.port, self.path, self.httpversion[0],
self.httpversion[1]
)
else:
raise http.HttpError(400, "Invalid request form")
return request_line
    # This list is adopted from legacy code.
# We probably don't need to strip off keep-alive.
_headers_to_strip_off = ['Proxy-Connection',
'Keep-Alive',
'Connection',
'Transfer-Encoding',
'Upgrade']
def _assemble_headers(self):
headers = self.headers.copy()
for k in self._headers_to_strip_off:
del headers[k]
if 'host' not in headers and self.scheme and self.host and self.port:
headers["Host"] = [utils.hostport(self.scheme,
self.host,
self.port)]
# If content is defined (i.e. not None or CONTENT_MISSING), we always
# add a content-length header.
if self.content or self.content == "":
headers["Content-Length"] = [str(len(self.content))]
return headers.format()
def _assemble_head(self, form=None):
return "%s\r\n%s\r\n" % (
self._assemble_first_line(form), self._assemble_headers()
)
def assemble(self, form=None):
"""
Assembles the request for transmission to the server. We make some
modifications to make sure interception works properly.
Raises an Exception if the request cannot be assembled.
"""
if self.content == CONTENT_MISSING:
raise proxy.ProxyError(
502,
"Cannot assemble flow with CONTENT_MISSING"
)
head = self._assemble_head(form)
if self.content:
return head + self.content
else:
return head
def __hash__(self):
return id(self)
def anticache(self):
"""
Modifies this request to remove headers that might produce a cached
response. That is, we remove ETags and If-Modified-Since headers.
"""
delheaders = [
"if-modified-since",
"if-none-match",
]
for i in delheaders:
del self.headers[i]
def anticomp(self):
"""
Modifies this request to remove headers that will compress the
resource's data.
"""
self.headers["accept-encoding"] = ["identity"]
def constrain_encoding(self):
"""
Limits the permissible Accept-Encoding values, based on what we can
decode appropriately.
"""
if self.headers["accept-encoding"]:
self.headers["accept-encoding"] = [
', '.join(
e for e in encoding.ENCODINGS if e in self.headers["accept-encoding"][0])]
def update_host_header(self):
"""
Update the host header to reflect the current target.
"""
self.headers["Host"] = [self.host]
def get_form(self):
"""
Retrieves the URL-encoded or multipart form data, returning an ODict object.
Returns an empty ODict if there is no data or the content-type
indicates non-form data.
"""
if self.content:
if self.headers.in_any("content-type", HDR_FORM_URLENCODED, True):
return self.get_form_urlencoded()
elif self.headers.in_any("content-type", HDR_FORM_MULTIPART, True):
return self.get_form_multipart()
return odict.ODict([])
def get_form_urlencoded(self):
"""
Retrieves the URL-encoded form data, returning an ODict object.
Returns an empty ODict if there is no data or the content-type
indicates non-form data.
"""
if self.content and self.headers.in_any(
"content-type",
HDR_FORM_URLENCODED,
True):
return odict.ODict(utils.urldecode(self.content))
return odict.ODict([])
def get_form_multipart(self):
if self.content and self.headers.in_any(
"content-type",
HDR_FORM_MULTIPART,
True):
return odict.ODict(
utils.multipartdecode(
self.headers,
self.content))
return odict.ODict([])
def set_form_urlencoded(self, odict):
"""
Sets the body to the URL-encoded form data, and adds the
        appropriate content-type header. Note that this will destroy the
existing body if there is one.
"""
# FIXME: If there's an existing content-type header indicating a
# url-encoded form, leave it alone.
self.headers["Content-Type"] = [HDR_FORM_URLENCODED]
self.content = utils.urlencode(odict.lst)
def get_path_components(self):
"""
Returns the path components of the URL as a list of strings.
Components are unquoted.
"""
_, _, path, _, _, _ = urlparse.urlparse(self.url)
return [urllib.unquote(i) for i in path.split("/") if i]
def set_path_components(self, lst):
"""
Takes a list of strings, and sets the path component of the URL.
Components are quoted.
"""
lst = [urllib.quote(i, safe="") for i in lst]
path = "/" + "/".join(lst)
scheme, netloc, _, params, query, fragment = urlparse.urlparse(self.url)
self.url = urlparse.urlunparse(
[scheme, netloc, path, params, query, fragment]
)
def get_query(self):
"""
Gets the request query string. Returns an ODict object.
"""
_, _, _, _, query, _ = urlparse.urlparse(self.url)
if query:
return odict.ODict(utils.urldecode(query))
return odict.ODict([])
def set_query(self, odict):
"""
Takes an ODict object, and sets the request query string.
"""
scheme, netloc, path, params, _, fragment = urlparse.urlparse(self.url)
query = utils.urlencode(odict.lst)
self.url = urlparse.urlunparse(
[scheme, netloc, path, params, query, fragment]
)
def pretty_host(self, hostheader):
"""
Heuristic to get the host of the request.
Note that pretty_host() does not always return the TCP destination
of the request, e.g. if an upstream proxy is in place
If hostheader is set to True, the Host: header will be used as
additional (and preferred) data source. This is handy in
        transparent mode, where only the IP of the destination is known,
but not the resolved name. This is disabled by default, as an
attacker may spoof the host header to confuse an analyst.
"""
host = None
if hostheader:
host = self.headers.get_first("host")
if not host:
host = self.host
if host:
try:
return host.encode("idna")
except ValueError:
return host
else:
return None
def pretty_url(self, hostheader):
if self.form_out == "authority": # upstream proxy mode
return "%s:%s" % (self.pretty_host(hostheader), self.port)
return utils.unparse_url(self.scheme,
self.pretty_host(hostheader),
self.port,
self.path).encode('ascii')
@property
def url(self):
"""
Returns a URL string, constructed from the Request's URL components.
"""
return utils.unparse_url(
self.scheme,
self.host,
self.port,
self.path
).encode('ascii')
@url.setter
def url(self, url):
"""
Parses a URL specification, and updates the Request's information
accordingly.
        Raises ValueError if the URL is invalid.
"""
parts = http.parse_url(url)
if not parts:
raise ValueError("Invalid URL: %s" % url)
self.scheme, self.host, self.port, self.path = parts
def get_cookies(self):
"""
Returns a possibly empty netlib.odict.ODict object.
"""
ret = odict.ODict()
for i in self.headers["cookie"]:
ret.extend(cookies.parse_cookie_header(i))
return ret
def set_cookies(self, odict):
"""
Takes an netlib.odict.ODict object. Over-writes any existing Cookie
headers.
"""
v = cookies.format_cookie_header(odict)
self.headers["Cookie"] = [v]
def replace(self, pattern, repl, *args, **kwargs):
"""
Replaces a regular expression pattern with repl in the headers, the
request path and the body of the request. Encoded content will be
decoded before replacement, and re-encoded afterwards.
Returns the number of replacements made.
"""
c = HTTPMessage.replace(self, pattern, repl, *args, **kwargs)
self.path, pc = utils.safe_subn(
pattern, repl, self.path, *args, **kwargs
)
c += pc
return c
class HTTPResponse(HTTPMessage):
"""
An HTTP response.
Exposes the following attributes:
httpversion: HTTP version tuple, e.g. (1,1)
code: HTTP response code
msg: HTTP response message
headers: ODict object
content: Content of the request, None, or CONTENT_MISSING if there
is content associated, but not present. CONTENT_MISSING evaluates
to False to make checking for the presence of content natural.
timestamp_start: Timestamp indicating when request transmission started
timestamp_end: Timestamp indicating when request transmission ended
"""
def __init__(
self,
httpversion,
code,
msg,
headers,
content,
timestamp_start=None,
timestamp_end=None):
assert isinstance(headers, odict.ODictCaseless) or headers is None
HTTPMessage.__init__(
self,
httpversion,
headers,
content,
timestamp_start,
timestamp_end
)
self.code = code
self.msg = msg
# Is this request replayed?
self.is_replay = False
self.stream = False
_stateobject_attributes = HTTPMessage._stateobject_attributes.copy()
_stateobject_attributes.update(
code=int,
msg=str
)
@property
def body(self):
return self.content
@classmethod
def from_state(cls, state):
f = cls(None, None, None, None, None)
f.load_state(state)
return f
def __repr__(self):
if self.content:
size = netlib.utils.pretty_size(len(self.content))
else:
size = "content missing"
return "<HTTPResponse: {code} {msg} ({contenttype}, {size})>".format(
code=self.code,
msg=self.msg,
contenttype=self.headers.get_first(
"content-type", "unknown content type"
),
size=size
)
@classmethod
def from_stream(
cls,
rfile,
request_method,
include_body=True,
body_size_limit=None):
"""
Parse an HTTP response from a file stream
"""
timestamp_start = utils.timestamp()
if hasattr(rfile, "reset_timestamps"):
rfile.reset_timestamps()
protocol = http.http1.HTTP1Protocol(rfile=rfile)
resp = protocol.read_response(
request_method,
body_size_limit,
include_body=include_body
)
if hasattr(rfile, "first_byte_timestamp"):
# more accurate timestamp_start
timestamp_start = rfile.first_byte_timestamp
if include_body:
timestamp_end = utils.timestamp()
else:
timestamp_end = None
return HTTPResponse(
resp.httpversion,
resp.status_code,
resp.msg,
resp.headers,
resp.body,
timestamp_start,
timestamp_end
)
def _assemble_first_line(self):
return 'HTTP/%s.%s %s %s' % \
(self.httpversion[0], self.httpversion[1], self.code, self.msg)
_headers_to_strip_off = ['Proxy-Connection',
'Alternate-Protocol',
'Alt-Svc']
def _assemble_headers(self, preserve_transfer_encoding=False):
headers = self.headers.copy()
for k in self._headers_to_strip_off:
del headers[k]
if not preserve_transfer_encoding:
del headers['Transfer-Encoding']
# If content is defined (i.e. not None or CONTENT_MISSING), we always
# add a content-length header.
if self.content or self.content == "":
headers["Content-Length"] = [str(len(self.content))]
return headers.format()
def _assemble_head(self, preserve_transfer_encoding=False):
return '%s\r\n%s\r\n' % (
self._assemble_first_line(),
self._assemble_headers(
preserve_transfer_encoding=preserve_transfer_encoding
)
)
def assemble(self):
"""
Assembles the response for transmission to the client. We make some
modifications to make sure interception works properly.
Raises an Exception if the request cannot be assembled.
"""
if self.content == CONTENT_MISSING:
raise proxy.ProxyError(
502,
"Cannot assemble flow with CONTENT_MISSING"
)
head = self._assemble_head()
if self.content:
return head + self.content
else:
return head
def _refresh_cookie(self, c, delta):
"""
Takes a cookie string c and a time delta in seconds, and returns
a refreshed cookie string.
"""
c = Cookie.SimpleCookie(str(c))
for i in c.values():
if "expires" in i:
d = parsedate_tz(i["expires"])
if d:
d = mktime_tz(d) + delta
i["expires"] = formatdate(d)
else:
# This can happen when the expires tag is invalid.
                # reddit.com sends an expires tag like this: "Thu, 31 Dec
# 2037 23:59:59 GMT", which is valid RFC 1123, but not
# strictly correct according to the cookie spec. Browsers
# appear to parse this tolerantly - maybe we should too.
# For now, we just ignore this.
del i["expires"]
return c.output(header="").strip()
def refresh(self, now=None):
"""
This fairly complex and heuristic function refreshes a server
response for replay.
- It adjusts date, expires and last-modified headers.
- It adjusts cookie expiration.
"""
if not now:
now = time.time()
delta = now - self.timestamp_start
refresh_headers = [
"date",
"expires",
"last-modified",
]
for i in refresh_headers:
if i in self.headers:
d = parsedate_tz(self.headers[i][0])
if d:
new = mktime_tz(d) + delta
self.headers[i] = [formatdate(new)]
c = []
for i in self.headers["set-cookie"]:
c.append(self._refresh_cookie(i, delta))
if c:
self.headers["set-cookie"] = c
def get_cookies(self):
"""
Get the contents of all Set-Cookie headers.
Returns a possibly empty ODict, where keys are cookie name strings,
and values are [value, attr] lists. Value is a string, and attr is
an ODictCaseless containing cookie attributes. Within attrs, unary
attributes (e.g. HTTPOnly) are indicated by a Null value.
"""
ret = []
for header in self.headers["set-cookie"]:
v = http.cookies.parse_set_cookie_header(header)
if v:
name, value, attrs = v
ret.append([name, [value, attrs]])
return odict.ODict(ret)
def set_cookies(self, odict):
"""
Set the Set-Cookie headers on this response, over-writing existing
headers.
Accepts an ODict of the same format as that returned by get_cookies.
"""
values = []
for i in odict.lst:
values.append(
http.cookies.format_set_cookie_header(
i[0],
i[1][0],
i[1][1]
)
)
self.headers["Set-Cookie"] = values
class HTTPFlow(Flow):
"""
A HTTPFlow is a collection of objects representing a single HTTP
transaction. The main attributes are:
request: HTTPRequest object
response: HTTPResponse object
error: Error object
server_conn: ServerConnection object
client_conn: ClientConnection object
Note that it's possible for a Flow to have both a response and an error
object. This might happen, for instance, when a response was received
from the server, but there was an error sending it back to the client.
The following additional attributes are exposed:
intercepted: Is this flow currently being intercepted?
live: Does this flow have a live client connection?
"""
def __init__(self, client_conn, server_conn, live=None):
super(HTTPFlow, self).__init__("http", client_conn, server_conn, live)
self.request = None
"""@type: HTTPRequest"""
self.response = None
"""@type: HTTPResponse"""
_stateobject_attributes = Flow._stateobject_attributes.copy()
_stateobject_attributes.update(
request=HTTPRequest,
response=HTTPResponse
)
@classmethod
def from_state(cls, state):
f = cls(None, None)
f.load_state(state)
return f
def __repr__(self):
s = "<HTTPFlow"
for a in ("request", "response", "error", "client_conn", "server_conn"):
if getattr(self, a, False):
s += "\r\n %s = {flow.%s}" % (a, a)
s += ">"
return s.format(flow=self)
def copy(self):
f = super(HTTPFlow, self).copy()
if self.request:
f.request = self.request.copy()
if self.response:
f.response = self.response.copy()
return f
def match(self, f):
"""
Match this flow against a compiled filter expression. Returns True
if matched, False if not.
If f is a string, it will be compiled as a filter expression. If
the expression is invalid, ValueError is raised.
"""
if isinstance(f, basestring):
from .. import filt
f = filt.parse(f)
if not f:
raise ValueError("Invalid filter expression.")
if f:
return f(self)
return True
def replace(self, pattern, repl, *args, **kwargs):
"""
Replaces a regular expression pattern with repl in both request and
response of the flow. Encoded content will be decoded before
replacement, and re-encoded afterwards.
Returns the number of replacements made.
"""
c = self.request.replace(pattern, repl, *args, **kwargs)
if self.response:
c += self.response.replace(pattern, repl, *args, **kwargs)
return c
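# Illustrative usage sketch (not part of the original module), assuming `flow` is an
# HTTPFlow and using mitmproxy's filter expression syntax (~m matches the method,
# ~u matches the URL against a regex):
#
#   if flow.match("~m GET & ~u /api/"):
#       flow.replace("token=[^&]+", "token=REDACTED")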
class HttpAuthenticationError(Exception):
def __init__(self, auth_headers=None):
super(HttpAuthenticationError, self).__init__(
"Proxy Authentication Required"
)
self.headers = auth_headers
self.code = 407
def __repr__(self):
return "Proxy Authentication Required"
class HTTPHandler(ProtocolHandler):
"""
    HTTPHandler implements mitmproxy's understanding of the HTTP protocol.
"""
def __init__(self, c):
super(HTTPHandler, self).__init__(c)
self.expected_form_in = c.config.mode.http_form_in
self.expected_form_out = c.config.mode.http_form_out
self.skip_authentication = False
def handle_messages(self):
while self.handle_flow():
pass
def get_response_from_server(self, flow):
self.c.establish_server_connection()
request_raw = flow.request.assemble()
for attempt in (0, 1):
try:
self.c.server_conn.send(request_raw)
# Only get the headers at first...
flow.response = HTTPResponse.from_stream(
self.c.server_conn.rfile,
flow.request.method,
body_size_limit=self.c.config.body_size_limit,
include_body=False
)
break
except (tcp.NetLibError, http.HttpErrorConnClosed) as v:
self.c.log(
"error in server communication: %s" % repr(v),
level="debug"
)
if attempt == 0:
# In any case, we try to reconnect at least once. This is
# necessary because it might be possible that we already
# initiated an upstream connection after clientconnect that
# has already been expired, e.g consider the following event
# log:
# > clientconnect (transparent mode destination known)
# > serverconnect
# > read n% of large request
# > server detects timeout, disconnects
# > read (100-n)% of large request
# > send large request upstream
self.c.server_reconnect()
else:
raise
# call the appropriate script hook - this is an opportunity for an
# inline script to set flow.stream = True
flow = self.c.channel.ask("responseheaders", flow)
if flow is None or flow == KILL:
raise KillSignal()
else:
# now get the rest of the request body, if body still needs to be
# read but not streaming this response
if flow.response.stream:
flow.response.content = CONTENT_MISSING
else:
protocol = http.http1.HTTP1Protocol(rfile=self.c.server_conn.rfile)
flow.response.content = protocol.read_http_body(
flow.response.headers,
self.c.config.body_size_limit,
flow.request.method,
flow.response.code,
False
)
flow.response.timestamp_end = utils.timestamp()
def handle_flow(self):
flow = HTTPFlow(self.c.client_conn, self.c.server_conn, self.live)
try:
try:
req = HTTPRequest.from_stream(
self.c.client_conn.rfile,
body_size_limit=self.c.config.body_size_limit,
wfile=self.c.client_conn.wfile
)
except tcp.NetLibError:
# don't throw an error for disconnects that happen
# before/between requests.
return False
self.c.log(
"request",
"debug",
[req._assemble_first_line(req.form_in)]
)
ret = self.process_request(flow, req)
if ret is not None:
return ret
# Be careful NOT to assign the request to the flow before
# process_request completes. This is because the call can raise an
# exception. If the request object is already attached, this results
# in an Error object that has an attached request that has not been
# sent through to the Master.
flow.request = req
request_reply = self.c.channel.ask("request", flow)
if request_reply is None or request_reply == KILL:
raise KillSignal()
# The inline script may have changed request.host
self.process_server_address(flow)
if isinstance(request_reply, HTTPResponse):
flow.response = request_reply
else:
self.get_response_from_server(flow)
# no further manipulation of self.c.server_conn beyond this point
# we can safely set it as the final attribute value here.
flow.server_conn = self.c.server_conn
self.c.log(
"response", "debug", [
flow.response._assemble_first_line()])
response_reply = self.c.channel.ask("response", flow)
if response_reply is None or response_reply == KILL:
raise KillSignal()
self.send_response_to_client(flow)
if self.check_close_connection(flow):
return False
# We sent a CONNECT request to an upstream proxy.
if flow.request.form_in == "authority" and flow.response.code == 200:
# TODO: Possibly add headers (memory consumption/usefulness
# tradeoff) Make sure to add state info before the actual
# processing of the CONNECT request happens. During an SSL
# upgrade, we may receive an SNI indication from the client,
# which resets the upstream connection. If this is the case, we
# must already re-issue the CONNECT request at this point.
self.c.server_conn.state.append(
(
"http", {
"state": "connect",
"host": flow.request.host,
"port": flow.request.port
}
)
)
if not self.process_connect_request(
(flow.request.host, flow.request.port)):
return False
# If the user has changed the target server on this connection,
# restore the original target server
flow.live.restore_server()
return True # Next flow please.
except (
HttpAuthenticationError,
http.HttpError,
proxy.ProxyError,
tcp.NetLibError,
) as e:
self.handle_error(e, flow)
except KillSignal:
self.c.log("Connection killed", "info")
finally:
flow.live = None # Connection is not live anymore.
return False
def handle_server_reconnect(self, state):
if state["state"] == "connect":
send_connect_request(
self.c.server_conn,
state["host"],
state["port"],
update_state=False
)
else: # pragma: nocover
raise RuntimeError("Unknown State: %s" % state["state"])
def handle_error(self, error, flow=None):
message = repr(error)
message_debug = None
if isinstance(error, tcp.NetLibError):
message = None
message_debug = "TCP connection closed unexpectedly."
elif "tlsv1 alert unknown ca" in message:
message = "TLSv1 Alert Unknown CA: The client does not trust the proxy's certificate."
elif "handshake error" in message:
message_debug = message
message = "SSL handshake error: The client may not trust the proxy's certificate."
if message:
self.c.log(message, level="info")
if message_debug:
self.c.log(message_debug, level="debug")
if flow:
# TODO: no flows without request or with both request and response
# at the moment.
if flow.request and not flow.response:
flow.error = Error(message or message_debug)
self.c.channel.ask("error", flow)
try:
code = getattr(error, "code", 502)
headers = getattr(error, "headers", None)
html_message = message or ""
if message_debug:
html_message += "<pre>%s</pre>" % message_debug
self.send_error(code, html_message, headers)
except:
pass
def send_error(self, code, message, headers):
response = http.status_codes.RESPONSES.get(code, "Unknown")
html_content = """
<html>
<head>
<title>%d %s</title>
</head>
<body>%s</body>
</html>
""" % (code, response, message)
self.c.client_conn.wfile.write("HTTP/1.1 %s %s\r\n" % (code, response))
self.c.client_conn.wfile.write(
"Server: %s\r\n" % self.c.config.server_version
)
self.c.client_conn.wfile.write("Content-type: text/html\r\n")
self.c.client_conn.wfile.write(
"Content-Length: %d\r\n" % len(html_content)
)
if headers:
for key, value in headers.items():
self.c.client_conn.wfile.write("%s: %s\r\n" % (key, value))
self.c.client_conn.wfile.write("Connection: close\r\n")
self.c.client_conn.wfile.write("\r\n")
self.c.client_conn.wfile.write(html_content)
self.c.client_conn.wfile.flush()
def process_request(self, flow, request):
"""
@returns:
True, if the request should not be sent upstream
False, if the connection should be aborted
None, if the request should be sent upstream
(a status code != None should be returned directly by handle_flow)
"""
if not self.skip_authentication:
self.authenticate(request)
# Determine .scheme, .host and .port attributes
# For absolute-form requests, they are directly given in the request.
# For authority-form requests, we only need to determine the request scheme.
# For relative-form requests, we need to determine host and port as
# well.
if not request.scheme:
request.scheme = "https" if flow.server_conn and flow.server_conn.ssl_established else "http"
if not request.host:
# Host/Port Complication: In upstream mode, use the server we CONNECTed to,
# not the upstream proxy.
if flow.server_conn:
for s in flow.server_conn.state:
if s[0] == "http" and s[1]["state"] == "connect":
request.host, request.port = s[1]["host"], s[1]["port"]
if not request.host and flow.server_conn:
request.host, request.port = flow.server_conn.address.host, flow.server_conn.address.port
# Now we can process the request.
if request.form_in == "authority":
if self.c.client_conn.ssl_established:
raise http.HttpError(
400,
"Must not CONNECT on already encrypted connection"
)
if self.c.config.mode == "regular":
self.c.set_server_address((request.host, request.port))
# Update server_conn attribute on the flow
flow.server_conn = self.c.server_conn
self.c.establish_server_connection()
self.c.client_conn.send(
('HTTP/%s.%s 200 ' % (request.httpversion[0], request.httpversion[1])) +
'Connection established\r\n' +
'Content-Length: 0\r\n' +
('Proxy-agent: %s\r\n' % self.c.config.server_version) +
'\r\n'
)
return self.process_connect_request(self.c.server_conn.address)
elif self.c.config.mode == "upstream":
return None
else:
# CONNECT should never occur if we don't expect absolute-form
# requests
pass
elif request.form_in == self.expected_form_in:
request.form_out = self.expected_form_out
if request.form_in == "absolute":
if request.scheme != "http":
raise http.HttpError(
400,
"Invalid request scheme: %s" % request.scheme
)
if self.c.config.mode == "regular":
# Update info so that an inline script sees the correct
# value at flow.server_conn
self.c.set_server_address((request.host, request.port))
flow.server_conn = self.c.server_conn
elif request.form_in == "relative":
if self.c.config.mode == "spoof":
# Host header
h = request.pretty_host(hostheader=True)
if h is None:
raise http.HttpError(
400,
"Invalid request: No host information"
)
p = http.parse_url("http://" + h)
request.scheme = p[0]
request.host = p[1]
request.port = p[2]
self.c.set_server_address((request.host, request.port))
flow.server_conn = self.c.server_conn
if self.c.config.mode == "sslspoof":
# SNI is processed in server.py
if not (flow.server_conn and flow.server_conn.ssl_established):
raise http.HttpError(
400,
"Invalid request: No host information"
)
return None
raise http.HttpError(
400, "Invalid HTTP request form (expected: %s, got: %s)" % (
self.expected_form_in, request.form_in
)
)
def process_server_address(self, flow):
# Depending on the proxy mode, server handling is entirely different
# We provide a mostly unified API to the user, which needs to be
# unfiddled here
# ( See also: https://github.com/mitmproxy/mitmproxy/issues/337 )
address = tcp.Address((flow.request.host, flow.request.port))
ssl = (flow.request.scheme == "https")
if self.c.config.mode == "upstream":
# The connection to the upstream proxy may have a state we may need
# to take into account.
connected_to = None
for s in flow.server_conn.state:
if s[0] == "http" and s[1]["state"] == "connect":
connected_to = tcp.Address((s[1]["host"], s[1]["port"]))
# We need to reconnect if the current flow either requires a
# (possibly impossible) change to the connection state, e.g. the
# host has changed but we already CONNECTed somewhere else.
needs_server_change = (
ssl != self.c.server_conn.ssl_established
or
# HTTP proxying is "stateless", CONNECT isn't.
(connected_to and address != connected_to)
)
if needs_server_change:
# force create new connection to the proxy server to reset
# state
self.live.change_server(self.c.server_conn.address, force=True)
if ssl:
send_connect_request(
self.c.server_conn,
address.host,
address.port
)
self.c.establish_ssl(server=True)
else:
# If we're not in upstream mode, we just want to update the host
# and possibly establish TLS. This is a no op if the addresses
# match.
self.live.change_server(address, ssl=ssl)
flow.server_conn = self.c.server_conn
def send_response_to_client(self, flow):
if not flow.response.stream:
# no streaming:
# we already received the full response from the server and can
# send it to the client straight away.
self.c.client_conn.send(flow.response.assemble())
else:
# streaming:
# First send the headers and then transfer the response
# incrementally:
h = flow.response._assemble_head(preserve_transfer_encoding=True)
self.c.client_conn.send(h)
protocol = http.http1.HTTP1Protocol(rfile=self.c.server_conn.rfile)
chunks = protocol.read_http_body_chunked(
flow.response.headers,
self.c.config.body_size_limit,
flow.request.method,
flow.response.code,
False,
4096
)
if callable(flow.response.stream):
chunks = flow.response.stream(chunks)
for chunk in chunks:
for part in chunk:
self.c.client_conn.wfile.write(part)
self.c.client_conn.wfile.flush()
flow.response.timestamp_end = utils.timestamp()
def check_close_connection(self, flow):
"""
Checks if the connection should be closed depending on the HTTP
semantics. Returns True, if so.
"""
close_connection = (
http.http1.HTTP1Protocol.connection_close(
flow.request.httpversion,
flow.request.headers
) or http.http1.HTTP1Protocol.connection_close(
flow.response.httpversion,
flow.response.headers
) or http.http1.HTTP1Protocol.expected_http_body_size(
flow.response.headers,
False,
flow.request.method,
flow.response.code) == -1
)
if close_connection:
if flow.request.form_in == "authority" and flow.response.code == 200:
# Workaround for
# https://github.com/mitmproxy/mitmproxy/issues/313: Some
# proxies (e.g. Charles) send a CONNECT response with HTTP/1.0
# and no Content-Length header
pass
else:
return True
return False
def process_connect_request(self, address):
"""
Process a CONNECT request.
Returns True if the CONNECT request has been processed successfully.
Returns False, if the connection should be closed immediately.
"""
address = tcp.Address.wrap(address)
if self.c.config.check_ignore(address):
self.c.log("Ignore host: %s:%s" % address(), "info")
TCPHandler(self.c, log=False).handle_messages()
return False
else:
self.expected_form_in = "relative"
self.expected_form_out = "relative"
self.skip_authentication = True
# In practice, nobody issues a CONNECT request to send unencrypted
# HTTP requests afterwards. If we don't delegate to TCP mode, we
# should always negotiate a SSL connection.
#
# FIXME: Turns out the previous statement isn't entirely true.
# Chrome on Windows CONNECTs to :80 if an explicit proxy is
# configured and a websocket connection should be established. We
# don't support websocket at the moment, so it fails anyway, but we
# should come up with a better solution to this if we start to
# support WebSockets.
should_establish_ssl = (
address.port in self.c.config.ssl_ports
or
not self.c.config.check_tcp(address)
)
if should_establish_ssl:
self.c.log(
"Received CONNECT request to SSL port. "
"Upgrading to SSL...", "debug"
)
self.c.establish_ssl(server=True, client=True)
self.c.log("Upgrade to SSL completed.", "debug")
if self.c.config.check_tcp(address):
self.c.log(
"Generic TCP mode for host: %s:%s" % address(),
"info"
)
TCPHandler(self.c).handle_messages()
return False
return True
def authenticate(self, request):
if self.c.config.authenticator:
if self.c.config.authenticator.authenticate(request.headers):
self.c.config.authenticator.clean(request.headers)
else:
raise HttpAuthenticationError(
self.c.config.authenticator.auth_challenge_headers())
return request.headers
class RequestReplayThread(threading.Thread):
name = "RequestReplayThread"
def __init__(self, config, flow, masterq, should_exit):
"""
        masterq can be a queue or None, if no script hooks should be
processed.
"""
self.config, self.flow = config, flow
if masterq:
self.channel = controller.Channel(masterq, should_exit)
else:
self.channel = None
super(RequestReplayThread, self).__init__()
def run(self):
r = self.flow.request
form_out_backup = r.form_out
try:
self.flow.response = None
# If we have a channel, run script hooks.
if self.channel:
request_reply = self.channel.ask("request", self.flow)
if request_reply is None or request_reply == KILL:
raise KillSignal()
elif isinstance(request_reply, HTTPResponse):
self.flow.response = request_reply
if not self.flow.response:
# In all modes, we directly connect to the server displayed
if self.config.mode == "upstream":
server_address = self.config.mode.get_upstream_server(
self.flow.client_conn
)[2:]
server = ServerConnection(server_address)
server.connect()
if r.scheme == "https":
send_connect_request(server, r.host, r.port)
server.establish_ssl(
self.config.clientcerts,
sni=self.flow.server_conn.sni
)
r.form_out = "relative"
else:
r.form_out = "absolute"
else:
server_address = (r.host, r.port)
server = ServerConnection(server_address)
server.connect()
if r.scheme == "https":
server.establish_ssl(
self.config.clientcerts,
sni=self.flow.server_conn.sni
)
r.form_out = "relative"
server.send(r.assemble())
self.flow.server_conn = server
self.flow.response = HTTPResponse.from_stream(
server.rfile,
r.method,
body_size_limit=self.config.body_size_limit
)
if self.channel:
response_reply = self.channel.ask("response", self.flow)
if response_reply is None or response_reply == KILL:
raise KillSignal()
except (proxy.ProxyError, http.HttpError, tcp.NetLibError) as v:
self.flow.error = Error(repr(v))
if self.channel:
self.channel.ask("error", self.flow)
except KillSignal:
# KillSignal should only be raised if there's a channel in the
# first place.
self.channel.tell("log", proxy.Log("Connection killed", "info"))
finally:
r.form_out = form_out_backup
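# Hypothetical usage sketch (not part of the original source). It assumes an
# existing ProxyConfig `config` and HTTPFlow `flow`; passing masterq=None skips
# all script hooks, as documented in __init__ above:
#
#     replay = RequestReplayThread(config, flow, None, None)
#     replay.start()
#     replay.join()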
|
byt3bl33d3r/mitmproxy
|
libmproxy/protocol/http.py
|
Python
|
mit
| 56,839
|
#
# Dalton Burke, CU Denver
#
# CONUS = [-124.7844079,-66.9513812,24.7433195,49.3457868]
from __future__ import absolute_import
from utils import ensure_dir, symlink_unless_exists
from .downloader import download_url, DownloadError, get_dList
# fast searching of dList
from bisect import bisect
from datetime import datetime, timedelta
from pyhdf import SD
import pytz
import requests
import os
import os.path as osp
import sys
import logging
from six.moves import map
from six.moves import range
class data_sourceError(Exception):
"""
Raised when a level0 source cannot retreive files
"""
pass
class data_source(object):
"""
Parent class of all data sources that implement common functionality, for example
- local validation (file size check)
- HDF retrieval with retries (smart check whether server implements http-range)
"""
def __init__(self, ingest_dir):
"""
Initialize level0 source with ingest directory (where level0 files are stored).
:param ingest_dir: root of level0 storage
"""
self.ingest_dir = osp.abspath(osp.expanduser(ingest_dir))
def retrieve_data(self, from_utc, to_utc, lonlat):
"""
        Retrieves all data (geo and active fire) in the given time range and longitude/latitude box. This is the entry point end users call to get data.
:param from_utc: start time
:param to_utc: end time
:param lonlat: list of form [lowlon, highlon, lowlat, highlat] describing longitude/latitude box
:return: list of paths to local files that were retrieved
"""
# I think all data should be ingested into one directory, then whichever files
# are needed for a given job can be copied to a new folder with a job name
two_weeks_ago = datetime.utcnow() - timedelta(days=14)
manifest = []
if from_utc > two_weeks_ago:
manifest.extend(self.retrieve_l0(from_utc, to_utc))
elif to_utc < two_weeks_ago:
# filter geo_list on intersection with lonlat, the hdf library i'd want to use here isn't ready yet
geo_list = [x for x in self.retrieve_geo(from_utc, to_utc) if geo_intersects(self.ingest_dir + '/' + x, lonlat)]
# geo_list = retrieve_geo(from_utc, to_utc)
manifest.extend(geo_list)
manifest.extend(self.retrieve_af(geo_list))
else:
manifest.extend(self.retrieve_l0(two_weeks_ago + timedelta(minutes=10), to_utc))
# filter geo_list on intersection with lonlat
            geo_list = [x for x in self.retrieve_geo(from_utc, two_weeks_ago) if geo_intersects(self.ingest_dir + '/' + x, lonlat)]
# geo_list = retrieve_geo(from_utc, two_weeks_ago)
manifest.extend(geo_list)
manifest.extend(self.retrieve_af(geo_list))
return manifest
def retrieve_geo(self, from_utc, to_utc, ref_utc = None):
"""
Attempts to retrieve geolocation files in the time range
First, check if they're available locally, if unavailable proceed to download
:param from_utc: start time
:param to_utc: end time
:return: a list of paths to local geolocation files
"""
pass
def compute_geo_manifest(from_utc, to_utc):
"""
Get list of geolocation file names for the given time frame
:param from_utc: start time UTC
:param to_utc: end time UTC
:return: list of file names as strings
"""
pass
def retrieve_af(self, geo_list):
"""
Attempts to retrieve active fire files in the time range and latitude/longitude box
:param geo_list: list containing the relevant geolocation file names
:return: a list of paths to the local active fire files
"""
pass
def compute_af_manifest(geo_list):
"""
get list of active fire file names from a set of geolocation files
:param geo_list: list containing geolocation file names
"""
pass
def retrieve_l0(self, from_utc, to_utc, ref_utc = None):
"""
Attempts to retrieve the firedata files for the time range.
It should be first verified whether the firedata files are available locally.
For any unavailable files, downloads should be initiated.
:param from_utc: start time
:param to_utc: end time
:return: a list of paths to local level0 files
"""
pass
def compute_l0_manifest(self, from_utc, to_utc):
"""
Compute list of files in the source for the given time frame
:param from_utc: time UTC format
:param to_utc: time UTC format
:return: list of file names as strings
"""
pass
def manifest_from_geo(self, geo_list, granule_name):
# prefix later tells us what url we should be looking at
prefix = ''
file_list = []
# pulls directory listing of each relevant page (determined by the 'prefix' of each geo file)
# this yields a super set of the active fire files we care about, we'll refine the list in the next part
for g in geo_list:
if g[:19] != prefix:
prefix = g[:19]
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
# manifest contains the final set of exact filenames we care about
manifest = []
# Search for what the name should look like and use that index to add that name to the manifest
# this takes n*log(n) time, which I think is pretty good
for g in geo_list:
manifest.append(file_list[bisect(file_list, granule_name + g[5:24] + '99999999999999.hdf') - 1])
return manifest
def download_file(self, url_base, rel_path, max_retries=3):
"""
Download a file and stream to <rel_path> in ingest_dir.
:param url_base: the base URL where the file is hosted
:param rel_path: the relative path of the file
:param max_retries: how many times we may retry to download the file
"""
url = url_base + '/' + rel_path
path = osp.join(self.ingest_dir, rel_path)
try:
# print 'downloading', url
download_url(url, path, max_retries)
# print 'done'
except DownloadError as e:
raise data_sourceError('data_source: failed to download file %s' % url)
def available_locally(self, path):
"""
        Check if a level0 file is available locally and if its file size checks out.
:param path: the level0 file path
"""
info_path = path + '.size'
if osp.exists(path) and osp.exists(info_path):
content_size = int(open(info_path).read())
return osp.getsize(path) == content_size
else:
return False
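def _demo_size_sidecar(path, content_size):
    """
    Illustration only (not part of the original source): available_locally()
    above expects a '<file>.size' side-car holding the expected byte count.
    This writes a throwaway file plus its side-car and checks that the sizes
    match, mirroring the check performed by available_locally().
    """
    with open(path, 'w') as f:
        f.write('x' * content_size)
    with open(path + '.size', 'w') as f:
        f.write(str(content_size))
    return osp.getsize(path) == content_size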
class MODIS_TERRA(data_source):
"""
750m data from the MODIS instrument on the Terra satellite
"""
def __init__(self, ingest_dir):
# if(satellite = 'terra'):
# self.geo_gran = 'MOD03'
# self.af_gran = 'MOD14'
# elif(satellite = 'aqua'):
# self.geo_gran = 'MYD03'
# self.af_gran = 'MYD14'
# else:
# raise Exception(ValueError)
super(MODIS_TERRA, self).__init__(ingest_dir)
def retrieve_geo(self, from_utc, to_utc):
"""
Attempts to retrieve geolocation files in the time range
First, check if they're available locally, if unavailable proceed to download
:param from_utc: start time
:param to_utc: end time
:return: a list of paths to local geolocation files
"""
manifest = self.compute_geo_manifest(from_utc, to_utc)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving geolocation data from %s' % (self.url_base_hdf + '/' + self.filepath_geo))
list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_geo + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
return manifest
def compute_geo_manifest(self, from_utc, to_utc):
"""
Get list of geolocation file names for the given time frame
:param from_utc: start time UTC
:param to_utc: end time UTC
:return: list of file names as strings
"""
# I don't really want to deal with splitting it on years, so we'll recurse on that
# from now on we can assume that to and from occur in the same year
start_year = from_utc.year
if start_year != to_utc.year:
            return self.compute_geo_manifest(from_utc, datetime(year=start_year, month=12,day=31,hour=23,minute=59)) + \
                self.compute_geo_manifest(datetime(year=start_year+1, month=1, day=1, hour=0, minute=0), to_utc)
# The source has data for different days in different folders, we'll need to get their paths for each day
start_day = (from_utc - datetime(start_year, 1,1)).days + 1
end_day = (to_utc - datetime(start_year, 1, 1)).days + 1
file_list = []
for day in range(start_day, end_day + 1):
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_geo + '/' + str(start_year) + '/' + str(day)))
# we now have a list with all of the filenames during the days that the query requested, so now we'll trim the stuff at the front and back we don't need
# invent a sample filename for the start time, they look like this:
        # MOD03.AYYYYDDD.HHMM.006.#############.hdf
start_filename = 'MOD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, start_day, from_utc.hour, from_utc.minute)
# bisect searches for that sample name and returns the index of where that file should go
# to make sure we get that data we start at the file before it (-1)
start_index = bisect(file_list, start_filename) - 1
# we'll do the same for the last one
end_filename = 'MOD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, end_day, to_utc.hour, to_utc.minute)
end_index = bisect(file_list, end_filename)
return file_list[start_index:end_index]
def retrieve_af(self, geo_list):
"""
Attempts to retrieve active fire files in the time range and latitude/longitude box
:param geo_list: list containing the relevant geolocation file names
:return: a list of paths to the local active fire files
"""
manifest = self.compute_af_manifest(geo_list)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving active fire data from %s' % (self.url_base_hdf + '/' + self.filepath_af))
list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_af + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
return manifest
def compute_af_manifest(self, geo_list):
"""
get list of active fire file names from a set of geolocation files
:param geo_list: list containing geolocation file names
"""
prefix = ''
file_list = []
for g in geo_list:
if g[:19] != prefix:
prefix = g[:19]
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
manifest = []
# Search for what the name should look like and use that index to add that name to the manifest
# this takes n*log(n) time, which I think is pretty good
for g in geo_list:
manifest.append(file_list[bisect(file_list, 'MOD14' + g[5:24] + '99999999999999.hdf') - 1])
return manifest
def retrieve_l0(self, from_utc, to_utc):
"""
Attempts to retrieve the files to satisfy the simulation request from from_utc to to_utc.
:param from_utc: start time
:param to_utc: end time
:return: list of paths to local level0 files
"""
# This only works for requests going back about the last two weeks
# can add a source for older data later, but I don't think it's needed,
# given the purpose of the project.
if from_utc < datetime.utcnow() - timedelta(days=14):
raise data_sourceError('Requested data older than two weeks')
manifest = self.compute_l0_manifest(from_utc, to_utc)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving level0s from %s' % (self.url_base_l0 + '/' + self.filepath_l0))
list(map(lambda x:self.download_file(self.url_base_l0 + '/' + self.filepath_l0, x), nonlocals))
return manifest
def compute_l0_manifest(self, from_utc, to_utc):
"""
Compute list of files in the source for the given time frame
:param from_utc: time UTC format
:param to_utc: time UTC format
:return: list of file names as strings
"""
# Retrieve the directory listing
dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0)
# We want a list of all of the filenames which land between from_utc and to_utc
# Gameplan:
# What would a file that starts exactly at from_utc look like?
# Filenames have this pattern: P0420064AAAAAAAAAAAAAAyyDDDhhmmss000.PDS
current_time = from_utc
days = (current_time - datetime(current_time.year, 1, 1)).days + 1
year = current_time.year % 100
filename = 'P0420064AAAAAAAAAAAAAA%02d%03d%02d%02d%02d000.PDS' % (year, days,
current_time.hour,
current_time.minute,
current_time.second)
# Then, we find out where that filename would go in the dList
        # This call binary searches dList for filename, and returns its index (pretty efficient)
# If the filename is not found, it returns the index of the first file larger than it
index = bisect(dList, filename)
# If the filename we made up is not in the list (very likely), we actually want the first file
# smaller than the filename, so we still get the data for that time period
# (-2 since the files come in pairs, one that ends in 000.PDS and one that ends in 001.PDS)
if index == len(dList):
index = index - 2
elif dList[index] != filename:
index = index - 2
level0manifest = []
# Now that we know where to start, we'll begin filling the manifest with relevant files
while current_time < to_utc:
# Add 000.PDS file to manifest
level0manifest.append(dList[index])
# Add 001.PDS file to manifest
level0manifest.append(dList[index+1])
# Move the index to the next pair, if we run out of files just break
index = index + 2
if index >= len(dList):
break
current_file = dList[index]
# Change time to match the next file, use that time to compare to to_utc
# If the time that we get from this exceeds to_utc, we have all the data we want
current_time = current_time.replace(year = 2000 + int(current_file[22:24]))
current_time = current_time.replace(day=1, month=1)
current_time = current_time + timedelta(days=int(current_file[24:27]) - 1)
current_time = current_time.replace(hour=int(current_file[27:29]),
minute=int(current_file[29:31]),
second=int(current_file[31:33]))
return level0manifest
url_base_l0 = 'ftp://is.sci.gsfc.nasa.gov'
filepath_l0 = 'gsfcdata/terra/modis/level0'
url_base_hdf = 'ftp://ladsweb.nascom.nasa.gov'
filepath_geo = 'allData/6/MOD03'
filepath_af = 'allData/6/MOD14'
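def _demo_bisect_lookup():
    """
    Illustration only (not part of the original source): the bisect trick used
    by compute_geo_manifest/compute_l0_manifest above, run on a made-up, sorted
    directory listing. The probe name sorts just past the granule we want, so
    stepping back one index selects it.
    """
    listing = [
        'MOD03.A2017123.1200.006.2017123193000.hdf',
        'MOD03.A2017123.1205.006.2017123193500.hdf',
        'MOD03.A2017123.1210.006.2017123194000.hdf',
    ]
    probe = 'MOD03.A2017123.1205.006.9999999999999.hdf'
    return listing[bisect(listing, probe) - 1]  # -> the 1205 granule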
# Near clone of MODIS_TERRA, only changes to url and filename
class MODIS_AQUA(data_source):
"""
750m data from the MODIS instrument on the Aqua satellite
Uniqueness- Requires data from two directories on the source server,
modis data denoted with _m, and gbad data denoted with _g
"""
def __init__(self, ingest_dir):
super(MODIS_AQUA, self).__init__(ingest_dir)
def retrieve_geo(self, from_utc, to_utc):
"""
Attempts to retrieve geolocation files in the time range
First, check if they're available locally, if unavailable proceed to download
:param from_utc: start time
:param to_utc: end time
:return: a list of paths to local geolocation files
"""
manifest = self.compute_geo_manifest(from_utc, to_utc)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving geolocation data from %s' % (self.url_base_hdf + '/' + self.filepath_geo))
list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_geo + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
return manifest
def compute_geo_manifest(self, from_utc, to_utc):
"""
Get list of geolocation file names for the given time frame
:param from_utc: start time UTC
:param to_utc: end time UTC
:return: list of file names as strings
"""
# I don't really want to deal with splitting it on years, so we'll recurse on that
# from now on we can assume that to and from occur in the same year
start_year = from_utc.year
if start_year != to_utc.year:
            return self.compute_geo_manifest(from_utc, datetime(year=start_year, month=12,day=31,hour=23,minute=59)) + \
                self.compute_geo_manifest(datetime(year=start_year+1, month=1, day=1, hour=0, minute=0), to_utc)
# The source has data for different days in different folders, we'll need to get their paths for each day
start_day = (from_utc - datetime(start_year, 1,1)).days + 1
end_day = (to_utc - datetime(start_year, 1, 1)).days + 1
file_list = []
for day in range(start_day, end_day + 1):
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_geo + '/' + str(start_year) + '/' + str(day)))
geoMeta = []
i = from_utc.replace(hour=0,minute=0,second=0,microsecond=0)
end_date = to_utc.replace(hour=0,minute=0,second=0,microsecond=0)
gran = 'MYD03'
url = 'ftp://ladsweb.nascom.nasa.gov'
path = 'geoMeta/6/AQUA'
while i <= end_date:
#geoMeta.append('ftp://ladsweb.nascom.nasa.gov/geoMeta/6/AQUA/' + str(year) + '/MYD03_' + str(year) + '-' + str(month) + '-' + str(day) + '.txt')
geoMeta.append('%s/%s/%04d/%s_%04d-%02d-%02d.txt' % (url, path, i.year, gran, i.year, i.month, i.day))
i = i + timedelta(days=1)
#######################################################################################################################################################
# we now have a list with all of the filenames during the days that the query requested
# so now we'll trim the stuff at the front and back we don't need.
# invent a sample filename for the start time, they look like this:
        # MYD03.AYYYYDDD.HHMM.006.#############.hdf
start_filename = 'MYD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, start_day, from_utc.hour, from_utc.minute)
# bisect searches for that sample name and returns the index of where that file should go
# to make sure we get that data we start at the file before it (-1)
start_index = bisect(file_list, start_filename) - 1
# we'll do the same for the last one
end_filename = 'MYD03.A%04d%03d.%02d%02d.006.9999999999999.hdf' % (start_year, end_day, to_utc.hour, to_utc.minute)
end_index = bisect(file_list, end_filename)
return file_list[start_index:end_index]
def retrieve_af(self, geo_list):
"""
Attempts to retrieve active fire files in the time range and latitude/longitude box
:param geo_list: list containing the relevant geolocation file names
:return: a list of paths to the local active fire files
"""
manifest = self.compute_af_manifest(geo_list)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving active fire data from %s' % (self.url_base_hdf + '/' + self.filepath_af))
list(map(lambda x: self.download_file(self.url_base_hdf + '/' + self.filepath_af + '/' + x[7:11] + '/' + x[11:14], x), nonlocals))
return manifest
def compute_af_manifest(self, geo_list):
"""
get list of active fire file names from a set of geolocation files
:param geo_list: list containing geolocation file names
"""
prefix = ''
file_list = []
for g in geo_list:
if g[:19] != prefix:
prefix = g[:19]
file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
manifest = []
# Search for what the name should look like and use that index to add that name to the manifest
# this takes n*log(n) time, which I think is pretty good
for g in geo_list:
manifest.append(file_list[bisect(file_list, 'MYD14' + g[5:24] + '99999999999999.hdf') - 1])
return manifest
def retrieve_l0(self, from_utc, to_utc):
"""
        Attempts to retrieve the files to satisfy the simulation request from from_utc to to_utc.
:param from_utc: start time
:param to_utc: end time
:return: list of paths to local level0 files
"""
# This only works for requests going back about the last two weeks
# can add a source for older data later, but I don't think it's needed,
# given the purpose of the project.
if from_utc < datetime.utcnow() - timedelta(days=14):
raise data_sourceError('Requested data older than two weeks')
manifest_m = self.compute_l0_manifest_m(from_utc, to_utc)
manifest_g = self.compute_l0_manifest_g(from_utc, to_utc)
nonlocals_m = [x for x in manifest_m if not self.available_locally(osp.join(self.ingest_dir, x))]
nonlocals_g = [x for x in manifest_g if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving level0s from %s' % self.url_base_l0 + '/' + self.filepath_l0_m)
list(map(lambda x:self.download_file(self.url_base_l0 + '/' + self.filepath_l0_m, x), nonlocals_m))
logging.info('Retrieving level0s from %s' % self.url_base_l0 + '/' + self.filepath_l0_g)
list(map(lambda x:self.download_file(self.url_base_l0 + '/' + self.filepath_l0_g, x), nonlocals_g))
return manifest_m + manifest_g
def compute_l0_manifest_m(self, from_utc, to_utc):
"""
Compute list of MODIS files in the source for the given time frame
:param from_utc: time UTC format
:param to_utc: time UTC format
:return: list of file names as strings
"""
# We want a list of all of the filenames which land between from_utc and to_utc
# Retrieve the directory listing
dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0_m)
# Gameplan:
# What would a file that starts exactly at from_utc look like?
# Filenames have this pattern: P1540064AAAAAAAAAAAAAAyyDDDhhmmss000.PDS
current_time = from_utc
days = (current_time - datetime(current_time.year, 1, 1)).days + 1
year = current_time.year % 100
filename = 'P1540064AAAAAAAAAAAAAA%02d%03d%02d%02d%02d000.PDS' % (year, days,
current_time.hour,
current_time.minute,
current_time.second)
# Then, we find out where that filename would go in the dList
        # This call binary searches dList for filename, and returns its index (pretty efficient)
# If the filename is not found, it returns the index of the first file larger than it
index = bisect(dList, filename)
# If the filename we made up is not in the list (very likely), we actually want the first file
# smaller than the filename, so we still get the data for that time period
# (-2 since the files come in pairs, one that ends in 000.PDS and one that ends in 001.PDS)
if index == len(dList):
index = index - 2
elif dList[index] != filename:
index = index - 2
level0manifest = []
while current_time < to_utc:
# Add 000.PDS File
level0manifest.append(dList[index])
# Add 001.PDS file
level0manifest.append(dList[index+1])
# Move index to start of next pair,
index = index + 2
if index >= len(dList):
break
current_file = dList[index]
# Change time to match the next file, use that time to compare to to_utc
# If the time on the next file is bigger than to_utc, then we have all the files we care about
current_time = current_time.replace(year = 2000 + int(current_file[22:24]))
current_time = current_time.replace(day=1, month=1)
current_time = current_time + timedelta(days=int(current_file[24:27]) - 1)
current_time = current_time.replace(hour=int(current_file[27:29]),
minute=int(current_file[29:31]),
second=int(current_file[31:33]))
return level0manifest
def compute_l0_manifest_g(self, from_utc, to_utc):
"""
Compute list of GBAD files (AQUA specific) in the source for the given time frame
:param from_utc: time UTC format
:param to_utc: time UTC format
:return: list of file names as strings
"""
# We want a list of all of the filenames which land between from_utc and to_utc
# Retrieve the directory listing
dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0_g)
# Gameplan:
# What would a file that starts exactly at from_utc look like?
# Filenames have this pattern: P1540064AAAAAAAAAAAAAAyyDDDhhmmss000.PDS
current_time = from_utc
days = (current_time - datetime(current_time.year, 1, 1)).days + 1
year = current_time.year % 100
filename = 'P1540957AAAAAAAAAAAAAA%02d%03d%02d%02d%02d000.PDS' % (year, days,
current_time.hour,
current_time.minute,
current_time.second)
# Then, we find out where that filename would go in the dList
        # This call binary searches dList for filename, and returns its index (pretty efficient)
# If the filename is not found, it returns the index of the first file larger than it
index = bisect(dList, filename)
# If the filename we made up is not in the list (very likely), we actually want the first file
# smaller than the filename, so we still get the data for that time period
# (-4 because for each time there are 4 GBAD files, however there are only 2 we care for)
if index == len(dList):
index = index - 4
elif dList[index] != filename:
index = index - 4
level0manifest = []
while current_time < to_utc:
# Add 000.PDS file
level0manifest.append(dList[index])
# Add 001.PDS file
level0manifest.append(dList[index+1])
# Move index to next pair, (remember, there are 4 GBAD files, we only care about 2 of them)
# If we run out of filenames before reaching to_utc, that's fine, just break
index = index + 4
if index >= len(dList):
break
current_file = dList[index]
# Change time to match the next file, use that time to compare to to_utc
# If the new time is bigger than to_utc, we have all of the files we care about
current_time = current_time.replace(year = 2000 + int(current_file[22:24]))
current_time = current_time.replace(day=1, month=1)
current_time = current_time + timedelta(days=int(current_file[24:27]) - 1)
current_time = current_time.replace(hour=int(current_file[27:29]),
minute=int(current_file[29:31]),
second=int(current_file[31:33]))
return level0manifest
url_base_l0 = 'ftp://is.sci.gsfc.nasa.gov'
filepath_l0_m = 'gsfcdata/aqua/modis/level0'
filepath_l0_g = 'gsfcdata/aqua/gbad'
url_base_hdf = 'ftp://ladsweb.nascom.nasa.gov'
filepath_geo = 'allData/6/MYD03'
filepath_af = 'allData/6/MYD14'
class VIIRS_NPP(data_source):
"""
375m data from VIIRS instrument on the NPP satellite
"""
def __init__(self, ingest_dir):
super(VIIRS_NPP, self).__init__(ingest_dir)
def retrieve_l0(self, from_utc, to_utc):
"""
        Attempts to retrieve the files to satisfy the simulation request from from_utc to to_utc.
:param from_utc: start time
:param to_utc: end time
:return: list of paths to local level0 files
"""
# This only works for requests going back about the last two weeks
# can add a source for older data later, but I don't think it's needed,
# given the purpose of the project.
if from_utc < datetime.utcnow() - timedelta(days=14):
raise data_sourceError('Requested data older than two weeks')
manifest = self.compute_l0_manifest(from_utc, to_utc)
nonlocals = [x for x in manifest if not self.available_locally(osp.join(self.ingest_dir, x))]
logging.info('Retrieving level0s from %s' % self.url_base_l0 + '/' + self.filepath_l0)
list(map(lambda x:self.download_file(self.url_base_l0 + '/' + self.filepath_l0, x), nonlocals))
return manifest
def compute_l0_manifest(self, from_utc, to_utc):
"""
Compute list of files in the source for the given time frame
:param from_utc: time UTC format
:param to_utc: time UTC format
:return: list of file names as strings
"""
# We want a list of all of the filenames which land between from_utc and to_utc
# Retrieve the directory listing
dList = get_dList(self.url_base_l0 + '/' + self.filepath_l0)
# Gameplan:
# What would a file that starts exactly at from_utc look like?
# format: RNSCA-RVIRS_npp_dYYYYMMdd_thhmmssS_ehhmmssS_bnnnnn_cnnnnnnnnnnnnnnnnnnnn_aaaa_aaa.h5
filename = 'RNSCA-RVIRS_npp_d%04d%02d%02d_t%02d00000_e000000_b00000_c00000000000000000000_aaaa_aaa.h5' % (from_utc.year,
from_utc.month,
from_utc.day,
from_utc.hour)
# Then, we find out where that filename would go in the dList
        # This call binary searches dList for filename, and returns its index (pretty efficient)
# If the filename is not found, it returns the index of the first file larger than it
index = bisect(dList, filename)
# If the filename we made up is not in the list (very likely), we actually want the first file
# smaller than the filename, so we still get the data for that time period
if index == len(dList):
index = index - 1
elif dList[index] != filename:
index = index - 1
current_time = from_utc
level0manifest = []
# there are strange gaps in times between files that I can't reconcile
# so I just take the start of the next file as current_time
while current_time < to_utc:
# Get the file
level0manifest.append(dList[index])
index = index + 1
if index >= len(dList):
break
current_file = dList[index]
# Change time to match the next file, use that time to compare to to_utc
# If the time of the next file is bigger than to_utc, then we have all of the files we care about
current_time = current_time.replace(year=int(current_file[17:21]),
month=int(current_file[21:23]),
day=int(current_file[23:25]),
hour=int(current_file[27:29]),
minute=int(current_file[29:31]),
second=int(current_file[31:33]))
return level0manifest
url_base_l0 = 'ftp://is.sci.gsfc.nasa.gov'
filepath_l0 = 'gsfcdata/npp/viirs/level0'
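def _demo_parse_l0_timestamp():
    """
    Illustration only (not part of the original source): reconstruct the UTC
    timestamp encoded in a level0 PDS file name, using the same character
    offsets as the compute_l0_manifest methods above. The file name is made up.
    """
    name = 'P0420064AAAAAAAAAAAAAA17123123456000.PDS'
    t = datetime(2000 + int(name[22:24]), 1, 1) + timedelta(days=int(name[24:27]) - 1)
    return t.replace(hour=int(name[27:29]), minute=int(name[29:31]), second=int(name[31:33]))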
def geo_intersects(filename, lonlat):
"""
Checks a geolocation file for overlap with a latitude longitude box
:filename: name of file to check
:lonlat: list, [leftlon, rightlon, botlat, toplat]
:return: boolean, true if there was overlap
"""
logging.info("Checking %s for intersection with given lonlat" % filename)
if filename[-4:] != '.hdf':
logging.info("ERROR: %s is not an hdf file" % filename)
return False
try:
hdf = SD.SD(filename)
except:
logging.info("ERROR: failed to load file: %s" % filename)
return False
lon = hdf.select('Longitude')
lat = hdf.select('Latitude')
dim1 = len(lon[:])
dim2 = len(lon[0])
minlon = float(lon[0][0])
maxlon = float(lon[dim1 - 1][dim2 - 1])
minlat = float(lat[dim1 - 1][dim2 - 1])
maxlat = float(lat[0][0])
if minlon > maxlon:
logging.info("File %s crosses dateline (not currently supported), skipping..." % filename)
return False
lonoverlap = minlon < lonlat[1] and maxlon > lonlat[0]
latoverlap = minlat < lonlat[3] and maxlat > lonlat[2]
intersects = lonoverlap and latoverlap
if intersects:
logging.info("File %s intersects given lonlat")
else:
logging.info("File %s does not intersect given lonlat")
return intersects
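def _demo_geo_intersects():
    """
    Illustration only (not part of the original source): check a granule against
    the CONUS box quoted in the comment at the top of this module. The file path
    is an assumption; if it does not exist, geo_intersects logs an error and
    returns False rather than raising.
    """
    conus = [-124.7844079, -66.9513812, 24.7433195, 49.3457868]
    granule = osp.join('ingest', 'MOD03.A2017123.1234.006.2017123193000.hdf')
    return geo_intersects(granule, conus)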
def manifest_from_geo(source, geo_list, granule_name):
    # Module-level variant of data_source.manifest_from_geo; it has no self, so
    # the data source whose URL and path attributes should be used is passed in
    # explicitly as `source`.
    prefix = ''
    file_list = []
    for g in geo_list:
        if g[:19] != prefix:
            prefix = g[:19]
            file_list.extend(get_dList(source.url_base_hdf + '/' + source.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
manifest = []
# Search for what the name should look like and use that index to add that name to the manifest
# this takes n*log(n) time, which I think is pretty good
for g in geo_list:
manifest.append(file_list[bisect(file_list, granule_name + g[5:24] + '99999999999999.hdf') - 1])
return manifest
# wisdom src/vis, postprocessor
# def compute_af_manifest(self, geo_list):
# """
# get list of active fire file names from a set of geolocation files
#
# :param geo_list: list containing geolocation file names
# """
#
# prefix = ''
# file_list = []
#
# for g in geo_list:
# if g[:19] != prefix:
# prefix = g[:19]
# file_list.extend(get_dList(self.url_base_hdf + '/' + self.filepath_af + '/' + str(prefix[7:11]) + '/' + str(prefix[11:14])))
#
# manifest = []
#
# # Search for what the name should look like and use that index to add that name to the manifest
# # this takes n*log(n) time, which I think is pretty good
# for g in geo_list:
# manifest.append(file_list[bisect(file_list, 'MYD14' + g[5:24] + '99999999999999.hdf') - 1])
#
# return manifest
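# Hypothetical end-to-end usage (not part of the original source); the ingest
# directory and the one-day time window are assumptions:
#
#     source = MODIS_TERRA('~/ingest')
#     files = source.retrieve_data(datetime.utcnow() - timedelta(days=1),
#                                  datetime.utcnow(),
#                                  [-124.78, -66.95, 24.74, 49.34])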
|
vejmelkam/wrfxpy
|
src/ingest/level0_source.py
|
Python
|
mit
| 36,889
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.plugins import BeetsPlugin
from beets.dbcore import types
from beets.util.confit import ConfigValueError
from beets import library
class TypesPlugin(BeetsPlugin):
@property
def item_types(self):
return self._types()
@property
def album_types(self):
return self._types()
def _types(self):
if not self.config.exists():
return {}
mytypes = {}
for key, value in self.config.items():
if value.get() == 'int':
mytypes[key] = types.INTEGER
elif value.get() == 'float':
mytypes[key] = types.FLOAT
elif value.get() == 'bool':
mytypes[key] = types.BOOLEAN
elif value.get() == 'date':
mytypes[key] = library.DateType()
else:
raise ConfigValueError(
u"unknown type '{0}' for the '{1}' field"
.format(value, key))
return mytypes
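# Illustration (not part of the original source): _types() above expects the
# plugin's config section to map flexible field names to one of the type names
# it recognises, e.g. in beets' config.yaml:
#
#     types:
#         rating: int
#         released: date
#
# which would yield {'rating': types.INTEGER, 'released': library.DateType()}.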
|
LordSputnik/beets
|
beetsplug/types.py
|
Python
|
mit
| 1,775
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/loot_schematic/shared_death_watch_mandalorian_belt_schematic.iff"
result.attribute_template_id = -1
result.stfName("craft_item_ingredients_n","armor_mandalorian_belt")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/loot/loot_schematic/shared_death_watch_mandalorian_belt_schematic.py
|
Python
|
mit
| 509
|
from yowsup.layers.protocol_contacts.protocolentities.iq_sync_get import GetSyncIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_contacts.protocolentities.test_iq_sync import SyncIqProtocolEntityTest
class GetSyncIqProtocolEntityTest(SyncIqProtocolEntityTest):
def setUp(self):
super(GetSyncIqProtocolEntityTest, self).setUp()
self.ProtocolEntity = GetSyncIqProtocolEntity
users = [
ProtocolTreeNode("user", data = "abc"),
ProtocolTreeNode("user", data = "xyz")
]
syncNode = self.node.getChild("sync")
syncNode.setAttribute("mode", GetSyncIqProtocolEntity.MODE_DELTA)
syncNode.setAttribute("context", GetSyncIqProtocolEntity.CONTEXT_INTERACTIVE)
syncNode.addChildren(users)
|
felix-dumit/campusbot
|
yowsup2/yowsup/layers/protocol_contacts/protocolentities/test_iq_sync_get.py
|
Python
|
mit
| 810
|
__author__ = 'bernardo'
import csv
import mechanize
import re
import time
import BeautifulSoup
import smtplib
def sendEmail(result, sfrom, pwd, sto):
SMTP_SERVER = 'smtp.live.com'
SMTP_PORT = 587
    sender = sfrom
    recipient = sto
subject = 'Promocoes'
body = result
body = "<b>" + body + "<b>"
headers = ["From: " + sender,
"Subject: " + subject,
"To: " + recipient,
"MIME-Version: 1.0",
"Content-Type: text/html"]
headers = "\r\n".join(headers)
session = smtplib.SMTP(SMTP_SERVER , SMTP_PORT)
session.ehlo()
session.starttls()
    session.ehlo()
session.login(sender, pwd)
session.sendmail(sender, recipient, headers + "\r\n\r\n" + body)
session.quit()
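# Hypothetical usage (not part of the original source); the addresses and
# password below are placeholders, not real credentials:
#
#     sendEmail('<p>Found a new promotion</p>', 'you@outlook.com',
#               'app-password', 'friend@example.com')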
|
sean-smith/website_status
|
inemail.py
|
Python
|
mit
| 782
|
# -*- coding: utf-8 -*-
#
# LICENCE MIT
#
# DESCRIPTION Callgraph builder.
#
# AUTHOR Michal Bukovsky <michal.bukovsky@trilogic.cz>
#
from operator import attrgetter
from inspect import signature
from callgraph.hooks import Hooks
from callgraph.utils import AuPair
from callgraph.symbols import Symbol, UnarySymbol
from callgraph.symbols import IterableConstantSymbol, MappingConstantSymbol
from callgraph.nodes import make_node
from callgraph.indent_printer import IndentPrinter, NonePrinter, dump_tree
# TODO(burlog): hooks as callbacks
# TODO(burlog): properties tests
# TODO(burlog): process signature? are defs invoked during import?
# TODO(burlog): tests for global variables
# TODO(burlog): __getattr__, __getattribute__ overrides will be problem
# TODO(burlog): make result of list(), tuple(), dict(), ... iterable
class CallGraphBuilder(object):
def __init__(self, global_variables={}, silent=False):
self.printer = NonePrinter() if silent else IndentPrinter()
self.global_symbols = self.make_kwargs_symbols(global_variables)
self.hooks = Hooks(self)
self.current_lineno = 0
self.tot = None
def print_banner(self, printer, node):
extra = "<" + node.qualname + "> " if node.qualname != node.name else ""
printer("@ Analyzing: {0} {1}at {2}:{3}"\
.format(node.ast.name, extra, node.filename, node.lineno))
def set_current_lineno(self, printer, expr_lineno):
lineno = self.tot.lineno + expr_lineno
if lineno == self.current_lineno: return
self.current_lineno = lineno
printer("+ line at {0}:{1}".format(self.tot.filename, lineno))
printer("+", self.tot.source_line(expr_lineno).strip())
def make_kwargs_symbols(self, kwargs):
return dict((k, UnarySymbol(self, k, v)) for k, v in kwargs.items())
def build(self, function, kwargs={}):
self.root = None
self.hooks.clear()
symbol = UnarySymbol(self, function.__name__, function)
return self.process(symbol, kwargs=self.make_kwargs_symbols(kwargs))
def process(self, symbol, parent=None, args=[], kwargs={}):
# attach new node to parent list
node = make_node(symbol)
with AuPair(self, node):
if parent:
where = parent.filename, self.current_lineno
if not parent.attach(node, where): return node
# builtins or c/c++ objects have no code
if node.is_opaque: return node
if not symbol.iscallable(): return node
# print nice banner
self.print_banner(self.printer, node)
# magic follows
with self.printer as printer:
self.inject_arguments(printer, node, args, kwargs)
self.process_function(printer, node, args, kwargs)
return node
def process_function(self, printer, node, args, kwargs):
for expr in node.ast.body:
for callee, args, kwargs in expr.evaluate(printer, node.symbol):
self.process(callee, node, args.copy(), kwargs.copy())
def inject_arguments(self, printer, node, args, kwargs):
sig = signature(node.symbol.value)
self.inject_self(printer, node, sig, args, kwargs)
bound = sig.bind_partial(*args, **self.polish_kwargs(sig, kwargs))
self.inject_defaults(printer, node, sig, bound)
for name, value in bound.arguments.items():
value_symbol = self.as_symbol(value)
printer("% Binding argument:", name + "=" + str(value_symbol))
node.symbol.set(name, value_symbol)
def polish_kwargs(self, sig, kwargs):
for param in sig.parameters.values():
if param.kind == param.VAR_KEYWORD:
return kwargs
return dict(self.iter_kwargs(sig, kwargs))
def iter_kwargs(self, sig, kwargs):
for param in sig.parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD:
if param.name in kwargs:
yield param.name, kwargs[param.name]
def inject_self(self, printer, node, sig, args, kwargs):
if node.symbol.myself and sig.parameters:
# TODO(burlog): better bound method detection
if next(iter(sig.parameters.keys())) == "self":
args.insert(0, node.symbol.myself)
else:
# TODO(burlog): improve detection logic
kwargs["self"] = node.symbol.myself
def inject_defaults(self, printer, node, sig, bound):
for param in sig.parameters.values():
if param.name not in bound.arguments:
if param.default is not param.empty:
symbol = UnarySymbol(self, param.name, param.default)
bound.arguments[param.name] = symbol
def as_symbol(self, value):
if isinstance(value, Symbol):
return value
elif isinstance(value, (tuple, list)):
return IterableConstantSymbol(self, tuple, value)
elif isinstance(value, dict):
values = list(value.values())
keys = list(UnarySymbol(self, "str", k) for k in value.keys())
return MappingConstantSymbol(self, dict, keys, values)
raise RuntimeError("Can't convert value to symbol: " + str(value))
# dogfooding build function
if __name__ == "__main__":
builder = CallGraphBuilder()
kwargs = {"self": CallGraphBuilder, "function": CallGraphBuilder.build}
root = builder.build(CallGraphBuilder.build, kwargs)
print(80 * "=")
dump_tree(root, lambda x: x.children)
|
burlog/py-static-callgraph
|
callgraph/builder.py
|
Python
|
mit
| 5,632
|
"""Subclass of IsClassMethods, which is generated by wxFormBuilder."""
from beatle import model
from beatle.lib import wxx
from beatle.activity.models.ui import ui as ui
from beatle.app.utils import cached_type
# Implementing IsClassMethods
class IsClassMethods(ui.IsClassMethods):
"""
    This dialog allows adding and removing is_class methods.
"""
@wxx.SetInfo(__doc__)
def __init__(self, parent, container):
"""Dialog initialization"""
super(IsClassMethods, self).__init__(parent)
        # container is the base class
self.container = container
# create a map of feasible casts
self._classes = []
for k in container._deriv:
self.visit(k)
# get current methods
self._is_class_methods = container(model.cc.IsClassMethod)
# create map from names to implementations
self._is_class_method_names = dict([(x._name, x) for x in self._is_class_methods])
# create map from feasible is_class to current impl
self._map = {}
for k in self._classes:
name = k.scoped
name = "is_" + name.replace('::', '_')
if name in self._is_class_method_names:
self._map[name] = (k, self._is_class_method_names[name])
else:
self._map[name] = (k, None)
# do a label insertion remembering state
pos = 0
for k in self._map:
v = self._map[k]
self.m_checkList2.Insert(k, pos, v)
if v[1]:
self.m_checkList2.Check(pos)
pos = pos + 1
def visit(self, k):
"""Add inheritance branch"""
for l in k._deriv:
self.visit(l)
self._classes.append(k)
def get_kwargs(self):
"""Returns kwargs dictionary suitable for objects creation"""
kwargs_list = []
tbool = cached_type(self.container.project, 'bool')
for item in range(0, self.m_checkList2.GetCount()):
v = self.m_checkList2.GetClientData(item)
c = self.m_checkList2.IsChecked(item)
if (c and v[1]) or (not c and not v[1]):
continue
if c:
kwargs = {}
derivative = v[0]
kwargs['parent'] = self.container
kwargs['name'] = 'is_' + derivative.scoped.replace('::', '_')
kwargs['type'] = model.cc.typeinst(
type=tbool, const=True)
kwargs['constmethod'] = True
kwargs['note'] = 'This method checks if the instance is specialized as {0}'.format(derivative.GetFullLabel())
kwargs['declare'] = True
kwargs['implement'] = True
kwargs['content'] = '\treturn ( dynamic_cast<const {0}*>(this) != nullptr );'.format(derivative.scoped)
kwargs_list.append(kwargs)
else:
v[1].Delete()
return kwargs_list
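# Illustration (not part of the original source): for a checked derivative class
# whose scoped name is "pkg::Foo", get_kwargs() above yields a method description
# named is_pkg_Foo whose generated C++ body is
#
#     return ( dynamic_cast<const pkg::Foo*>(this) != nullptr );
#
# "pkg::Foo" is a made-up class name used only for this example.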
|
melviso/phycpp
|
beatle/activity/models/ui/dlg/cc/IsClassMethods.py
|
Python
|
gpl-2.0
| 2,970
|
import os
from gettext import gettext as _
from pulp.bindings.exceptions import NotFoundException
from pulp.client.arg_utils import convert_boolean_arguments
from pulp.client.extensions.decorator import priority
from pulp.client.extensions.extensions import PulpCliCommand, PulpCliOption
from pulp.client.commands.polling import PollingCommand
from pulp.client.commands.consumer.query import ConsumerListCommand
from pulp.client.commands.options import OPTION_REPO_ID, OPTION_CONSUMER_ID
from pulp.client.commands.repo.cudl import ListRepositoriesCommand
from pulp_node import constants
from pulp_node.extension import (missing_resources, node_activated, repository_enabled,
ensure_node_section)
from pulp_node.extensions.admin import sync_schedules
from pulp_node.extensions.admin.options import (NODE_ID_OPTION, MAX_BANDWIDTH_OPTION,
MAX_CONCURRENCY_OPTION)
from pulp_node.extensions.admin.rendering import ProgressTracker, UpdateRenderer
NODE = _('Node')
CONSUMER = _('Consumer')
REPOSITORY = _('Repository')
REPO_NAME = 'repo'
ACTIVATE_NAME = 'activate'
DEACTIVATE_NAME = 'deactivate'
ENABLE_NAME = 'enable'
DISABLE_NAME = 'disable'
SYNC_NAME = 'sync'
PUBLISH_NAME = 'publish'
BIND_NAME = 'bind'
UNBIND_NAME = 'unbind'
UPDATE_NAME = 'run'
SCHEDULES_NAME = 'schedules'
NODE_LIST_DESC = _('list child nodes')
REPO_LIST_DESC = _('list node enabled repositories')
ACTIVATE_DESC = _('activate a consumer as a child node')
DEACTIVATE_DESC = _('deactivate a child node')
BIND_DESC = _('bind a child node to a repository')
UNBIND_DESC = _('removes the binding between a child node and a repository')
UPDATE_DESC = _('triggers an immediate synchronization of a child node')
ENABLE_DESC = _('enables binding to a repository by a child node')
DISABLE_DESC = _('disables binding to a repository by a child node')
REPO_DESC = _('repository related commands')
AUTO_PUBLISH_DESC = _('if "true", the nodes information will be automatically published each '
'time the repository is synchronized; defaults to "true"')
SYNC_DESC = _('child node synchronization commands')
PUBLISH_DESC = _('publishing commands')
STRATEGY_DESC = _('synchronization strategy (mirror|additive) default is additive')
SCHEDULES_DESC = _('manage node sync schedules')
NODE_LIST_TITLE = _('Child Nodes')
REPO_LIST_TITLE = _('Enabled Repositories')
AUTO_PUBLISH_OPTION = PulpCliOption('--auto-publish', AUTO_PUBLISH_DESC, required=False,
default='true')
STRATEGY_OPTION = \
PulpCliOption('--strategy', STRATEGY_DESC, required=False, default=constants.ADDITIVE_STRATEGY)
# --- messages ---------------------------------------------------------------
REPO_ENABLED = _('Repository enabled.')
REPO_DISABLED = _('Repository disabled.')
PUBLISH_SUCCEEDED = _('Publish succeeded.')
PUBLISH_FAILED = _('Publish failed. See: pulp log for details.')
NODE_ACTIVATED = _('Consumer activated as child node.')
NODE_DEACTIVATED = _('Child node deactivated.')
BIND_SUCCEEDED = _('Node bind succeeded.')
UNBIND_SUCCEEDED = _('Node unbind succeeded')
ALREADY_ENABLED = _('Repository already enabled. Nothing done.')
FAILED_NOT_ENABLED = _('Repository not enabled. See: the \'node repo enable\' command.')
NOT_BOUND_NOTHING_DONE = _('Node not bound to repository. No action performed.')
NOT_ACTIVATED_ERROR = _(
'%(t)s [ %(id)s ] not activated as a node. See: the \'node activate\' command.')
NOT_ACTIVATED_NOTHING_DONE = _('%(t)s is not activated as a node. No action performed.')
NOT_ENABLED_NOTHING_DONE = _('%(t)s not enabled. No action performed.')
STRATEGY_NOT_SUPPORTED = _('Strategy [ %(n)s ] not supported. Must be one of: %(s)s')
RESOURCE_MISSING_ERROR = _('%(t)s [ %(id)s ] not found on the server.')
ALREADY_ACTIVATED_NOTHING_DONE = _('%(n)s already activated as child node. No action performed.')
BIND_WARNING = \
_('Note: Repository [ %(r)s ] will be included in node synchronization.')
UNBIND_WARNING = \
_('Warning: Repository [ %(r)s ] will NOT be included in node synchronization')
ENABLE_WARNING = \
_('Note: Repository [ %(r)s ] will not be available for node synchronization until published.'
' See: the \'node repo publish\' command.')
AUTO_PUBLISH_WARNING = \
_('Warning: enabling with auto-publish may degrade repository synchronization performance.')
# --- extension loading ------------------------------------------------------
@priority()
def initialize(context):
"""
:type context: pulp.client.extensions.core.ClientContext
"""
node_section = ensure_node_section(context.cli)
node_section.add_command(NodeListCommand(context))
node_section.add_command(NodeActivateCommand(context))
node_section.add_command(NodeDeactivateCommand(context))
node_section.add_command(NodeBindCommand(context))
node_section.add_command(NodeUnbindCommand(context))
repo_section = node_section.create_subsection(REPO_NAME, REPO_DESC)
repo_section.add_command(NodeRepoEnableCommand(context))
repo_section.add_command(NodeRepoDisableCommand(context))
repo_section.add_command(NodeListRepositoriesCommand(context))
repo_section.add_command(NodeRepoPublishCommand(context))
sync_section = node_section.create_subsection(SYNC_NAME, SYNC_DESC)
sync_section.add_command(NodeUpdateCommand(context))
schedules_section = sync_section.create_subsection(SCHEDULES_NAME, SCHEDULES_DESC)
schedules_section.add_command(sync_schedules.NodeCreateScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeDeleteScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeUpdateScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeListScheduleCommand(context))
schedules_section.add_command(sync_schedules.NodeNextRunCommand(context))
# --- listing ----------------------------------------------------------------
class NodeListCommand(ConsumerListCommand):
STRATEGY_FIELD = 'update_strategy'
_ALL_FIELDS = ConsumerListCommand._ALL_FIELDS[0:-1] \
+ [STRATEGY_FIELD] + ConsumerListCommand._ALL_FIELDS[-1:]
def __init__(self, context):
super(NodeListCommand, self).__init__(context, description=NODE_LIST_DESC)
def get_title(self):
return NODE_LIST_TITLE
def get_consumer_list(self, kwargs):
nodes = []
for consumer in super(NodeListCommand, self).get_consumer_list(kwargs):
notes = consumer['notes']
if not notes.get(constants.NODE_NOTE_KEY):
continue
consumer[self.STRATEGY_FIELD] = \
notes.get(constants.STRATEGY_NOTE_KEY, constants.DEFAULT_STRATEGY)
nodes.append(consumer)
return nodes
def format_bindings(self, consumer):
formatted = {}
key = 'bindings'
for binding in consumer.get(key, []):
repo_id = binding['repo_id']
type_id = binding['type_id']
if type_id not in constants.ALL_DISTRIBUTORS:
# nodes only
continue
strategy = binding['binding_config'].get('strategy', constants.DEFAULT_STRATEGY)
repo_ids = formatted.get(strategy)
if repo_ids is None:
repo_ids = []
formatted[strategy] = repo_ids
repo_ids.append(repo_id)
consumer[key] = formatted
class NodeListRepositoriesCommand(ListRepositoriesCommand):
def __init__(self, context):
super(NodeListRepositoriesCommand, self).__init__(
context,
description=REPO_LIST_DESC,
repos_title=REPO_LIST_TITLE)
def get_repositories(self, query_params, **kwargs):
enabled = []
_super = super(NodeListRepositoriesCommand, self)
repositories = _super.get_repositories(query_params, **kwargs)
for repository in repositories:
repo_id = repository['id']
http = self.context.server.repo_distributor.distributors(repo_id)
for dist in http.response_body:
if dist['distributor_type_id'] in constants.ALL_DISTRIBUTORS:
enabled.append(repository)
return enabled
# --- publishing -------------------------------------------------------------
class NodeRepoPublishCommand(PollingCommand):
def __init__(self, context):
super(NodeRepoPublishCommand, self).__init__(PUBLISH_NAME, PUBLISH_DESC, self.run, context)
self.add_option(OPTION_REPO_ID)
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
if not repository_enabled(self.context, repo_id):
msg = FAILED_NOT_ENABLED
self.context.prompt.render_success_message(msg)
return
try:
http = self.context.server.repo_actions.publish(repo_id, constants.HTTP_DISTRIBUTOR, {})
task = http.response_body
self.poll([task], kwargs)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'repo_id':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
def succeeded(self, task):
self.context.prompt.render_success_message(PUBLISH_SUCCEEDED)
def failed(self, task):
self.context.prompt.render_failure_message(PUBLISH_FAILED)
# --- activation -------------------------------------------------------------
class NodeActivateCommand(PulpCliCommand):
def __init__(self, context):
super(NodeActivateCommand, self).__init__(ACTIVATE_NAME, ACTIVATE_DESC, self.run)
self.add_option(OPTION_CONSUMER_ID)
self.add_option(STRATEGY_OPTION)
self.context = context
def run(self, **kwargs):
consumer_id = kwargs[OPTION_CONSUMER_ID.keyword]
strategy = kwargs[STRATEGY_OPTION.keyword]
delta = {'notes': {constants.NODE_NOTE_KEY: True, constants.STRATEGY_NOTE_KEY: strategy}}
if node_activated(self.context, consumer_id):
msg = ALREADY_ACTIVATED_NOTHING_DONE % dict(n=CONSUMER)
self.context.prompt.render_success_message(msg)
return
if strategy not in constants.STRATEGIES:
msg = STRATEGY_NOT_SUPPORTED % dict(n=strategy, s=constants.STRATEGIES)
self.context.prompt.render_failure_message(msg)
return os.EX_DATAERR
try:
self.context.server.consumer.update(consumer_id, delta)
self.context.prompt.render_success_message(NODE_ACTIVATED)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'consumer':
msg = RESOURCE_MISSING_ERROR % dict(t=CONSUMER, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
class NodeDeactivateCommand(PulpCliCommand):
def __init__(self, context):
super(NodeDeactivateCommand, self).__init__(DEACTIVATE_NAME, DEACTIVATE_DESC, self.run)
self.add_option(NODE_ID_OPTION)
self.context = context
def run(self, **kwargs):
consumer_id = kwargs[NODE_ID_OPTION.keyword]
delta = {'notes': {constants.NODE_NOTE_KEY: None, constants.STRATEGY_NOTE_KEY: None}}
if not node_activated(self.context, consumer_id):
msg = NOT_ACTIVATED_NOTHING_DONE % dict(t=CONSUMER)
self.context.prompt.render_success_message(msg)
return
try:
self.context.server.consumer.update(consumer_id, delta)
self.context.prompt.render_success_message(NODE_DEACTIVATED)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'consumer':
msg = RESOURCE_MISSING_ERROR % dict(t=CONSUMER, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
# --- enable -----------------------------------------------------------------
class NodeRepoEnableCommand(PulpCliCommand):
def __init__(self, context):
super(NodeRepoEnableCommand, self).__init__(ENABLE_NAME, ENABLE_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.add_option(AUTO_PUBLISH_OPTION)
self.context = context
def run(self, **kwargs):
convert_boolean_arguments([AUTO_PUBLISH_OPTION.keyword], kwargs)
repo_id = kwargs[OPTION_REPO_ID.keyword]
auto_publish = kwargs[AUTO_PUBLISH_OPTION.keyword]
binding = self.context.server.repo_distributor
if repository_enabled(self.context, repo_id):
msg = ALREADY_ENABLED
self.context.prompt.render_success_message(msg)
return
try:
binding.create(
repo_id,
constants.HTTP_DISTRIBUTOR,
{},
auto_publish,
constants.HTTP_DISTRIBUTOR)
self.context.prompt.render_success_message(REPO_ENABLED)
self.context.prompt.render_warning_message(ENABLE_WARNING % dict(r=repo_id))
if auto_publish:
self.context.prompt.render_warning_message(AUTO_PUBLISH_WARNING)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'repository':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
class NodeRepoDisableCommand(PulpCliCommand):
def __init__(self, context):
super(NodeRepoDisableCommand, self).__init__(DISABLE_NAME, DISABLE_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.context = context
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
try:
self.context.server.repo_distributor.delete(repo_id, constants.HTTP_DISTRIBUTOR)
self.context.prompt.render_success_message(REPO_DISABLED)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'repository':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
self.context.prompt.render_failure_message(msg)
continue
if _type == 'distributor':
msg = NOT_ENABLED_NOTHING_DONE % dict(t=REPOSITORY)
self.context.prompt.render_success_message(msg)
continue
raise
return os.EX_DATAERR
class BindingCommand(PulpCliCommand):
def missing_resources(self, prompt, exception):
unhandled = []
for _id, _type in missing_resources(exception):
if _type == 'consumer_id':
msg = RESOURCE_MISSING_ERROR % dict(t=NODE, id=_id)
prompt.render_failure_message(msg)
continue
if _type == 'repo_id':
msg = RESOURCE_MISSING_ERROR % dict(t=REPOSITORY, id=_id)
prompt.render_failure_message(msg)
continue
unhandled.append((_id, _type))
return unhandled
class NodeBindCommand(BindingCommand):
def __init__(self, context):
super(NodeBindCommand, self).__init__(BIND_NAME, BIND_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.add_option(NODE_ID_OPTION)
self.add_option(STRATEGY_OPTION)
self.context = context
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
node_id = kwargs[NODE_ID_OPTION.keyword]
dist_id = constants.HTTP_DISTRIBUTOR
strategy = kwargs[STRATEGY_OPTION.keyword]
binding_config = {constants.STRATEGY_KEYWORD: strategy}
if not node_activated(self.context, node_id):
msg = NOT_ACTIVATED_ERROR % dict(t=CONSUMER, id=node_id)
self.context.prompt.render_failure_message(msg)
return os.EX_USAGE
if strategy not in constants.STRATEGIES:
msg = STRATEGY_NOT_SUPPORTED % dict(n=strategy, s=constants.STRATEGIES)
self.context.prompt.render_failure_message(msg)
return os.EX_DATAERR
try:
self.context.server.bind.bind(
node_id,
repo_id,
dist_id,
notify_agent=False,
binding_config=binding_config)
self.context.prompt.render_success_message(BIND_SUCCEEDED)
warning = BIND_WARNING % dict(r=repo_id)
self.context.prompt.render_warning_message(warning)
except NotFoundException, e:
unhandled = self.missing_resources(self.context.prompt, e)
for _id, _type in unhandled:
if _type == 'distributor':
msg = FAILED_NOT_ENABLED
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
class NodeUnbindCommand(BindingCommand):
def __init__(self, context):
super(NodeUnbindCommand, self).__init__(UNBIND_NAME, UNBIND_DESC, self.run)
self.add_option(OPTION_REPO_ID)
self.add_option(NODE_ID_OPTION)
self.context = context
def run(self, **kwargs):
repo_id = kwargs[OPTION_REPO_ID.keyword]
node_id = kwargs[NODE_ID_OPTION.keyword]
dist_id = constants.HTTP_DISTRIBUTOR
try:
self.context.server.bind.unbind(node_id, repo_id, dist_id)
self.context.prompt.render_success_message(UNBIND_SUCCEEDED)
warning = UNBIND_WARNING % dict(r=repo_id)
self.context.prompt.render_warning_message(warning)
except NotFoundException, e:
unhandled = self.missing_resources(self.context.prompt, e)
for _id, _type in unhandled:
if _type == 'bind_id':
msg = NOT_BOUND_NOTHING_DONE
self.context.prompt.render_success_message(msg)
else:
raise
return os.EX_DATAERR
class NodeUpdateCommand(PollingCommand):
def __init__(self, context):
super(NodeUpdateCommand, self).__init__(UPDATE_NAME, UPDATE_DESC, self.run, context)
self.add_option(NODE_ID_OPTION)
self.add_option(MAX_CONCURRENCY_OPTION)
self.add_option(MAX_BANDWIDTH_OPTION)
self.tracker = ProgressTracker(self.context.prompt)
def run(self, **kwargs):
node_id = kwargs[NODE_ID_OPTION.keyword]
max_bandwidth = kwargs[MAX_BANDWIDTH_OPTION.keyword]
max_concurrency = kwargs[MAX_CONCURRENCY_OPTION.keyword]
units = [dict(type_id='node', unit_key=None)]
options = {
constants.MAX_DOWNLOAD_BANDWIDTH_KEYWORD: max_bandwidth,
constants.MAX_DOWNLOAD_CONCURRENCY_KEYWORD: max_concurrency,
}
if not node_activated(self.context, node_id):
msg = NOT_ACTIVATED_ERROR % dict(t=CONSUMER, id=node_id)
self.context.prompt.render_failure_message(msg)
return os.EX_USAGE
try:
http = self.context.server.consumer_content.update(node_id, units=units,
options=options)
task = http.response_body
self.poll([task], kwargs)
except NotFoundException, e:
for _id, _type in missing_resources(e):
if _type == 'consumer':
msg = RESOURCE_MISSING_ERROR % dict(t=NODE, id=_id)
self.context.prompt.render_failure_message(msg)
else:
raise
return os.EX_DATAERR
def progress(self, task, spinner):
self.tracker.display(task.progress_report)
def succeeded(self, task):
report = task.result['details'].values()[0]
r = UpdateRenderer(self.context.prompt, report)
r.render()
| jeremycline/pulp | nodes/extensions/admin/pulp_node/extensions/admin/commands.py | Python | gpl-2.0 | 20,361 |
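Editor's note: the commands above treat a child node as nothing more than a consumer whose notes carry the node flag. The standalone sketch below mirrors the filtering in NodeListCommand.get_consumer_list; the note key names and the 'additive' default are placeholder assumptions standing in for pulp_node.constants, which is not reproduced here.

# Illustrative sketch only -- not part of the pulp source above.
def child_nodes(consumers, node_key="_child-node", strategy_key="_node-strategy"):
    """Keep only consumers flagged as nodes; fill in a default update strategy."""
    nodes = []
    for consumer in consumers:
        notes = consumer.get("notes", {})
        if not notes.get(node_key):
            # not activated as a child node
            continue
        consumer["update_strategy"] = notes.get(strategy_key, "additive")
        nodes.append(consumer)
    return nodes

print(child_nodes([{"id": "c1", "notes": {"_child-node": True}},
                   {"id": "c2", "notes": {}}]))
# -> only c1 survives, with update_strategy defaulted to 'additive'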
import time
import requests
import answer
min_question_id = 1000
max_question_id = 5604
sleep_time = 10
submit_url = 'http://acm.hdu.edu.cn/submit.php?action=submit'
login_url = 'http://acm.hdu.edu.cn/userloginex.php?action=login'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36',
}
class HDU:
def __init__(self, username, password):
# os.chdir(sys.path[0])
self.session = requests.Session()
self.session.headers = headers
self.username = username
self.password = password
self.is_login = False
# self.login(username, password)
def login(self):
data = {
"userpass": self.password,
"username": self.username,
'login': 'Sign In'
}
res = self.session.post(login_url, data=data)
# print(res.text.encode('UTF-8'))
if res.status_code == 200:
self.is_login = True
return True
return False
def submit(self, problem, code):
data = {
"check": "0",
"problemid": str(problem),
"usercode": code,
"language": "0"
}
res = self.session.post(submit_url, data=data)
# print(res.text.encode('UTF-8'))
# TODO
if res.status_code == 200:
return True
return False
def get_session(self):
return self.session
# TODO
def get_state(self, problem):
return False
def solve(user, id):
answers = answer.get_answer("%s%d" % ('hdu', id))
if answers is None or answers == []:
print(None)
return None
count = 1
for ans in answers:
# print(ans)
user.submit(id, ans)
print('submit', count)
        if count == 2:  # '==' not 'is': compare the counter by value
break
count += 1
time.sleep(sleep_time)
return None
def hdu():
print("HDU:")
# name = input("Your username: ")
# password = input("Your password: ")
name = 'printhello'
password = '123456'
my = HDU(name, password)
if my.login() is False:
return
# return
for i in range(4979, max_question_id):
print('id :', i)
solve(my, i)
time.sleep(sleep_time)
return None
if __name__ == '__main__':
hdu()
| hnu2013wwj/DH-AutoAC | Python/hdu.py | Python | gpl-2.0 | 2,371 |
#!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 5
# This program requires Python 2.7 or any later version
import os
import cgi
import argparse
import BaseHTTPServer
import CGIHTTPServer
import cgitb
cgitb.enable() ## enable CGI error reporting
def web_server(port):
server = BaseHTTPServer.HTTPServer
handler = CGIHTTPServer.CGIHTTPRequestHandler #RequestsHandler
server_address = ("", port)
handler.cgi_directories = ["/cgi-bin", ]
httpd = server(server_address, handler)
print "Starting web server with CGI support on port: %s ..." %port
httpd.serve_forever()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='CGI Server Example')
parser.add_argument('--port', action="store", dest="port", type=int, required=True)
given_args = parser.parse_args()
web_server(given_args.port)
| simontakite/sysadmin | pythonscripts/pythonnetworkingcoookbook/chapter5/5_7_cgi_server.py | Python | gpl-2.0 | 877 |
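Editor's note: the recipe above only starts the server, so it may help to see the kind of script it would execute. The sketch below is a hypothetical cgi-bin/hello.py (the file name and the "name" query parameter are assumptions made for the example); any executable script under the configured cgi_directories is handled the same way. It is written to run under both Python 2 and 3.

#!/usr/bin/env python
# Hypothetical cgi-bin/hello.py for the CGI server above (illustrative sketch).
import cgi

form = cgi.FieldStorage()                 # parses the query string or POST body
name = form.getvalue("name", "world")     # assumed example parameter
print("Content-Type: text/html")          # CGI response header
print("")                                 # blank line terminates the headers
print("<html><body>Hello, %s!</body></html>" % name)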
# Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import cStringIO
from . import exception as dns_exception
from . import rdata as dns_rdata
from . import rdatatype as dns_rdatatype
from . import name as dns_name
class NSEC(dns_rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns_name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns_rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns_rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns_exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns_exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
byte = offset / 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, next, windows)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns_name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns_exception.FormError("NSEC too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns_exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns_exception.FormError("bad NSEC bitmap length")
bitmap = wire[current : current + octets]
current += octets
rdlen -= octets
windows.append((window, bitmap))
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.next, other.next)
if v == 0:
b1 = cStringIO.StringIO()
for (window, bitmap) in self.windows:
b1.write(chr(window))
b1.write(chr(len(bitmap)))
b1.write(bitmap)
b2 = cStringIO.StringIO()
for (window, bitmap) in other.windows:
b2.write(chr(window))
b2.write(chr(len(bitmap)))
b2.write(bitmap)
v = cmp(b1.getvalue(), b2.getvalue())
return v
| Jamlum/pytomo | pytomo/dns/rdtypes/ANY/NSEC.py | Python | gpl-2.0 | 5,407 |
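Editor's note: the window/bitmap packing that NSEC.from_text performs above is easier to see in isolation. The sketch below mirrors only that arithmetic (window = rdtype // 256, one bit per type, high bit first) and is not part of the dnspython-derived module; the rdtype numbers 1 (A), 15 (MX) and 47 (NSEC) are standard values used purely as sample input.

# Illustrative sketch of the NSEC type-bitmap packing used in from_text above.
def pack_window(rdtypes):
    """Return the bitmap for one window, assuming every rdtype is in window 0."""
    bitmap = bytearray(32)
    octets = 0
    for rdtype in sorted(set(rdtypes)):
        offset = rdtype % 256
        byte, bit = divmod(offset, 8)     # byte index, bit position (0 = high bit)
        bitmap[byte] |= 0x80 >> bit
        octets = max(octets, byte + 1)    # trailing zero bytes are trimmed
    return bytes(bitmap[:octets])

# A=1, MX=15 and NSEC=47 set bits 1, 15 and 47: bitmap 40 01 00 00 00 01 (6 octets).
assert pack_window([1, 15, 47]) == bytes(bytearray([0x40, 0x01, 0, 0, 0, 0x01]))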
import os
from autotest.client import test, utils
# tests is a simple array of "cmd" "arguments"
tests = [["aio-dio-invalidate-failure", "poo"],
["aio-dio-subblock-eof-read", "eoftest"],
["aio-free-ring-with-bogus-nr-pages", ""],
["aio-io-setup-with-nonwritable-context-pointer", ""],
["aio-dio-extend-stat", "file"],
]
name = 0
arglist = 1
class aio_dio_bugs(test.test):
version = 5
preserve_srcdir = True
def initialize(self):
self.job.require_gcc()
self.job.setup_dep(['libaio'])
ldflags = '-L ' + self.autodir + '/deps/libaio/lib'
cflags = '-I ' + self.autodir + '/deps/libaio/include'
self.gcc_flags = ldflags + ' ' + cflags
def setup(self):
os.chdir(self.srcdir)
utils.make('"CFLAGS=' + self.gcc_flags + '"')
def execute(self, args = ''):
os.chdir(self.tmpdir)
libs = self.autodir + '/deps/libaio/lib/'
ld_path = utils.prepend_path(libs,
utils.environ('LD_LIBRARY_PATH'))
var_ld_path = 'LD_LIBRARY_PATH=' + ld_path
for test in tests:
cmd = self.srcdir + '/' + test[name] + ' ' + args + ' ' \
+ test[arglist]
utils.system(var_ld_path + ' ' + cmd)
| nacc/autotest | client/tests/aio_dio_bugs/aio_dio_bugs.py | Python | gpl-2.0 | 1,337 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham, A. Roitman
# Copyright (C) 2007-2009 B. Malengier
# Copyright (C) 2008 Lukasz Rymarczyk
# Copyright (C) 2008 Raphael Ackermann
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2012 Doug Blank
# Copyright (C) 2012-2013 Paul Franklin
# Copyright (C) 2017 Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Module responsible for handling the command line arguments for Gramps.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import sys
import os
import getopt
import logging
import shutil
from glob import glob
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import (LONGOPTS, SHORTOPTS, USER_PLUGINS, VERSION_DIR,
HOME_DIR, TEMP_DIR, THUMB_DIR, ENV_DIR, USER_CSS)
from gramps.gen.utils.cast import get_type_converter
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
_HELP = _("""
Usage: gramps.py [OPTION...]
--load-modules=MODULE1,MODULE2,... Dynamic modules to load
Help options
-?, --help Show this help message
--usage Display brief usage message
Application options
-O, --open=FAMILY_TREE Open Family Tree
-U, --username=USERNAME Database username
-P, --password=PASSWORD Database password
-C, --create=FAMILY_TREE Create on open if new Family Tree
-i, --import=FILENAME Import file
-e, --export=FILENAME Export file
-r, --remove=FAMILY_TREE_PATTERN Remove matching Family Tree(s) (use regular expressions)
-f, --format=FORMAT Specify Family Tree format
-a, --action=ACTION Specify action
-p, --options=OPTIONS_STRING Specify options
-d, --debug=LOGGER_NAME Enable debug logs
-l [FAMILY_TREE_PATTERN...] List Family Trees
-L [FAMILY_TREE_PATTERN...] List Family Trees in Detail
-t [FAMILY_TREE_PATTERN...] List Family Trees, tab delimited
-u, --force-unlock Force unlock of Family Tree
-s, --show Show config settings
-c, --config=[config.setting[:value]] Set config setting(s) and start Gramps
-y, --yes Don't ask to confirm dangerous actions (non-GUI mode only)
-q, --quiet Suppress progress indication output (non-GUI mode only)
-v, --version Show versions
-S, --safe Start Gramps in 'Safe mode'
(temporarily use default settings)
-D, --default=[APXFE] Reset settings to default;
A - addons are cleared
P - Preferences to default
X - Books are cleared, reports and tool settings to default
F - filters are cleared
E - Everything is set to default or cleared
""")
_USAGE = _("""
Example of usage of Gramps command line interface
1. To import four databases (whose formats can be determined from their names)
and then check the resulting database for errors, one may type:
gramps -i file1.ged -i file2.gpkg -i ~/db3.gramps -i file4.wft -a tool -p name=check.
2. To explicitly specify the formats in the above example, append filenames with appropriate -f options:
gramps -i file1.ged -f gedcom -i file2.gpkg -f gramps-pkg -i ~/db3.gramps -f gramps-xml -i file4.wft -f wft -a tool -p name=check.
3. To record the database resulting from all imports, supply -e flag
(use -f if the filename does not allow Gramps to guess the format):
gramps -i file1.ged -i file2.gpkg -e ~/new-package -f gramps-pkg
4. To save any error messages of the above example into files outfile and errfile, run:
gramps -i file1.ged -i file2.dpkg -e ~/new-package -f gramps-pkg >outfile 2>errfile
5. To import three databases and start interactive Gramps session with the result:
gramps -i file1.ged -i file2.gpkg -i ~/db3.gramps
6. To open a database and, based on that data, generate timeline report in PDF format
putting the output into the my_timeline.pdf file:
gramps -O 'Family Tree 1' -a report -p name=timeline,off=pdf,of=my_timeline.pdf
7. To generate a summary of a database:
gramps -O 'Family Tree 1' -a report -p name=summary
8. Listing report options
Use the name=timeline,show=all to find out about all available options for the timeline report.
To find out details of a particular option, use show=option_name , e.g. name=timeline,show=off string.
To learn about available report names, use name=show string.
9. To convert a Family Tree on the fly to a .gramps xml file:
gramps -O 'Family Tree 1' -e output.gramps -f gramps-xml
10. To generate a web site into an other locale (in german):
LANGUAGE=de_DE; LANG=de_DE.UTF-8 gramps -O 'Family Tree 1' -a report -p name=navwebpage,target=/../de
11. Finally, to start normal interactive session type:
gramps
Note: These examples are for bash shell.
Syntax may be different for other shells and for Windows.
""")
#-------------------------------------------------------------------------
# ArgParser
#-------------------------------------------------------------------------
class ArgParser:
"""
This class is responsible for parsing the command line arguments (if any)
given to gramps, and determining if a GUI or a CLI session must be started.
A filename and/or options may be specified as arguments.
The valid options are:
-O, --open=FAMILY_TREE Open Family Tree
-U, --username=USERNAME Database username
-P, --password=PASSWORD Database password
-C, --create=FAMILY_TREE Create on open if new Family Tree
-i, --import=FILENAME Import file
-e, --export=FILENAME Export file
-r, --remove=PATTERN Remove matching Family Tree(s)
-f, --format=FORMAT Specify Family Tree format
-a, --action=ACTION Specify action
-p, --options=OPTIONS_STRING Specify options
-d, --debug=LOGGER_NAME Enable debug logs
-l [FAMILY_TREE...] List Family Trees
-L [FAMILY_TREE...] List Family Trees in Detail
-t [FAMILY_TREE...] List Family Trees, tab delimited
-u, --force-unlock Force unlock of Family Tree
-s, --show Show config settings
-c, --config=SETTINGS Set config setting(s) and start Gramps
-y, --yes Don't ask to confirm dangerous actions
-q, --quiet Suppress progress indication output
-v, --version Show versions
-h, --help Display the help
--usage Display usage information
If the filename (no options) is specified, the interactive session is
launched using data from filename. In this mode (filename, no options), the
rest of the arguments are ignored. This is a mode suitable by default for
GUI launchers, mime type handlers, and the like.
If no filename or -i option is given, a new interactive session (empty
database) is launched, since no data is given anyway.
If -O or -i option is given, but no -e or -a options are given, an
interactive session is launched with the ``FILENAME`` (specified with -i).
If both input (-O or -i) and processing (-e or -a) options are given,
interactive session will not be launched.
When using import or export options (-i or -e), the -f option may be
specified to indicate the family tree format.
Possible values for ``ACTION`` are: 'report', 'book' and 'tool'.
Configuration ``SETTINGS`` may be specified using the -c option. The
settings are of the form config.setting[:value]. If used without a value,
the setting is shown.
If the -y option is given, the user's acceptance of any CLI prompt is
assumed. (see :meth:`.cli.user.User.prompt`)
If the -q option is given, extra noise on sys.stderr, such as progress
indicators, is suppressed.
"""
def __init__(self, args):
"""
Pass the command line arguments on creation.
"""
self.args = args
self.open_gui = None
self.open = None
self.username = None
self.password = None
self.exports = []
self.actions = []
self.imports = []
self.removes = []
self.imp_db_path = None
self.list = False
self.list_more = False
self.list_table = False
self.database_names = None
self.help = False
self.usage = False
self.force_unlock = False
self.create = None
self.quiet = False
self.auto_accept = False
self.errors = []
self.parse_args()
#-------------------------------------------------------------------------
# Argument parser: sorts out given arguments
#-------------------------------------------------------------------------
def parse_args(self):
"""
Fill in lists with open, exports, imports, and actions options.
Any errors are added to self.errors
"""
try:
options, leftargs = getopt.getopt(self.args[1:],
SHORTOPTS, LONGOPTS)
except getopt.GetoptError as msg:
# Extract the arguments in the list.
# The % operator replaces the list elements
# with repr() of the list elements
# which is OK for latin characters,
# but not for non latin characters in list elements
cliargs = "[ "
for arg in range(len(self.args) - 1):
cliargs += self.args[arg + 1] + " "
cliargs += "]"
# Must first do str() of the msg object.
msg = str(msg)
self.errors += [(_('Error parsing the arguments'),
msg + '\n' +
_("Error parsing the arguments: %s \n"
"Type gramps --help for an overview of "
"commands, or read the manual pages."
) % cliargs)]
return
# Some args can work on a list of databases:
if leftargs:
for opt_ix in range(len(options)):
option, value = options[opt_ix]
if option in ['-L', '-l', '-t']:
self.database_names = leftargs
leftargs = []
if leftargs:
# if there were an argument without option,
# use it as a file to open and return
self.open_gui = leftargs[0]
print(_("Trying to open: %s ..."
) % leftargs[0],
file=sys.stderr)
#see if force open is on
for opt_ix in range(len(options)):
option, value = options[opt_ix]
if option in ('-u', '--force-unlock'):
self.force_unlock = True
break
return
# Go over all given option and place them into appropriate lists
cleandbg = []
need_to_quit = False
for opt_ix in range(len(options)):
option, value = options[opt_ix]
if option in ['-O', '--open']:
self.open = value
elif option in ['-C', '--create']:
self.create = value
elif option in ['-U', '--username']:
self.username = value
elif option in ['-P', '--password']:
self.password = value
elif option in ['-i', '--import']:
family_tree_format = None
if (opt_ix < len(options) - 1
and options[opt_ix + 1][0] in ('-f', '--format')):
family_tree_format = options[opt_ix + 1][1]
self.imports.append((value, family_tree_format))
elif option in ['-r', '--remove']:
self.removes.append(value)
elif option in ['-e', '--export']:
family_tree_format = None
if (opt_ix < len(options) - 1
and options[opt_ix + 1][0] in ('-f', '--format')):
family_tree_format = options[opt_ix + 1][1]
abs_name = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(abs_name):
# The file doesn't exists, try to create it.
try:
open(abs_name, 'w').close()
os.unlink(abs_name)
except OSError as e:
message = _("WARNING: %(strerr)s "
"(errno=%(errno)s):\n"
"WARNING: %(name)s\n") % {
'strerr' : e.strerror,
'errno' : e.errno,
'name' : e.filename}
print(message)
sys.exit(1)
self.exports.append((value, family_tree_format))
elif option in ['-a', '--action']:
action = value
if action not in ('report', 'tool', 'book'):
print(_("Unknown action: %s. Ignoring."
) % action,
file=sys.stderr)
continue
options_str = ""
if (opt_ix < len(options)-1
and options[opt_ix+1][0] in ('-p', '--options')):
options_str = options[opt_ix+1][1]
self.actions.append((action, options_str))
elif option in ['-d', '--debug']:
print(_('setup debugging'), value, file=sys.stderr)
logger = logging.getLogger(value)
logger.setLevel(logging.DEBUG)
cleandbg += [opt_ix]
elif option in ['-l']:
self.list = True
elif option in ['-L']:
self.list_more = True
elif option in ['-t']:
self.list_table = True
elif option in ['-s', '--show']:
from gramps.gen.config import config
print(_("Gramps config settings from %s:"
) % config.filename)
for sect in config.data:
for setting in config.data[sect]:
print("%s.%s=%s" % (sect, setting,
repr(config.data[sect][setting])))
print()
sys.exit(0)
elif option in ['-c', '--config']:
from gramps.gen.config import config
cfg_name = value
set_value = False
if cfg_name:
if ":" in cfg_name:
cfg_name, new_value = cfg_name.split(":", 1)
set_value = True
if config.has_default(cfg_name):
setting_value = config.get(cfg_name)
print(_("Current Gramps config setting: "
"%(name)s:%(value)s"
) % {'name' : cfg_name,
'value' : repr(setting_value)},
file=sys.stderr)
if set_value:
# does a user want the default config value?
if new_value in ("DEFAULT", _("DEFAULT")):
new_value = config.get_default(cfg_name)
else:
converter = get_type_converter(setting_value)
new_value = converter(new_value)
config.set(cfg_name, new_value)
# translators: indent "New" to match "Current"
print(_(" New Gramps config setting: "
"%(name)s:%(value)s"
) % {'name' : cfg_name,
'value' : repr(config.get(cfg_name))},
file=sys.stderr)
else:
need_to_quit = True
else:
print(_("Gramps: no such config setting: '%s'"
) % cfg_name,
file=sys.stderr)
need_to_quit = True
cleandbg += [opt_ix]
elif option in ['-h', '-?', '--help']:
self.help = True
elif option in ['-u', '--force-unlock']:
self.force_unlock = True
elif option in ['--usage']:
self.usage = True
elif option in ['-y', '--yes']:
self.auto_accept = True
elif option in ['-q', '--quiet']:
self.quiet = True
elif option in ['-S', '--safe']:
cleandbg += [opt_ix]
elif option in ['-D', '--default']:
def rmtree(path):
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
if 'E' in value or 'A' in value: # clear addons
rmtree(USER_PLUGINS)
if 'E' in value or 'P' in value: # clear ini preferences
for fil in glob(os.path.join(VERSION_DIR, "*.*")):
if "custom_filters.xml" in fil:
continue
os.remove(fil)
# create gramps.ini so config won't load the one from an
# older version of Gramps.
with open(os.path.join(VERSION_DIR, 'gramps.ini'), 'w'):
pass
if 'E' in value or 'F' in value: # clear filters
fil = os.path.join(VERSION_DIR, "custom_filters.xml")
if os.path.isfile(fil):
os.remove(fil)
if 'E' in value or 'X' in value: # clear xml reports/tools
for fil in glob(os.path.join(HOME_DIR, "*.xml")):
os.remove(fil)
if 'E' in value or 'Z' in value: # clear upgrade zips
for fil in glob(os.path.join(HOME_DIR, "*.zip")):
os.remove(fil)
if 'E' in value: # Everything else
rmtree(TEMP_DIR)
rmtree(THUMB_DIR)
rmtree(USER_CSS)
rmtree(ENV_DIR)
rmtree(os.path.join(HOME_DIR, "maps"))
for fil in glob(os.path.join(HOME_DIR, "*")):
if os.path.isfile(fil):
os.remove(fil)
sys.exit(0) # Done with Default
#clean options list
cleandbg.reverse()
for ind in cleandbg:
del options[ind]
if (len(options) > 0
and self.open is None
and self.imports == []
and self.removes == []
and not (self.list
or self.list_more
or self.list_table
or self.help)):
# Extract and convert to unicode the arguments in the list.
# The % operator replaces the list elements with repr() of
# the list elements, which is OK for latin characters
# but not for non-latin characters in list elements
cliargs = "[ "
for arg in range(len(self.args) - 1):
cliargs += self.args[arg + 1] + ' '
cliargs += "]"
self.errors += [(_('Error parsing the arguments'),
_("Error parsing the arguments: %s \n"
"To use in the command-line mode, supply at "
"least one input file to process."
) % cliargs)]
if need_to_quit:
sys.exit(0)
#-------------------------------------------------------------------------
# Determine the need for GUI
#-------------------------------------------------------------------------
def need_gui(self):
"""
Determine whether we need a GUI session for the given tasks.
"""
if self.errors:
#errors in argument parsing ==> give cli error, no gui needed
return False
if len(self.removes) > 0:
return False
if self.list or self.list_more or self.list_table or self.help:
return False
if self.open_gui:
# No-option argument, definitely GUI
return True
# If we have data to work with:
if self.open or self.imports:
if self.exports or self.actions:
# have both data and what to do with it => no GUI
return False
elif self.create:
if self.open: # create an empty DB, open a GUI to fill it
return True
else: # create a DB, then do the import, with no GUI
self.open = self.create
return False
else:
# data given, but no action/export => GUI
return True
# No data, can only do GUI here
return True
def print_help(self):
"""
If the user gives the --help or -h option, print the output to terminal.
"""
if self.help:
print(_HELP)
sys.exit(0)
def print_usage(self):
"""
If the user gives the --usage print the output to terminal.
"""
if self.usage:
print(_USAGE)
sys.exit(0)
| sam-m888/gramps | gramps/cli/argparser.py | Python | gpl-2.0 | 23,180 |
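Editor's note: parse_args above is driven entirely by getopt, so a quick look at what getopt returns for one of the documented invocations may make the long option loop easier to follow. The short/long option strings below are simplified stand-ins, not the real SHORTOPTS/LONGOPTS imported from gramps.gen.const.

# Illustrative sketch only -- simplified option tables, not the real Gramps ones.
import getopt

shortopts = "O:i:e:f:a:p:"
longopts = ["open=", "import=", "export=", "format=", "action=", "options="]

argv = ["-O", "Family Tree 1", "-a", "report", "-p", "name=summary"]
options, leftargs = getopt.getopt(argv, shortopts, longopts)
print(options)    # [('-O', 'Family Tree 1'), ('-a', 'report'), ('-p', 'name=summary')]
print(leftargs)   # [] -- a bare filename would land here and steer towards the GUI path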
import os  # os.path and os.makedirs are used below; make the dependency explicit
from glob import glob
import fitsio
import sys
from astrometry.util.fits import *
from astrometry.util.file import *
from astrometry.util.starutil_numpy import *
from astrometry.libkd.spherematch import *
from collections import Counter
from legacypipe.oneblob import _select_model
from legacypipe.survey import wcs_for_brick
from astrometry.util.multiproc import multiproc
B = fits_table('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/survey-bricks.fits.gz')
def patch_one(X):
(ifn, Nfns, fn) = X
T8 = fits_table(fn)
phdr = fitsio.read_header(fn)
hdr = T8.get_header()
amfn = fn.replace('/tractor-', '/all-models-').replace('/tractor/', '/metrics/')
A = fits_table(amfn)
Ahdr = fitsio.read_header(amfn)
abands = Ahdr['BANDS'].strip()
nparams = dict(ptsrc=2, simple=2, rex=3, exp=5, dev=5, comp=9)
galaxy_margin = 3.**2 + (nparams['exp'] - nparams['ptsrc'])
rex = True
brick = B[B.brickname == T8.brickname[0]]
brick = brick[0]
brickwcs = wcs_for_brick(brick)
assert(len(A) == len(np.flatnonzero(T8.type != 'DUP ')))
typemap = dict(ptsrc='PSF', rex='REX', dev='DEV', exp='EXP', comp='COMP')
Tnew = T8.copy()
npatched = 0
for i,(d,ttype) in enumerate(zip(A.dchisq, T8.type)):
dchisqs = dict(zip(['ptsrc','rex','dev','exp','comp'], d))
mod = _select_model(dchisqs, nparams, galaxy_margin, rex)
ttype = ttype.strip()
# The DUP elements appear at the end, and we *zip* A and T8; A does not contain the DUPs
# so is shorter by the number of DUP elements.
assert(ttype != 'DUP')
newtype = typemap[mod]
# type unchanged
if ttype == newtype:
continue
# Copy fit values from the "newtype" entries in all-models
Tnew.type[i] = '%-4s' % newtype
cols = ['ra', 'dec', 'ra_ivar', 'dec_ivar']
nt = newtype.lower()
for c in cols:
Tnew.get(c)[i] = A.get('%s_%s' % (nt,c))[i]
# expand flux, flux_ivar
for c in ['flux', 'flux_ivar']:
flux = A.get('%s_%s' % (nt,c))[i]
if len(abands) == 1:
Tnew.get('%s_%s' % (c,abands[0]))[i] = flux
else:
for ib,band in enumerate(abands):
Tnew.get('%s_%s' % (c,band))[i] = flux[ib]
cc = []
if newtype in ['EXP', 'COMP']:
cc.append('exp')
if newtype in ['DEV', 'COMP']:
cc.append('dev')
for c1 in cc:
for c2 in ['e1','e2','r']:
for c3 in ['', '_ivar']:
c = 'shape%s_%s%s' % (c1, c2, c3)
ac = '%s_shape%s_%s%s' % (nt, c1, c2, c3)
Tnew.get(c)[i] = A.get(ac)[i]
if newtype == 'COMP':
Tnew.fracdev[i] = A.comp_fracdev[i]
Tnew.fracdev_ivar[i] = A.comp_fracdev_ivar[i]
if newtype == 'PSF':
# Zero out
for c1 in ['dev','exp']:
for c2 in ['e1','e2','r']:
for c3 in ['', '_ivar']:
c = 'shape%s_%s%s' % (c1, c2, c3)
Tnew.get(c)[i] = 0.
Tnew.fracdev[i] = 0.
Tnew.fracdev_ivar[i] = 0.
# recompute bx,by, brick_primary
ok,x,y = brickwcs.radec2pixelxy(Tnew.ra[i], Tnew.dec[i])
Tnew.bx[i] = x-1.
Tnew.by[i] = y-1.
Tnew.brick_primary[i] = ((Tnew.ra[i] >= brick.ra1 ) * (Tnew.ra[i] < brick.ra2) *
(Tnew.dec[i] >= brick.dec1) * (Tnew.dec[i] < brick.dec2))
npatched += 1
print('%i of %i: %s patching %i of %i sources' % (ifn+1, Nfns, os.path.basename(fn), npatched, len(Tnew)))
if npatched == 0:
return
phdr.add_record(dict(name='PATCHED', value=npatched,
comment='Patched DR8.2.1 model-sel bug'))
outfn = fn.replace('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/',
'patched/')
outdir = os.path.dirname(outfn)
try:
os.makedirs(outdir)
except:
pass
Tnew.writeto(outfn, header=hdr, primheader=phdr)
def main():
#fns = glob('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/000/tractor-000??00?.fits')
fns = glob('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/*/tractor-*.fits')
fns.sort()
print(len(fns), 'Tractor catalogs')
vers = Counter()
keepfns = []
for fn in fns:
hdr = fitsio.read_header(fn)
ver = hdr['LEGPIPEV']
ver = ver.strip()
vers[ver] += 1
if ver == 'DR8.2.1':
keepfns.append(fn)
print('Header versions:', vers.most_common())
fns = keepfns
print('Keeping', len(fns), 'with bad version')
N = len(fns)
args = [(i,N,fn) for i,fn in enumerate(fns)]
mp = multiproc(8)
mp.map(patch_one, args)
if __name__ == '__main__':
main()
| legacysurvey/pipeline | py/legacyanalysis/fix-model-selection.py | Python | gpl-2.0 | 4,976 |
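Editor's note: most of patch_one above is the bookkeeping of copying the winning model's columns from the all-models table into the tractor catalog. The sketch below reproduces only that column-name convention (the per-band flux columns and the real _select_model call are omitted); it is an illustration, not legacypipe code.

# Illustrative sketch of the all-models -> tractor column mapping used above.
def column_map(newtype):
    nt = newtype.lower()
    pairs = [("%s_%s" % (nt, c), c) for c in ("ra", "dec", "ra_ivar", "dec_ivar")]
    shapes = []
    if newtype in ("EXP", "COMP"):
        shapes.append("exp")
    if newtype in ("DEV", "COMP"):
        shapes.append("dev")
    for c1 in shapes:
        for c2 in ("e1", "e2", "r"):
            for c3 in ("", "_ivar"):
                target = "shape%s_%s%s" % (c1, c2, c3)
                pairs.append(("%s_%s" % (nt, target), target))
    return pairs

for source, target in column_map("EXP"):
    print("%s -> %s" % (source, target))
# exp_ra -> ra, exp_dec -> dec, ..., exp_shapeexp_e1 -> shapeexp_e1, ...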
#!/usr/bin/python3
# coding: utf-8
import gi
gi.require_version('CinnamonDesktop', '3.0')
from gi.repository import Gtk, GdkPixbuf, Gio, GLib, GObject, Gdk
from util import utils, trackers
MAX_IMAGE_SIZE = 320
MAX_IMAGE_SIZE_LOW_RES = 200
class FramedImage(Gtk.Image):
"""
Widget to hold the user face image. It attempts to display an image at
its native size, up to a max sane size.
"""
__gsignals__ = {
"surface-changed": (GObject.SignalFlags.RUN_LAST, None, (object,))
}
def __init__(self, low_res=False, scale_up=False):
super(FramedImage, self).__init__()
self.get_style_context().add_class("framedimage")
self.cancellable = None
self.file = None
self.path = None
self.scale_up = scale_up
if low_res:
self.max_size = MAX_IMAGE_SIZE_LOW_RES
else:
self.max_size = MAX_IMAGE_SIZE
trackers.con_tracker_get().connect(self, "realize", self.on_realized)
def on_realized(self, widget):
self.generate_image()
def clear_image(self):
self.set_from_surface(None)
self.emit("surface-changed", None)
def set_from_path(self, path):
self.path = path
self.file = None
if self.get_realized():
self.generate_image()
def set_from_file(self, file):
self.file = file
self.path = None
if self.get_realized():
self.generate_image()
def set_image_internal(self, path):
pixbuf = None
scaled_max_size = self.max_size * self.get_scale_factor()
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file(path)
except GLib.Error as e:
message = "Could not load pixbuf from '%s' for FramedImage: %s" % (path, e.message)
error = True
if pixbuf != None:
if (pixbuf.get_height() > scaled_max_size or pixbuf.get_width() > scaled_max_size) or \
(self.scale_up and (pixbuf.get_height() < scaled_max_size / 2 or pixbuf.get_width() < scaled_max_size / 2)):
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(path, scaled_max_size, scaled_max_size)
except GLib.Error as e:
message = "Could not scale pixbuf from '%s' for FramedImage: %s" % (path, e.message)
error = True
if pixbuf:
surface = Gdk.cairo_surface_create_from_pixbuf(pixbuf,
self.get_scale_factor(),
self.get_window())
self.set_from_surface(surface)
self.emit("surface-changed", surface)
else:
print(message)
self.clear_image()
def generate_image(self):
if self.path:
self.set_image_internal(self.path)
elif self.file:
if self.cancellable != None:
self.cancellable.cancel()
self.cancellable = None
self.cancellable = Gio.Cancellable()
self.file.load_contents_async(self.cancellable, self.load_contents_async_callback)
def load_contents_async_callback(self, file, result, data=None):
try:
success, contents, etag_out = file.load_contents_finish(result)
except GLib.Error:
self.clear_image()
return
if contents:
cache_name = GLib.build_filenamev([GLib.get_user_cache_dir(), "cinnamon-screensaver-albumart-temp"])
cache_file = Gio.File.new_for_path(cache_name)
cache_file.replace_contents_async(contents,
None,
False,
Gio.FileCreateFlags.REPLACE_DESTINATION,
self.cancellable,
self.on_file_written)
def on_file_written(self, file, result, data=None):
try:
if file.replace_contents_finish(result):
self.set_image_internal(file.get_path())
except GLib.Error:
pass
| leigh123linux/cinnamon-screensaver | src/widgets/framedImage.py | Python | gpl-2.0 | 4,227 |
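Editor's note: the only branching in set_image_internal above is deciding whether the pixbuf has to be reloaded at a bounded size. Pulled out on its own it is the predicate below; width and height stand in for pixbuf.get_width()/get_height(), and max_size for the widget's maximum size already multiplied by its scale factor.

# Illustrative sketch of the rescale decision made in set_image_internal above.
def needs_rescale(width, height, max_size, scale_up=False):
    too_big = width > max_size or height > max_size
    too_small = scale_up and (width < max_size / 2 or height < max_size / 2)
    return too_big or too_small

print(needs_rescale(640, 480, 320))                  # True: shrink to fit the frame
print(needs_rescale(100, 100, 320, scale_up=True))   # True: small image scaled up
print(needs_rescale(300, 200, 320))                  # False: keep the native size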
# This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
The _TLSAutomaton class provides methods common to both TLS client and server.
"""
import struct
from scapy.automaton import Automaton
from scapy.config import conf
from scapy.error import log_interactive
from scapy.packet import Raw
from scapy.layers.tls.basefields import _tls_type
from scapy.layers.tls.cert import Cert, PrivKey
from scapy.layers.tls.record import TLS
from scapy.layers.tls.record_sslv2 import SSLv2
from scapy.layers.tls.record_tls13 import TLS13
class _TLSAutomaton(Automaton):
"""
SSLv3 and TLS 1.0-1.2 typically need a 2-RTT handshake:
Client Server
| --------->>> | C1 - ClientHello
| <<<--------- | S1 - ServerHello
| <<<--------- | S1 - Certificate
| <<<--------- | S1 - ServerKeyExchange
| <<<--------- | S1 - ServerHelloDone
| --------->>> | C2 - ClientKeyExchange
| --------->>> | C2 - ChangeCipherSpec
| --------->>> | C2 - Finished [encrypted]
| <<<--------- | S2 - ChangeCipherSpec
| <<<--------- | S2 - Finished [encrypted]
We call these successive groups of messages:
ClientFlight1, ServerFlight1, ClientFlight2 and ServerFlight2.
We want to send our messages from the same flight all at once through the
socket. This is achieved by managing a list of records in 'buffer_out'.
We may put several messages (i.e. what RFC 5246 calls the record fragments)
in the same record when possible, but we may need several records for the
same flight, as with ClientFlight2.
However, note that the flights from the opposite side may be spread wildly
across TLS records and TCP packets. This is why we use a 'get_next_msg'
method for feeding a list of received messages, 'buffer_in'. Raw data
which has not yet been interpreted as a TLS record is kept in 'remain_in'.
"""
def parse_args(self, mycert=None, mykey=None, **kargs):
super(_TLSAutomaton, self).parse_args(**kargs)
self.socket = None
self.remain_in = b""
self.buffer_in = [] # these are 'fragments' inside records
self.buffer_out = [] # these are records
self.cur_session = None
self.cur_pkt = None # this is usually the latest parsed packet
if mycert:
self.mycert = Cert(mycert)
else:
self.mycert = None
if mykey:
self.mykey = PrivKey(mykey)
else:
self.mykey = None
self.verbose = kargs.get("verbose", True)
def get_next_msg(self, socket_timeout=2, retry=2):
"""
The purpose of the function is to make next message(s) available in
self.buffer_in. If the list is not empty, nothing is done. If not, in
order to fill it, the function uses the data already available in
self.remain_in from a previous call and waits till there are enough to
dissect a TLS packet. Once dissected, the content of the TLS packet
(carried messages, or 'fragments') is appended to self.buffer_in.
We have to grab enough data to dissect a TLS packet. We start by
reading the first 2 bytes. Unless we get anything different from
\\x14\\x03, \\x15\\x03, \\x16\\x03 or \\x17\\x03 (which might indicate
an SSLv2 record, whose first 2 bytes encode the length), we retrieve
3 more bytes in order to get the length of the TLS record, and
finally we can retrieve the remaining of the record.
"""
if self.buffer_in:
# A message is already available.
return
self.socket.settimeout(socket_timeout)
is_sslv2_msg = False
still_getting_len = True
grablen = 2
while retry and (still_getting_len or len(self.remain_in) < grablen):
if not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5:
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
still_getting_len = False
elif grablen == 2 and len(self.remain_in) >= 2:
byte0 = struct.unpack("B", self.remain_in[:1])[0]
byte1 = struct.unpack("B", self.remain_in[1:2])[0]
if (byte0 in _tls_type) and (byte1 == 3):
# Retry following TLS scheme. This will cause failure
# for SSLv2 packets with length 0x1{4-7}03.
grablen = 5
else:
# Extract the SSLv2 length.
is_sslv2_msg = True
still_getting_len = False
if byte0 & 0x80:
grablen = 2 + 0 + ((byte0 & 0x7f) << 8) + byte1
else:
grablen = 2 + 1 + ((byte0 & 0x3f) << 8) + byte1
elif not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5: # noqa: E501
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
if grablen == len(self.remain_in):
break
try:
tmp = self.socket.recv(grablen - len(self.remain_in))
if not tmp:
retry -= 1
else:
self.remain_in += tmp
except Exception:
self.vprint("Could not join host ! Retrying...")
retry -= 1
if len(self.remain_in) < 2 or len(self.remain_in) != grablen:
# Remote peer is not willing to respond
return
p = TLS(self.remain_in, tls_session=self.cur_session)
self.cur_session = p.tls_session
self.remain_in = b""
if isinstance(p, SSLv2) and not p.msg:
p.msg = Raw("")
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
if isinstance(p, TLS13):
self.buffer_in += p.inner.msg
else:
# should be TLS13ServerHello only
self.buffer_in += p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain_in += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
p = p.payload
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
self.buffer_in += p.inner.msg
def raise_on_packet(self, pkt_cls, state, get_next_msg=True):
"""
If the next message to be processed has type 'pkt_cls', raise 'state'.
If there is no message waiting to be processed, we try to get one with
the default 'get_next_msg' parameters.
"""
# Maybe we already parsed the expected packet, maybe not.
if get_next_msg:
self.get_next_msg()
if (not self.buffer_in or
not isinstance(self.buffer_in[0], pkt_cls)):
return
self.cur_pkt = self.buffer_in[0]
self.buffer_in = self.buffer_in[1:]
raise state()
def add_record(self, is_sslv2=None, is_tls13=None):
"""
Add a new TLS or SSLv2 or TLS 1.3 record to the packets buffered out.
"""
if is_sslv2 is None and is_tls13 is None:
v = (self.cur_session.tls_version or
self.cur_session.advertised_tls_version)
if v in [0x0200, 0x0002]:
is_sslv2 = True
elif v >= 0x0304:
is_tls13 = True
if is_sslv2:
self.buffer_out.append(SSLv2(tls_session=self.cur_session))
elif is_tls13:
self.buffer_out.append(TLS13(tls_session=self.cur_session))
else:
self.buffer_out.append(TLS(tls_session=self.cur_session))
def add_msg(self, pkt):
"""
Add a TLS message (e.g. TLSClientHello or TLSApplicationData)
inside the latest record to be sent through the socket.
We believe a good automaton should not use the first test.
"""
if not self.buffer_out:
self.add_record()
r = self.buffer_out[-1]
if isinstance(r, TLS13):
self.buffer_out[-1].inner.msg.append(pkt)
else:
self.buffer_out[-1].msg.append(pkt)
def flush_records(self):
"""
Send all buffered records and update the session accordingly.
"""
s = b"".join(p.raw_stateful() for p in self.buffer_out)
self.socket.send(s)
self.buffer_out = []
def vprint(self, s=""):
if self.verbose:
if conf.interactive:
log_interactive.info("> %s", s)
else:
print("> %s" % s)
| francois-contat/scapy | scapy/layers/tls/automaton.py | Python | gpl-2.0 | 8,976 |
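Editor's note: the docstring of get_next_msg above describes how the automaton decides how many bytes to read for one record. The standalone sketch below mirrors that arithmetic only (TLS 5-byte header versus the two SSLv2 header forms) and is not part of scapy.

# Illustrative sketch of the record-length sniffing performed in get_next_msg above.
import struct

def expected_record_length(first_bytes):
    """Given the first bytes from the socket (5 suffice for TLS, 2 for SSLv2),
    return the total number of bytes the record occupies."""
    byte0, byte1 = struct.unpack("BB", first_bytes[:2])
    if byte0 in (0x14, 0x15, 0x16, 0x17) and byte1 == 3:
        # TLS record: 5-byte header, big-endian length at offset 3
        return struct.unpack("!H", first_bytes[3:5])[0] + 5
    if byte0 & 0x80:
        # SSLv2 record with a 2-byte header
        return 2 + ((byte0 & 0x7f) << 8) + byte1
    # SSLv2 record with a 3-byte header (one padding-length byte follows)
    return 3 + ((byte0 & 0x3f) << 8) + byte1

print(expected_record_length(b"\x16\x03\x01\x00\x2a"))   # TLS handshake record -> 47
print(expected_record_length(b"\x80\x24"))               # SSLv2, 2-byte header -> 38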
# -*- coding: iso-8859-1 -*-
# Text translations for Suomi (fi).
# Automatically generated - DO NOT EDIT, edit fi.po instead!
meta = {
'language': 'Suomi',
'maintainer': '***vacant***',
'encoding': 'iso-8859-1',
'direction': 'ltr',
}
text = {
'''Create this page''':
'''Luo tämä sivu''',
'''Edit "%(pagename)s"''':
'''Muokkaa "%(pagename)s"''',
'''Reduce editor size''':
'''Pienennä editointi ikkunan kokoa''',
'''Describe %s here.''':
'''Kuvaile %s tässä.''',
'''Check Spelling''':
'''Oikolue''',
'''Save Changes''':
'''Talleta muutokset''',
'''Cancel''':
'''Peruuta''',
'''Preview''':
'''Esikatsele''',
'''Edit was cancelled.''':
'''Muokkaus peruttu.''',
'''Edit''':
'''Muokkaa''',
'''Default''':
'''Oletusarvo''',
'''Name''':
'''Nimi''',
'''Password''':
'''Tunnussana''',
'''Email''':
'''Sähköposti''',
'''Editor size''':
'''Editointikentän koko''',
'''Time zone''':
'''Aikavyöhyke''',
'''Your time is''':
'''Aikasi on''',
'''Server time is''':
'''Palvelimen aika on''',
'''Date format''':
'''Päivämäärän muoto''',
'''General options''':
'''Yleiset Asetukset''',
'''General Information''':
'''Yleiset Tiedot''',
'''Revision History''':
'''Versiohistoria''',
'''Date''':
'''Päivämäärä''',
'''Size''':
'''Koko''',
'''Editor''':
'''Editori''',
'''Comment''':
'''Huomautus''',
'''view''':
'''näytä''',
'''revert''':
'''palauta''',
'''Show "%(title)s"''':
'''Näytä "%(title)s"''',
'''You are not allowed to revert this page!''':
'''Sinulla ei ole oikeutta palauttaa tätä sivua!''',
'''Python Version''':
'''Python Versio''',
'''Sycamore Version''':
'''Sycamore Versio''',
'''4Suite Version''':
'''4Suite Versio''',
'''del''':
'''poista''',
'''get''':
'''hae''',
'''edit''':
'''muokkaa''',
'''No attachments stored for %(pagename)s''':
'''Sivulla %(pagename)s ei ole liitteitä''',
'''attachment:%(filename)s of %(pagename)s''':
'''liite:%(filename)s / %(pagename)s''',
'''Page "%s" was successfully deleted!''':
'''Sivu "%s" poistettiin onnistuneesti!''',
'''Really delete this page?''':
'''Haluatko varmasti poistaa tämän sivun?''',
'''Drawing \'%(filename)s\' saved.''':
'''Piirrustus \'%(filename)s\' talletettu.''',
'''Create new drawing "%(filename)s"''':
'''Luo uusi piirrustus "%(filename)s"''',
'''date''':
'''päivämäärä''',
'''Others''':
'''Muut''',
'''Clear message''':
'''Tyhjennä viesti''',
'''Mail sent OK''':
'''Sähköposti lähetetty onnistuneesti''',
}
| philipn/sycamore | Sycamore/i18n/fi.py | Python | gpl-2.0 | 2,372 |
#
# Copyright 2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
| oVirt/vdsm | tests/virt/__init__.py | Python | gpl-2.0 | 887 |
#!/home/mshameers/Templates/py2.7_flask.9/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
|
mshameers/me
|
db_create.py
|
Python
|
gpl-2.0
| 529
|
from __future__ import print_function, absolute_import, division
from .project import Project
from .builder import ProjectBuilder, FahProjectBuilder
|
msmbuilder/msmbuilder-legacy
|
MSMBuilder/project/__init__.py
|
Python
|
gpl-2.0
| 150
|
"""
Texture Replacement
+++++++++++++++++++
Example of how to replace a texture in game with an external image.
``createTexture()`` and ``removeTexture()`` are to be called from a
module Python Controller.
"""
from bge import logic
from bge import texture
def createTexture(cont):
"""Create a new Dynamic Texture"""
obj = cont.owner
# get the reference pointer (ID) of the internal texture
ID = texture.materialID(obj, 'IMoriginal.png')
# create a texture object
object_texture = texture.Texture(obj, ID)
# create a new source with an external image
url = logic.expandPath("//newtexture.jpg")
new_source = texture.ImageFFmpeg(url)
# the texture has to be stored in a permanent Python object
logic.texture = object_texture
# update/replace the texture
logic.texture.source = new_source
logic.texture.refresh(False)
def removeTexture(cont):
"""Delete the Dynamic Texture, reversing back the final to its original state."""
try:
del logic.texture
except:
pass
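# A minimal wiring sketch (assumption: this module is saved as "dynamic_texture.py"
# inside the blend file): add a Python controller in 'Module' mode to the object,
# point it at "dynamic_texture.createTexture" and trigger it from any sensor;
# a second controller set to "dynamic_texture.removeTexture" restores the
# original image.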
|
pawkoz/dyplom
|
blender/doc/python_api/examples/bge.texture.1.py
|
Python
|
gpl-2.0
| 1,051
|
# -*- coding: utf-8 -*-
import common
import sys, os, traceback
import time
import random
import re
import urllib
import string
from string import lower
from entities.CList import CList
from entities.CItemInfo import CItemInfo
from entities.CListItem import CListItem
from entities.CRuleItem import CRuleItem
import customReplacements as cr
import customConversions as cc
from utils import decryptionUtils as crypt
from utils import datetimeUtils as dt
from utils import rowbalance as rb
from utils.fileUtils import findInSubdirectory, getFileContent, getFileExtension
from utils.scrapingUtils import findVideoFrameLink, findContentRefreshLink, findRTMP, findJS, findPHP, getHostName, findEmbedPHPLink
from common import getHTML
class ParsingResult(object):
class Code:
SUCCESS = 0
CFGFILE_NOT_FOUND = 1
CFGSYNTAX_INVALID = 2
WEBREQUEST_FAILED = 3
def __init__(self, code, itemsList):
self.code = code
self.list = itemsList
self.message = None
class Parser(object):
"""
returns a list of items
"""
def parse(self, lItem):
url = lItem['url']
cfg = lItem['cfg']
ext = getFileExtension(url)
successfullyScraped = True
tmpList = None
if lItem['catcher']:
catcher = lItem['catcher']
cfg = os.path.join(common.Paths.catchersDir, '__' + catcher + '.cfg')
tmpList = self.__loadLocal(cfg, lItem)
if tmpList and len(tmpList.rules) > 0:
successfullyScraped = self.__loadRemote(tmpList, lItem)
else:
if ext == 'cfg':
tmpList = self.__loadLocal(url, lItem)
if tmpList and tmpList.start != '' and len(tmpList.rules) > 0:
lItem['url'] = tmpList.start
successfullyScraped = self.__loadRemote(tmpList, lItem)
elif cfg:
tmpList = self.__loadLocal(cfg, lItem)
if tmpList and len(tmpList.rules) > 0:
successfullyScraped = self.__loadRemote(tmpList, lItem)
# autoselect
if tmpList and tmpList.skill.find('autoselect') != -1 and len(tmpList.items) == 1:
m = tmpList.items[0]
m_type = m['type']
if m_type == 'rss':
common.log('Autoselect - ' + m['title'])
lItem = m
tmpList = self.parse(lItem).list
if not tmpList:
return ParsingResult(ParsingResult.Code.CFGSYNTAX_INVALID, None)
if tmpList and successfullyScraped == False:
return ParsingResult(ParsingResult.Code.WEBREQUEST_FAILED, tmpList)
# Remove duplicates
if tmpList.skill.find('allowDuplicates') == -1:
urls = []
for i in range(len(tmpList.items)-1,-1,-1):
item = tmpList.items[i]
tmpUrl = item['url']
tmpCfg = item['cfg']
if not tmpCfg:
tmpCfg = ''
if not urls.__contains__(tmpUrl + '|' + tmpCfg):
urls.append(tmpUrl + '|' + tmpCfg)
else:
tmpList.items.remove(item)
return ParsingResult(ParsingResult.Code.SUCCESS, tmpList)
"""
loads cfg, creates list and sets up rules for scraping
"""
def __loadLocal(self, filename, lItem = None):
params = []
#get Parameters
if filename.find('@') != -1:
params = filename.split('@')
filename = params.pop(0)
# get cfg file
cfg = filename
if not os.path.exists(cfg):
cfg = os.path.join(common.Paths.modulesDir, filename)
if not os.path.exists(cfg):
tmpPath = os.path.dirname(os.path.join(common.Paths.modulesDir, lItem["definedIn"]))
cfg = os.path.join(tmpPath ,filename)
if not os.path.exists(cfg):
srchFilename = filename
if filename.find('/') > -1:
srchFilename = srchFilename.split('/')[1]
try:
cfg = findInSubdirectory(srchFilename, common.Paths.modulesDir)
except:
try:
cfg = findInSubdirectory(srchFilename, common.Paths.favouritesFolder)
except:
try:
cfg = findInSubdirectory(srchFilename, common.Paths.customModulesDir)
except:
common.log('File not found: ' + srchFilename)
return None
#load file and apply parameters
data = getFileContent(cfg)
data = cr.CustomReplacements().replace(os.path.dirname(cfg), data, lItem, params)
#log
msg = 'Local file ' + filename + ' opened'
if len(params) > 0:
msg += ' with Parameter(s): '
msg += ",".join(params)
common.log(msg)
outputList = self.__parseCfg(filename, data, lItem)
return outputList
"""
scrape items according to rules and add them to the list
"""
def __loadRemote(self, inputList, lItem):
try:
inputList.curr_url = lItem['url']
count = 0
i = 1
maxits = 2 # 1 optimistic + 1 demystified
ignoreCache = False
demystify = False
back = ''
startUrl = inputList.curr_url
#print inputList, lItem
while count == 0 and i <= maxits:
if i > 1:
ignoreCache = True
demystify = True
# Trivial: url is from known streamer
if back:
lItem['referer'] = back
items = self.__parseHtml(inputList.curr_url, '"' + inputList.curr_url + '"', inputList.rules, inputList.skill, inputList.cfg, lItem)
count = len(items)
# try to find items in html source code
if count == 0:
referer = ''
if lItem['referer']:
referer = lItem['referer']
data = common.getHTML(inputList.curr_url, None, referer, False, False, ignoreCache, demystify)
if data == '':
return False
msg = 'Remote URL ' + inputList.curr_url + ' opened'
if demystify:
msg += ' (demystified)'
common.log(msg)
if inputList.section != '':
section = inputList.section
data = self.__getSection(data, section)
if lItem['section']:
section = lItem['section']
data = self.__getSection(data, section)
items = self.__parseHtml(inputList.curr_url, data, inputList.rules, inputList.skill, inputList.cfg, lItem)
count = len(items)
common.log(' -> ' + str(count) + ' item(s) found')
# find rtmp stream
#common.log('Find rtmp stream')
if count == 0:
item = self.__findRTMP(data, startUrl, lItem)
if item:
items = []
items.append(item)
count = 1
# find embedding javascripts
#common.log('Find embedding javascripts')
if count == 0:
item = findJS(data)
if item:
firstJS = item[0]
streamId = firstJS[0]
jsUrl = firstJS[1]
if not jsUrl.startswith('http://'):
jsUrl = urllib.basejoin(startUrl,jsUrl)
streamerName = getHostName(jsUrl)
jsSource = getHTML(jsUrl, None, startUrl, True, False)
phpUrl = findPHP(jsSource, streamId)
if phpUrl:
data = getHTML(phpUrl, None, startUrl, True, True)
item = self.__findRTMP(data, phpUrl, lItem)
if item:
if streamerName:
item['title'] = item['title'].replace('RTMP', streamerName)
items = []
items.append(item)
count = 1
else:
red = phpUrl
common.log(' -> Redirect: ' + red)
if back == red:
break
back = inputList.curr_url
inputList.curr_url = red
common.log(str(len(inputList.items)) + ' items ' + inputList.cfg + ' -> ' + red)
startUrl = red
continue
# find redirects
#common.log('find redirects')
if count == 0:
red = self.__findRedirect(startUrl, inputList.curr_url)
if startUrl == red:
common.log(' -> No redirect found')
else:
common.log(' -> Redirect: ' + red)
if back == red:
break
back = inputList.curr_url
inputList.curr_url = red
common.log(str(len(inputList.items)) + ' items ' + inputList.cfg + ' -> ' + red)
startUrl = red
i = 0
i += 1
if count != 0:
inputList.items = inputList.items + items
except:
traceback.print_exc(file = sys.stdout)
return False
return True
def __findRTMP(self, data, pageUrl, lItem):
rtmp = findRTMP(pageUrl, data)
if rtmp:
item = CListItem()
item['title'] = 'RTMP* - ' + rtmp[1]
item['type'] = 'video'
item['url'] = rtmp[0] + ' playPath=' + rtmp[1] + ' swfUrl=' + rtmp[2] +' swfVfy=1 live=true pageUrl=' + pageUrl
item.merge(lItem)
return item
return None
def __getSection(self, data, section):
p = re.compile(section, re.IGNORECASE + re.DOTALL + re.UNICODE)
m = p.search(data)
if m:
return m.group(0)
else:
common.log(' -> Section could not be found:' + section)
return data
def __findRedirect(self, page, referer='', demystify=False):
data = common.getHTML(page, None, referer = referer, xml = False, mobile=False, demystify = demystify)
if findVideoFrameLink(page, data):
return findVideoFrameLink(page, data)
elif findContentRefreshLink(data):
return findContentRefreshLink(data)
elif findEmbedPHPLink(data):
return findEmbedPHPLink(data)
if not demystify:
return self.__findRedirect(page, referer, True)
return page
def __parseCfg(self, cfgFile, data, lItem):
tmpList = CList()
data = data.replace('\r\n', '\n').split('\n')
items = []
tmp = None
hasOwnCfg = False
for m in data:
if m and m[0] != '#':
index = m.find('=')
if index != -1:
key = lower(m[:index]).strip()
value = m[index+1:]
index = value.find('|')
if value[:index] == 'sports.devil.locale':
value = common.translate(int(value[index+1:]))
elif value[:index] == 'sports.devil.image':
value = os.path.join(common.Paths.imgDir, value[index+1:])
if key == 'start':
tmpList.start = value
elif key == 'section':
tmpList.section = value
elif key == 'sort':
tmpList.sort = value
elif key == 'skill':
tmpList.skill = value
elif key == 'catcher':
tmpList.catcher = value
elif key == 'item_infos':
rule_tmp = CRuleItem()
hasOwnCfg = False
rule_tmp.infos = value
elif key == 'item_order':
rule_tmp.order = value
elif key == 'item_skill':
rule_tmp.skill = value
elif key == 'item_curr':
rule_tmp.curr = value
elif key == 'item_precheck':
rule_tmp.precheck = value
elif key.startswith('item_info'):
tmpkey = key[len('item_info'):]
if tmpkey == '_name':
info_tmp = CItemInfo()
info_tmp.name = value
if value == 'cfg':
hasOwnCfg = True
elif tmpkey == '_from':
info_tmp.src = value
elif tmpkey == '':
info_tmp.rule = value
elif tmpkey == '_default':
info_tmp.default = value
elif tmpkey == '_convert':
info_tmp.convert.append(value)
elif tmpkey == '_build':
info_tmp.build = value
rule_tmp.info_list.append(info_tmp)
elif key == 'item_url_build':
rule_tmp.url_build = value
if tmpList.catcher != '':
refInf = CItemInfo()
refInf.name = 'referer'
refInf.build = value
rule_tmp.info_list.append(refInf)
if not hasOwnCfg:
refInf = CItemInfo()
refInf.name = 'catcher'
refInf.build = tmpList.catcher
rule_tmp.info_list.append(refInf)
tmpList.rules.append(rule_tmp)
# static menu items (without regex)
elif key == 'title':
tmp = CListItem()
tmp['title'] = value
if tmpList.skill.find('videoTitle') > -1:
tmp['videoTitle'] = value
elif key == 'url':
tmp['url'] = value
if lItem:
tmp.merge(lItem)
if tmpList.catcher != '':
tmp['referer'] = value
if not hasOwnCfg:
tmp['catcher'] = tmpList.catcher
tmp['definedIn'] = cfgFile
items.append(tmp)
tmp = None
elif tmp != None:
if key == 'cfg':
hasOwnCfg = True
tmp[key] = value
tmpList.items = items
tmpList.cfg = cfgFile
return tmpList
def __parseHtml(self, url, data, rules, skills, definedIn, lItem):
#common.log('_parseHtml called' + url)
items = []
for item_rule in rules:
#common.log('rule: ' + item_rule.infos)
if not hasattr(item_rule, 'precheck') or (item_rule.precheck in data):
revid = re.compile(item_rule.infos, re.IGNORECASE + re.DOTALL + re.MULTILINE + re.UNICODE)
for reinfos in revid.findall(data):
tmp = CListItem()
if lItem['referer']:
tmp['referer'] = lItem['referer']
if item_rule.order.find('|') != -1:
infos_names = item_rule.order.split('|')
infos_values = list(reinfos)
i = 0
for name in infos_names:
tmp[name] = infos_values[i]
i = i+1
else:
tmp[item_rule.order] = reinfos
for info in item_rule.info_list:
info_value = tmp[info.name]
if info_value:
if info.build.find('%s') != -1:
tmpVal = info.build % info_value
tmp[info.name] = tmpVal
continue
if info.build.find('%s') != -1:
if info.src.__contains__('+'):
tmpArr = info.src.split('+')
src = ''
for t in tmpArr:
t = t.strip()
if t.find('\'') != -1:
src = src + t.strip('\'')
else:
src = src + tmp[t]
elif info.src.__contains__('||'):
variables = info.src.split('||')
src = firstNonEmpty(tmp, variables)
else:
src = tmp[info.src]
if src and info.convert != []:
tmp['referer'] = url
src = self.__parseCommands(tmp, src, info.convert)
if isinstance(src, dict):
for dKey in src:
tmp[dKey] = src[dKey]
src = src.values()[0]
info_value = info.build % (src)
else:
info_value = info.build
tmp[info.name] = info_value
if tmp['url']:
tmp['url'] = item_rule.url_build % (tmp['url'])
else:
tmp['url'] = url
tmp.merge(lItem)
if item_rule.skill.find('append') != -1:
tmp['url'] = url + tmp['url']
if item_rule.skill.find('space') != -1:
tmp['title'] = ' %s ' % tmp['title'].strip()
if skills.find('videoTitle') > -1:
tmp['videoTitle'] = tmp['title']
tmp['definedIn'] = definedIn
items.append(tmp)
return items
def __parseCommands(self, item, src, convCommands):
common.log('_parseCommands called')
# helper function
def parseCommand(txt):
command = {"command": txt, "params": ""}
if txt.find("(") > -1:
command["command"] = txt[0:txt.find("(")]
command["params"] = txt[len(command["command"]) + 1:-1]
return command
for convCommand in convCommands:
pComm = parseCommand(convCommand)
command = pComm["command"]
params = pComm["params"]
if params.find('@REFERER@') != -1:
referer = item['referer']
if not referer:
referer = ''
params = params.replace('@REFERER@', referer)
if command == 'convDate':
src = cc.convDate(params, src)
elif command == 'convTimestamp':
src = cc.convTimestamp(params, src)
elif command == 'select':
src = cc.select(params, src)
if not src:
continue
elif command == 'unicode_escape':
src = src.decode('unicode-escape')
elif command == 'replaceFromDict':
dictName = str(params.strip('\''))
path = os.path.join(common.Paths.dictsDir, dictName + '.txt')
if not (os.path.exists(path)):
common.log('Dictionary file not found: ' + path)
continue
src = cc.replaceFromDict(path, src)
elif command == 'time':
src = time.time()
elif command == 'timediff':
src = dt.timediff(src,params.strip('\''))
elif command == 'offset':
src = cc.offset(params, src)
elif command == 'getSource':
src = cc.getSource(params, src)
elif command == 'quote':
try:
src = urllib.quote(params.strip("'").replace('%s', src),'')
except:
cleanParams = params.strip("'")
cleanParams = cleanParams.replace("%s",src)
src = urllib.quote(cleanParams.encode('utf-8'),'')
elif command == 'unquote':
src = urllib.unquote(params.strip("'").replace('%s', src))
elif command == 'parseText':
src = cc.parseText(item, params, src)
elif command == 'getInfo':
src = cc.getInfo(item, params, src)
elif command == 'getXML':
src = cc.getInfo(item, params, src, xml=True)
elif command == 'getMobile':
src = cc.getInfo(item, params, src, mobile=True)
elif command == 'decodeBase64':
src = cc.decodeBase64(src)
elif command == 'decodeRawUnicode':
src = cc.decodeRawUnicode(src)
elif command == 'resolve':
src = cc.resolve(src)
elif command == 'decodeXppod':
src = cc.decodeXppod(src)
elif command == 'decodeXppodHLS':
src = cc.decodeXppod_hls(src)
elif command == 'replace':
src = cc.replace(params, src)
elif command == 'replaceRegex':
src = cc.replaceRegex(params, src)
elif command == 'ifEmpty':
src = cc.ifEmpty(item, params, src)
elif command == 'isEqual':
src = cc.isEqual(item, params, src)
elif command == 'ifFileExists':
src = cc.ifFileExists(item, params, src)
elif command == 'ifExists':
src = cc.ifExists(item, params, src)
elif command == 'encryptJimey':
src = crypt.encryptJimey(params.strip("'").replace('%s', src))
elif command == 'gAesDec':
src = crypt.gAesDec(src,item.infos[params])
elif command == 'getCookies':
src = cc.getCookies(params, src)
elif command == 'destreamer':
src = crypt.destreamer(params.strip("'").replace('%s', src))
elif command == 'unixTimestamp':
src = dt.getUnixTimestamp()
elif command == 'rowbalance':
src = rb.get()
elif command == 'urlMerge':
src = cc.urlMerge(params, src)
elif command == 'translate':
try:
src = common.translate(int(src))
except:
pass
elif command == 'camelcase':
src = string.capwords(string.capwords(src, '-'))
elif command == 'lowercase':
src = string.lower(src)
elif command == 'reverse':
src = src[::-1]
elif command == 'demystify':
print 'demystify'
src = crypt.doDemystify(src)
print 'after demystify',src
elif command == 'random':
paramArr = params.split(',')
minimum = int(paramArr[0])
maximum = int(paramArr[1])
src = str(random.randrange(minimum,maximum))
elif command == 'debug':
common.log('Debug from cfg file: ' + src)
elif command == 'divide':
paramArr = params.split(',')
a = paramArr[0].strip().strip("'").replace('%s', src)
a = resolveVariable(a, item)
b = paramArr[1].strip().strip("'").replace('%s', src)
b = resolveVariable(b, item)
if not a or not b:
continue
a = int(a)
b = int(b)
try:
src = str(a/b)
except:
pass
return src
def resolveVariable(varStr, item):
if varStr.startswith('@') and varStr.endswith('@'):
return item.getInfo(varStr.strip('@'))
return varStr
def firstNonEmpty(tmp, variables):
for v in variables:
vClean = v.strip()
if vClean.find("'") != -1:
vClean = vClean.strip("'")
else:
vClean = tmp.getInfo(vClean)
if vClean != '':
return vClean
return ''
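# A minimal usage sketch (hypothetical cfg name; CListItem is imported above).
# Parser.parse() takes a list item exposing at least 'url' and 'cfg' and returns
# a ParsingResult whose .list carries the scraped CList:
#
#   item = CListItem()
#   item['url'] = 'someModule.cfg'   # hypothetical module cfg shipped with the addon
#   result = Parser().parse(item)
#   if result.code == ParsingResult.Code.SUCCESS:
#       for entry in result.list.items:
#           print entry['title'], entry['url']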
|
Crach1015/plugin.video.superpack
|
zip/plugin.video.SportsDevil/lib/parser.py
|
Python
|
gpl-2.0
| 26,568
|
#!/usr/bin/env python
from similaritymeasures import Similarity
def main():
""" main function to create Similarity class instance and get use of it """
measures = Similarity()
print measures.euclidean_distance([0,3,4,5],[7,6,3,-1])
print measures.jaccard_similarity([0,1,2,5,6],[0,2,3,5,7,9])
if __name__ == "__main__":
main()
|
bharcode/MachineLearning
|
commons_ml/Similarity_measures/using_similarity.py
|
Python
|
gpl-2.0
| 338
|
# vim:ts=4:et
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from ..utils import strip_nnn
from . import attachnode
from . import export
def is_group_root(obj, objects):
if not obj.parent:
return True
return obj.parent.name not in objects
def collect_objects(collection):
objects = {}
def collect(col):
for o in col.objects:
objects[o.name] = o
for c in col.children:
collect(c)
collect(collection)
return objects
def export_collection(obj, muobj, mu):
saved_exported_objects = set(export.exported_objects)
group = obj.instance_collection
objects = collect_objects(group)
for n in objects:
o = objects[n]
# while KSP models (part/prop/internal) will have only one root
# object, grouping might be used for other purposes (eg, greeble)
# so support multiple group root objects
if o.hide_render or not is_group_root(o, objects):
continue
child = export.make_obj(mu, o, mu.path)
if child:
muobj.children.append(child)
export.exported_objects = saved_exported_objects
def handle_empty(obj, muobj, mu):
if obj.instance_collection:
if obj.instance_type != 'COLLECTION':
#FIXME flag an error? figure out something else to do?
return None
export_collection(obj, muobj, mu)
name = strip_nnn(obj.name)
if name[:5] == "node_":
n = attachnode.AttachNode(obj, mu.inverse)
mu.nodes.append(n)
if not n.keep_transform() and not obj.children:
return None
muobj.transform.localRotation @= attachnode.rotation_correction
elif name == "thrustTransform":
muobj.transform.localRotation @= attachnode.rotation_correction
elif name in ["CoMOffset", "CoPOffset", "CoLOffset"]:
setattr(mu, name, (mu.inverse @ obj.matrix_world.col[3])[:3])
if not obj.children:
return None
return muobj
type_handlers = {
type(None): handle_empty
}
|
taniwha-qf/io_object_mu
|
export_mu/empty.py
|
Python
|
gpl-2.0
| 2,799
|
import click
import pickle
from build import Build
@click.group()
def cli():
pass
@cli.command()
@click.option('--cache-file', default='test-cache')
@click.option('--query')
def query(cache_file, query):
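# Load the pickled {name: Build} cache and print every build whose <key>
# attribute contains <criteria>, where --query is given as "key=criteria".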
with open(cache_file, 'rb') as f:
key, criteria = query.split('=')
buildobjs = pickle.load(f)
for name, build in buildobjs.items():
item = getattr(build, key, '')
if criteria in item:
print(build, item)
cli()
|
jpmontez/jenkins-rpc
|
scripts/build-summary/cachequery.py
|
Python
|
gpl-2.0
| 492
|
""" Checks assigning attributes not found in class slots
will trigger assigning-non-slot warning.
"""
# pylint: disable=too-few-public-methods, no-init, missing-docstring, no-absolute-import, import-error
from collections import deque
from missing import Unknown
class Empty(object):
""" empty """
class Bad(object):
""" missing not in slots. """
__slots__ = ['member']
def __init__(self):
self.missing = 42 # [assigning-non-slot]
class Bad2(object):
""" missing not in slots """
__slots__ = [deque.__name__, 'member']
def __init__(self):
self.deque = 42
self.missing = 42 # [assigning-non-slot]
class Bad3(Bad):
""" missing not found in slots """
__slots__ = ['component']
def __init__(self):
self.component = 42
self.member = 24
self.missing = 42 # [assigning-non-slot]
super(Bad3, self).__init__()
class Good(Empty):
""" missing not in slots, but Empty doesn't
specify __slots__.
"""
__slots__ = ['a']
def __init__(self):
self.missing = 42
class Good2(object):
""" Using __dict__ in slots will be safe. """
__slots__ = ['__dict__', 'comp']
def __init__(self):
self.comp = 4
self.missing = 5
class PropertyGood(object):
""" Using properties is safe. """
__slots__ = ['tmp', '_value']
@property
def test(self):
return self._value
@test.setter
def test(self, value):
# pylint: disable=attribute-defined-outside-init
self._value = value
def __init__(self):
self.test = 42
class PropertyGood2(object):
""" Using properties in the body of the class is safe. """
__slots__ = ['_value']
def _getter(self):
return self._value
def _setter(self, value):
# pylint: disable=attribute-defined-outside-init
self._value = value
test = property(_getter, _setter)
def __init__(self):
self.test = 24
class UnicodeSlots(object):
"""Using unicode objects in __slots__ is okay.
On Python 3.3 onward, u'' is equivalent to '',
so this test should be safe for both versions.
"""
__slots__ = (u'first', u'second')
def __init__(self):
self.first = 42
self.second = 24
class DataDescriptor(object):
def __init__(self, name, default=''):
self.__name = name
self.__default = default
def __get__(self, inst, cls):
return getattr(inst, self.__name, self.__default)
def __set__(self, inst, value):
setattr(inst, self.__name, value)
class NonDataDescriptor(object):
def __get__(self, inst, cls):
return 42
class SlotsWithDescriptor(object):
__slots__ = ['_err']
data_descriptor = DataDescriptor('_err')
non_data_descriptor = NonDataDescriptor()
missing_descriptor = Unknown()
def dont_emit_for_descriptors():
inst = SlotsWithDescriptor()
# This should not emit, because attr is
# a data descriptor
inst.data_descriptor = 'foo'
inst.non_data_descriptor = 'lala' # [assigning-non-slot]
return
class ClassWithSlots(object):
__slots__ = ['foobar']
class ClassReassigningDunderClass(object):
__slots__ = ['foobar']
def release(self):
self.__class__ = ClassWithSlots
class ClassReassingingInvalidLayoutClass(object):
__slots__ = []
def release(self):
self.__class__ = ClassWithSlots # [assigning-non-slot]
|
rogalski/pylint
|
pylint/test/functional/assigning_non_slot.py
|
Python
|
gpl-2.0
| 3,455
|
import sys
import xbmc, xbmcgui, xbmcaddon, xbmcplugin, re
import urllib, urllib2
import re, string
import threading
import os
import base64
#from t0mm0.common.addon import Addon
#from t0mm0.common.net import Net
import urlparse
import xbmcplugin
import cookielib
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__icon__ = __addon__.getAddonInfo('icon')
addon_id = 'plugin.video.f4mTester'
selfAddon = xbmcaddon.Addon(id=addon_id)
#addon = Addon('plugin.video.f4mTester', sys.argv)
#net = Net()
mode =None
play=False
#play = addon.queries.get('play', None)
paramstring=sys.argv[2]
#url = addon.queries.get('playurl', None)
print paramstring
name=''
proxy_string=None
proxy_use_chunks=True
auth_string=''
streamtype='HDS'
setResolved=False
if paramstring:
paramstring="".join(paramstring[1:])
params=urlparse.parse_qs(paramstring)
url = params['url'][0]
try:
name = params['name'][0]
except:pass
try:
proxy_string = params['proxy'][0]
except:pass
try:
auth_string = params['auth'][0]
except:pass
print 'auth_string',auth_string
try:
streamtype = params['streamtype'][0]
except:pass
print 'streamtype',streamtype
try:
proxy_use_chunks_temp = params['proxy_for_chunks'][0]
import json
proxy_use_chunks=json.loads(proxy_use_chunks_temp)
except:pass
simpleDownloader=False
try:
simpleDownloader_temp = params['simpledownloader'][0]
import json
simpleDownloader=json.loads(simpleDownloader_temp)
except:pass
mode='play'
try:
mode = params['mode'][0]
except: pass
maxbitrate=0
try:
maxbitrate = int(params['maxbitrate'][0])
except: pass
play=True
try:
setResolved = params['setresolved'][0]
import json
setResolved=json.loads(setResolved)
except:setResolved=False
def playF4mLink(url,name,proxy=None,use_proxy_for_chunks=False,auth_string=None,streamtype='HDS',setResolved=False):
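# Hand the stream to the bundled F4m proxy helper: with setResolved the resolved
# ListItem is passed back via xbmcplugin.setResolvedUrl, otherwise the directory
# listing is closed and the proxy player is started directly.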
from F4mProxy import f4mProxyHelper
player=f4mProxyHelper()
#progress = xbmcgui.DialogProgress()
#progress.create('Starting local proxy')
if setResolved:
urltoplay,item=player.playF4mLink(url, name, proxy, use_proxy_for_chunks,maxbitrate,simpleDownloader,auth_string,streamtype,setResolved)
item.setProperty("IsPlayable", "true")
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
else:
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
player.playF4mLink(url, name, proxy, use_proxy_for_chunks,maxbitrate,simpleDownloader,auth_string,streamtype,setResolved)
return
def getUrl(url, cookieJar=None,post=None,referer=None,isJsonPost=False, acceptsession=None):
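# Fetch a URL with an optional cookie jar, POST body, referer and JSON /
# Accept-Session headers, using a desktop Chrome user agent; returns the
# response body.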
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if isJsonPost:
req.add_header('Content-Type','application/json')
if acceptsession:
req.add_header('Accept-Session',acceptsession)
if referer:
req.add_header('Referer',referer)
response = opener.open(req,post,timeout=30)
link=response.read()
response.close()
return link;
def getBBCUrl(urlToFetch):
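# Fetch the manifest page and pick the highest-bitrate variant that does not
# exceed the bbcBitRateMax addon setting (default 1500 kbps), falling back to
# the lowest available rate if none fits.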
text=getUrl(urlToFetch)
bitRate="1500"
overrideBitrate=selfAddon.getSetting( "bbcBitRateMax" )
if overrideBitrate != "": bitRate = overrideBitrate
bitRate=int(bitRate)
regstring='href="(.*?)" bitrate="(.*?)"'
birates=re.findall(regstring, text)
birates=[(int(j),f) for f,j in birates]
birates=sorted(birates, key=lambda f: f[0])
ratesel, urlsel=birates[0]
for r, url in birates:
if r<=bitRate:
ratesel, urlsel=r, url
else:
break
print 'xxxxxxxxx',ratesel, urlsel
return urlsel
def GUIEditExportName(name):
exit = True
while (exit):
kb = xbmc.Keyboard('default', 'heading', True)
kb.setDefault(name)
kb.setHeading('Enter Url')
kb.setHiddenInput(False)
kb.doModal()
if (kb.isConfirmed()):
name = kb.getText()
#name_correct = name_confirmed.count(' ')
#if (name_correct):
# GUIInfo(2,__language__(33224))
#else:
# name = name_confirmed
# exit = False
#else:
# GUIInfo(2,__language__(33225))
exit = False
return(name)
if mode ==None:
videos=[[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_one_hd.f4m') +'|Referer=http://www.bbc.co.uk/iplayer/live/bbcone&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc1 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc1.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_two_hd.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc2 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc2.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_three_hd.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc3 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc3.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifests/hds/pc/llnw/bbc_four_hd.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc4 (uk)','http://www.parker1.co.uk/myth/icons/tv/bbc4.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifesto/audio_video/simulcast/hds/uk/pc/llnw/bbc_news24.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc news (uk)','http://www.parker1.co.uk/myth/icons/tv/bbcnews.png',0,'',False],
[getBBCUrl('http://a.files.bbci.co.uk/media/live/manifesto/audio_video/simulcast/hds/uk/pc/llnw/bbc_parliament.f4m')+'|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc parliment (uk)','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbbc/cbbc_1500.f4m','cbbc (uk) 1500kbps','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbeebies/cbeebies_1500.f4m','cbeebeies (uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.edgesuite.net/pool_1/live/bbc_parliament/bbc_parliament.isml/bbc_parliament-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcparliament&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc parliment (uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_news_channel_hd/bbc_news_channel_hd.isml/bbc_news_channel_hd-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcnews&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc news (uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_one_london/bbc_one_london.isml/bbc_one_london-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcone&X-Requested-With=ShockwaveFlash/18.0.0.160&X-Forwarded-For=212.58.241.131','bbc1 (outside uk) 1500kbps','http://www.parker1.co.uk/myth/icons/tv/bbc1.png',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_two_hd/bbc_two_hd.isml/bbc_two_hd-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbctwo&X-Requested-With=ShockwaveFlash/18.0.0.160','bbc2 (outside uk) 1500kbps','http://www.parker1.co.uk/myth/icons/tv/bbc2.png',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/bbc3/bbc3_1500.f4m|X-Forwarded-For=212.58.241.131','bbc3 (outside uk) 1500kbps [link not valid]','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/bbc4/bbc4_1500.f4m|X-Forwarded-For=212.58.241.131','bbc4 (outside uk) 1500kbps [link not valid]','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbbc/cbbc_1500.f4m|X-Forwarded-For=212.58.241.131','cbbc (outside uk) 1500kbps','',0,'',False],
# ['http://zaphod-live.bbc.co.uk.edgesuite.net/hds-live/livepkgr/_definst_/cbeebies/cbeebies_1500.f4m|X-Forwarded-For=212.58.241.131','cbeebeies (outside uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.edgesuite.net/pool_1/live/bbc_parliament/bbc_parliament.isml/bbc_parliament-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcparliament&X-Requested-With=ShockwaveFlash/18.0.0.160|X-Forwarded-For=212.58.241.131','bbc parliment (outside uk) 1500kbps','',0,'',False],
# ['http://vs-hds-uk-live.bbcfmt.vo.llnwd.net/pool_5/live/bbc_news_channel_hd/bbc_news_channel_hd.isml/bbc_news_channel_hd-audio_2%3d96000-video%3d1374000.f4m|Referer=http://www.bbc.co.uk/iplayer/live/bbcnews&X-Requested-With=ShockwaveFlash/18.0.0.160&X-Forwarded-For=212.58.241.131','bbc news (outside uk) 1500kbps','',0,'',False],
['http://nhkworld-hds-live1.hds1.fmslive.stream.ne.jp/hds-live/nhkworld-hds-live1/_definst_/livestream/nhkworld-live-128.f4m','nhk 128','',0,'',False],
['http://nhkworld-hds-live1.hds1.fmslive.stream.ne.jp/hds-live/nhkworld-hds-live1/_definst_/livestream/nhkworld-live-256.f4m','nhk 256','',0,'',False],
['http://nhkworld-hds-live1.hds1.fmslive.stream.ne.jp/hds-live/nhkworld-hds-live1/_definst_/livestream/nhkworld-live-512.f4m','nhk 512','',0,'',False],
['http://77.245.150.95/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','Turkish','',0,'',False],
['http://88.157.194.246/live/ramdisk/zrtp1/HDS/zrtp1.f4m','j0anita','',0,'',False],
['http://ak.live.cntv.cn/z/cctv9_1@139238/manifest.f4m?hdcore=2.11.3&g=OUVOVEOVETYH','cntv.cn','',0,'',False],
['http://mlghds-lh.akamaihd.net/z/mlg17_1@167001/manifest.f4m?hdcore=2.11.3&g=TOFRPVFGXLFS','alibaba','',0,'',False],
['http://peer-stream.com/api/get_manifest.f4m?groupspec=G:0101010c050e6f72663200','streamtivi.com','',0,'',False],
['http://164.100.31.234/hds-live/livepkgr/_definst_/rstvlive.f4m','Rajya Sabha TV','',0,'',False],
['http://fmssv1.merep.com/hds-live/livepkgr/_definst_/liveevent/livestream.f4m?blnpc20130909042035_1061880273','media center','',0,'',False],
['http://fms01.stream.internetone.it/hds-live/livepkgr/_definst_/8fm/8fm1.f4m','Italy otto 8 FMTV','',0,'',False],
['http://88.150.239.241/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','Son Araba','',0,'',False],
['http://202.162.123.172/hds-live/livepkgr/_definst_/liveevent/livestream4.f4m','Chine Live event 4','',0,'',False],
['http://zb.wyol.com.cn/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_350.f4m','CCTV 1 China','',0,'',False],
['http://zb.zghhzx.net/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_350.f4m','CCTV13 China','',0,'',False],
['http://zb.sygd.tv/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_350.f4m','SYGD TV china','',0,'',False],
['http://zb.pudongtv.cn/hds-live/livepkgr/_definst_/wslhevent/hls_pindao_1_500.f4m','Pudong TV China','',0,'',False],
['http://88.150.239.241/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','AKS TV Turkey','',0,'',False],
['http://fms.quadrant.uk.com/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','Quadrant live streams UK','',0,'',False],
['http://cdn3.1internet.tv/hds-live11/livepkgr/_definst_/1tv-hd.f4m','1 HD cdn1 Russia','',0,'',False],
['http://cdn2.1internet.tv/hds-live/livepkgr/_definst_/1tv.f4m','1 HD cdn2 Russia','',0,'',False],
['http://193.232.151.135/hds-live-not-protected/livepkgr/_1099_/1099/1099-70.f4m','ndtv plus - proxy needed','',0,'',False],
['http://bbcwshdlive01-lh.akamaihd.net/z/atv_1@61433/manifest.f4m?hdcore=2.11.3','BBC Arabic','',0,'',False],
['http://skaihd-f.akamaihd.net/z/advert/ORAL_B_SHAKIRA_20-SKAI.mp4/manifest.f4m?hdcore=2.6.8&g=OGEJOEGNJICP','Greek Oral B advert','',0,'',False],
['http://srgssr_uni_11_ww-lh.akamaihd.net/z/enc11uni_ww@112996/manifest.f4m?g=XTJVOORDBMQF&hdcore=2.11.3','RTS Swiss a proxy needed?','',0,'',False],
['http://ccr.cim-jitp.top.comcast.net/cimomg04/OPUS/83/162/119271491507/1389989008837/119271491507_1389986611184_1850000_4.f4m','aliakrep DRM not working','',0,'',False],
['http://stream1-prod.spectar.tv:1935/mrt-edge/_definst_/mrt3/smil:all-streams.isml/manifest.f4m','mrt3/all-streams.isml','',0,'',False],
['http://hdv.gamespotcdn.net/z/d5/2013/10/16/Gameplay_GettingRevengeinGTAOnline_101613_,600,1000,1800,3200,4000,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=JNMDDRCQSDCH','Recorded..Getting Revenge in GTA maxbitrate 2000','',2006,'',False],
['http://hdv.gamespotcdn.net/z/d5/2013/10/16/Gameplay_GettingRevengeinGTAOnline_101613_,600,1000,1800,3200,4000,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=JNMDDRCQSDCH','Recorded..Getting Revenge in GTA maxbitrate Highest','',0,'',False],
['http://hdv.gamespotcdn.net/z/d5/2014/04/24/GSNews_Apr24_20140424a_,600,1000,1800,3200,4000,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=KUVLMGTKPJFF','Recorded..Gamespot news highest bitrate','',0,'',False],
['http://hdv.gamespotcdn.net/z/d5/2014/04/24/GSNews_Apr24_20140424a_,600,.mp4.csmil/manifest.f4m?hdcore=2.10.3&g=KUVLMGTKPJFF','Recorded..Gamespot news 600 bitrate','',0,'',False],
['http://202.125.131.170:1935/pitelevision/smil:geokahani.smil/manifest.f4m','Pitelevision geo kahani','',0,'',False],
['http://stream.flowplayer.org/flowplayer-700.flv','TESTING not F4M','',0,'',False],
['http://hlscache.fptplay.net.vn/live/htvcmovieHD_2500.stream/manifest.f4m|Referer=http://play.fpt.vn/static/mediaplayer/FPlayer.swf','Viet 2500bitrate','',0,'',False],
['http://hlscache.fptplay.net.vn/live/onetv_1000.stream/manifest.f4m|Referer=http://play.fpt.vn/static/mediaplayer/FPlayer.swf','Viet 1000bitrate','',0,'',False],
['http://88.157.194.246/live/ramdisk/zsic/HDS/zviseu.f4m','Sic http://viseu.es.tl/','',0,'',False],
['http://www.rte.ie/manifests/rte1.f4m','Rte.ie multi nested manifests','',0,'',False],
['http://olystreameast.nbcolympics.com/vod/157717c8-9c74-4fd1-ab1a-7daca5246324/geo1-lucas-oil-pro-motocross0531120959-ua.ism/manifest(format=f4m-f4f).f4m','NBc olypics','',900,'108.163.254.214:7808',False],
['http://olystreameast.nbcolympics.com/vod/31883e54-e85b-4551-a24a-46accc4a9d49/nbc-sports-live-extra0601123118-ua.ism/manifest(format=f4m-f4f,filtername=vodcut).f4m','NBc extra olypics','',900,'108.163.254.214:7808',False],
['http://77.245.150.95/hds-live/livepkgr/_definst_/liveevent/livestream.f4m','something else','',0,'',False]]
#['http://dummy','Custom']]
#print videos
if 1==2: #disable it as these links are not working, not sure why
req = urllib2.Request('http://www.gzcbn.tv/app/?app=ios&controller=cmsapi&action=pindao')
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
response.close()
## print link
s='title\":\"(.*?)\",\"stream\":\"(.*?)\"'
#
match=re.compile(s).findall(link)
i=0
for i in range(len(match)):
match[i]= (match[i][1].replace('\\/','/'),match[i][0])
videos+=match #disabled for time being as these are not working
#print videos
for (file_link,name,imgurl,maxbitrate,proxy,usechunks) in videos:
liz=xbmcgui.ListItem(name,iconImage=imgurl, thumbnailImage=imgurl)
liz.setInfo( type="Video", infoLabels={ "Title": name} )
#liz.setProperty("IsPlayable","true")
u = sys.argv[0] + "?" + urllib.urlencode({'url': file_link,'mode':'play','name':name,'maxbitrate':maxbitrate,'proxy':proxy,'proxy_for_chunks':usechunks})
print u
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False, )
elif mode == "play":
print 'PLAying ',mode,url,setResolved
if not name in ['Custom','TESTING not F4M'] :
playF4mLink(url,name, proxy_string, proxy_use_chunks,auth_string,streamtype,setResolved)
else:
listitem = xbmcgui.ListItem( label = str(name), iconImage = "DefaultVideo.png", thumbnailImage = xbmc.getInfoImage( "ListItem.Thumb" ), path=url )
xbmc.Player().play( url,listitem)
#newUrl=GUIEditExportName('')
#if not newUrl=='':
# playF4mLink(newUrl,name)
if not play:
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
|
pauron/ShaniXBMCWork
|
plugin.video.f4mTester/default.py
|
Python
|
gpl-2.0
| 16,981
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
from gnuradio import gr, gr_unittest, blocks
import pmt
class qa_repack_bits_bb (gr_unittest.TestCase):
def setUp (self):
random.seed(0)
self.tb = gr.top_block ()
self.tsb_key = "length"
def tearDown (self):
self.tb = None
def test_001_simple (self):
""" Very simple test, 2 bits -> 1 """
src_data = (0b11, 0b01, 0b10)
expected_data = (0b1, 0b1, 0b1, 0b0, 0b0, 0b1)
k = 2
l = 1
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
def test_001_simple_msb (self):
""" Very simple test, 2 bits -> 1 with MSB set """
src_data = (0b11, 0b01, 0b10)
expected_data = (0b1, 0b1, 0b0, 0b1, 0b1, 0b0)
k = 2
l = 1
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, "", False, gr.GR_MSB_FIRST)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
def test_002_three (self):
""" 8 -> 3 """
src_data = (0b11111101, 0b11111111, 0b11111111)
expected_data = (0b101,) + (0b111,) * 7
k = 8
l = 3
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
def test_002_three_msb (self):
""" 8 -> 3 """
src_data = (0b11111101, 0b11111111, 0b11111111)
expected_data = (0b111,) + (0b111,) + (0b011,) + (0b111,) * 5
k = 8
l = 3
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, "", False, gr.GR_MSB_FIRST)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
def test_003_lots_of_bytes (self):
""" Lots and lots of bytes, multiple packer stages """
src_data = tuple([random.randint(0, 255) for x in range(3*5*7*8 * 10)])
src = blocks.vector_source_b(src_data, False, 1)
repack1 = blocks.repack_bits_bb(8, 3)
repack2 = blocks.repack_bits_bb(3, 5)
repack3 = blocks.repack_bits_bb(5, 7)
repack4 = blocks.repack_bits_bb(7, 8)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack1, repack2, repack3, repack4, sink)
self.tb.run ()
self.assertEqual(sink.data(), src_data)
def test_003_lots_of_bytes_msb (self):
""" Lots and lots of bytes, multiple packer stages """
src_data = tuple([random.randint(0, 255) for x in range(3*5*7*8 * 10)])
src = blocks.vector_source_b(src_data, False, 1)
repack1 = blocks.repack_bits_bb(8, 3, "", False, gr.GR_MSB_FIRST)
repack2 = blocks.repack_bits_bb(3, 5, "", False, gr.GR_MSB_FIRST)
repack3 = blocks.repack_bits_bb(5, 7, "", False, gr.GR_MSB_FIRST)
repack4 = blocks.repack_bits_bb(7, 8, "", False, gr.GR_MSB_FIRST)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack1, repack2, repack3, repack4, sink)
self.tb.run ()
self.assertEqual(sink.data(), src_data)
def test_004_three_with_tags (self):
""" 8 -> 3 """
src_data = (0b11111101, 0b11111111)
expected_data = (0b101,) + (0b111,) * 4 + (0b001,)
k = 8
l = 3
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, self.tsb_key)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_char, 1, len(src_data), self.tsb_key),
repack,
sink
)
self.tb.run ()
self.assertEqual(len(sink.data()), 1)
self.assertEqual(sink.data()[0], expected_data)
def test_005_three_with_tags_trailing (self):
""" 3 -> 8, trailing bits """
src_data = (0b101,) + (0b111,) * 4 + (0b001,)
expected_data = (0b11111101, 0b11111111)
k = 3
l = 8
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, self.tsb_key, True)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_char, 1, len(src_data), self.tsb_key),
repack,
sink
)
self.tb.run ()
self.assertEqual(len(sink.data()), 1)
self.assertEqual(sink.data()[0], expected_data)
if __name__ == '__main__':
gr_unittest.run(qa_repack_bits_bb, "qa_repack_bits_bb.xml")
|
TheWylieStCoyote/gnuradio
|
gr-blocks/python/blocks/qa_repack_bits_bb.py
|
Python
|
gpl-3.0
| 5,600
|
#!/usr/bin/env python
"""
Profile script for CNFgen package
"""
from __future__ import print_function
import os
import sys
from contextlib import contextmanager
@contextmanager
def erase_stdout():
with file(os.devnull,"w") as null:
old_stdout = sys.stdout
sys.stdout = null
yield
sys.stdout = old_stdout
def cnfgen_call():
from cnfformula import cnfgen
cmd = ["cnfgen"] + sys.argv[1:]
with erase_stdout():
cnfgen(cmd)
if __name__ == '__main__':
from cProfile import run as profile
if len(sys.argv) <= 1:
print("Usage: {} <cnfgen_args>".format(sys.argv[0]),file=sys.stderr)
sys.exit(-1)
profile('cnfgen_call()',sort='tottime')
|
marcvinyals/cnfgen
|
profile.py
|
Python
|
gpl-3.0
| 744
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.utils import get_url, cint
from frappe.utils.user import get_user_fullname
from frappe.utils.print_format import download_pdf
from frappe.desk.form.load import get_attachments
from frappe.core.doctype.communication.email import make
from erpnext.accounts.party import get_party_account_currency, get_party_details
from erpnext.stock.doctype.material_request.material_request import set_missing_values
from erpnext.controllers.buying_controller import BuyingController
STANDARD_USERS = ("Guest", "Administrator")
class RequestforQuotation(BuyingController):
def validate(self):
self.validate_duplicate_supplier()
self.validate_common()
self.update_email_id()
def validate_duplicate_supplier(self):
supplier_list = [d.supplier for d in self.suppliers]
if len(supplier_list) != len(set(supplier_list)):
frappe.throw(_("Same supplier has been entered multiple times"))
def validate_common(self):
pc = frappe.get_doc('Purchase Common')
pc.validate_for_items(self)
def update_email_id(self):
for rfq_supplier in self.suppliers:
if not rfq_supplier.email_id:
rfq_supplier.email_id = frappe.db.get_value("Contact", rfq_supplier.contact, "email_id")
def validate_email_id(self, args):
if not args.email_id:
frappe.throw(_("Row {0}: For supplier {0} Email Address is required to send email").format(args.idx, args.supplier))
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
def send_to_supplier(self):
for rfq_supplier in self.suppliers:
if rfq_supplier.send_email:
self.validate_email_id(rfq_supplier)
# make new user if required
update_password_link = self.update_supplier_contact(rfq_supplier, self.get_link())
self.update_supplier_part_no(rfq_supplier)
self.supplier_rfq_mail(rfq_supplier, update_password_link, self.get_link())
def get_link(self):
# RFQ link for supplier portal
return get_url("/rfq/" + self.name)
def update_supplier_part_no(self, args):
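# Remember which supplier this RFQ is being rendered or e-mailed for
# (self.vendor) and resolve each item's supplier_part_no from the
# Item Supplier child table.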
self.vendor = args.supplier
for item in self.items:
item.supplier_part_no = frappe.db.get_value('Item Supplier',
{'parent': item.item_code, 'supplier': args.supplier}, 'supplier_part_no')
def update_supplier_contact(self, rfq_supplier, link):
'''Create a new user for the supplier if not set in contact'''
update_password_link = ''
if frappe.db.exists("User", rfq_supplier.email_id):
user = frappe.get_doc("User", rfq_supplier.email_id)
else:
user, update_password_link = self.create_user(rfq_supplier, link)
self.update_contact_of_supplier(rfq_supplier, user)
return update_password_link
def update_contact_of_supplier(self, rfq_supplier, user):
if rfq_supplier.contact:
contact = frappe.get_doc("Contact", rfq_supplier.contact)
else:
contact = frappe.new_doc("Contact")
contact.first_name = rfq_supplier.supplier_name or rfq_supplier.supplier
contact.supplier = rfq_supplier.supplier
if not contact.email_id and not contact.user:
contact.email_id = user.name
contact.user = user.name
contact.save(ignore_permissions=True)
def create_user(self, rfq_supplier, link):
user = frappe.get_doc({
'doctype': 'User',
'send_welcome_email': 0,
'email': rfq_supplier.email_id,
'first_name': rfq_supplier.supplier_name or rfq_supplier.supplier,
'user_type': 'Website User',
'redirect_url': link
})
user.save(ignore_permissions=True)
update_password_link = user.reset_password()
return user, update_password_link
def supplier_rfq_mail(self, data, update_password_link, rfq_link):
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
args = {
'update_password_link': update_password_link,
'message': frappe.render_template(self.message_for_supplier, data.as_dict()),
'rfq_link': rfq_link,
'user_fullname': full_name
}
subject = _("Request for Quotation")
template = "templates/emails/request_for_quotation.html"
sender = frappe.session.user not in STANDARD_USERS and frappe.session.user or None
message = frappe.get_template(template).render(args)
attachments = self.get_attachments()
self.send_email(data, sender, subject, message, attachments)
def send_email(self, data, sender, subject, message, attachments):
make(subject = subject, content=message,recipients=data.email_id,
sender=sender,attachments = attachments, send_email=True,
doctype=self.doctype, name=self.name)["name"]
frappe.msgprint(_("Email sent to supplier {0}").format(data.supplier))
def get_attachments(self):
attachments = [d.name for d in get_attachments(self.doctype, self.name)]
attachments.append(frappe.attach_print(self.doctype, self.name, doc=self))
return attachments
@frappe.whitelist()
def send_supplier_emails(rfq_name):
check_portal_enabled('Request for Quotation')
rfq = frappe.get_doc("Request for Quotation", rfq_name)
if rfq.docstatus==1:
rfq.send_to_supplier()
def check_portal_enabled(reference_doctype):
if not frappe.db.get_value('Portal Menu Item',
{'reference_doctype': reference_doctype}, 'enabled'):
frappe.throw(_("Request for Quotation is disabled to access from portal, for more check portal settings."))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["show_sidebar"] = True
return list_context
# This method is used to make supplier quotation from material request form.
@frappe.whitelist()
def make_supplier_quotation(source_name, for_supplier, target_doc=None):
def postprocess(source, target_doc):
target_doc.supplier = for_supplier
args = get_party_details(for_supplier, party_type="Supplier", ignore_permissions=True)
target_doc.currency = args.currency or get_party_account_currency('Supplier', for_supplier, source.company)
target_doc.buying_price_list = args.buying_price_list or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
set_missing_values(source, target_doc)
doclist = get_mapped_doc("Request for Quotation", source_name, {
"Request for Quotation": {
"doctype": "Supplier Quotation",
"validation": {
"docstatus": ["=", 1]
}
},
"Request for Quotation Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"name": "request_for_quotation_item",
"parent": "request_for_quotation"
},
}
}, target_doc, postprocess)
return doclist
# This method is used to make supplier quotation from supplier's portal.
@frappe.whitelist()
def create_supplier_quotation(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
try:
sq_doc = frappe.get_doc({
"doctype": "Supplier Quotation",
"supplier": doc.get('supplier'),
"terms": doc.get("terms"),
"company": doc.get("company"),
"currency": doc.get('currency') or get_party_account_currency('Supplier', doc.get('supplier'), doc.get('company')),
"buying_price_list": doc.get('buying_price_list') or frappe.db.get_value('Buying Settings', None, 'buying_price_list')
})
add_items(sq_doc, doc.get('supplier'), doc.get('items'))
sq_doc.flags.ignore_permissions = True
sq_doc.run_method("set_missing_values")
sq_doc.save()
frappe.msgprint(_("Supplier Quotation {0} created").format(sq_doc.name))
return sq_doc.name
except Exception:
return None
def add_items(sq_doc, supplier, items):
for data in items:
if data.get("qty") > 0:
if isinstance(data, dict):
data = frappe._dict(data)
create_rfq_items(sq_doc, supplier, data)
def create_rfq_items(sq_doc, supplier, data):
sq_doc.append('items', {
"item_code": data.item_code,
"item_name": data.item_name,
"description": data.description,
"qty": data.qty,
"rate": data.rate,
"supplier_part_no": frappe.db.get_value("Item Supplier", {'parent': data.item_code, 'supplier': supplier}, "supplier_part_no"),
"warehouse": data.warehouse or '',
"request_for_quotation_item": data.name,
"request_for_quotation": data.parent
})
@frappe.whitelist()
def get_pdf(doctype, name, supplier_idx):
doc = get_rfq_doc(doctype, name, supplier_idx)
if doc:
download_pdf(doctype, name, doc=doc)
def get_rfq_doc(doctype, name, supplier_idx):
if cint(supplier_idx):
doc = frappe.get_doc(doctype, name)
args = doc.get('suppliers')[cint(supplier_idx) - 1]
doc.update_supplier_part_no(args)
return doc
@frappe.whitelist()
def get_item_from_material_requests_based_on_supplier(source_name, target_doc = None):
mr_items_list = frappe.db.sql("""
SELECT
mr.name, mr_item.item_code
FROM
`tabItem` as item,
`tabItem Supplier` as item_supp,
`tabMaterial Request Item` as mr_item,
`tabMaterial Request` as mr
WHERE item_supp.supplier = %(supplier)s
AND item.name = item_supp.parent
AND mr_item.parent = mr.name
AND mr_item.item_code = item.name
AND mr.status != "Stopped"
AND mr.material_request_type = "Purchase"
AND mr.docstatus = 1
AND mr.per_ordered < 99.99""", {"supplier": source_name}, as_dict=1)
material_requests = {}
for d in mr_items_list:
material_requests.setdefault(d.name, []).append(d.item_code)
for mr, items in material_requests.items():
target_doc = get_mapped_doc("Material Request", mr, {
"Material Request": {
"doctype": "Request for Quotation",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Purchase"],
}
},
"Material Request Item": {
"doctype": "Request for Quotation Item",
"condition": lambda row: row.item_code in items,
"field_map": [
["name", "material_request_item"],
["parent", "material_request"],
["uom", "uom"]
]
}
}, target_doc)
return target_doc
|
kressi/erpnext
|
erpnext/buying/doctype/request_for_quotation/request_for_quotation.py
|
Python
|
gpl-3.0
| 10,014
|
import os
from cosmos.api import Cosmos, py_call
from cosmos.util.helpers import environment_variables
def use_cuda_device(some_arg, num_gpus):
assert "CUDA_VISIBLE_DEVICES" in os.environ
print(("some_arg", some_arg))
print(("CUDA_VISIBLE_DEVICES", os.environ["CUDA_VISIBLE_DEVICES"]))
assert len(os.environ["CUDA_VISIBLE_DEVICES"].split(",")) == num_gpus
def main():
cosmos = Cosmos().initdb()
workflow = cosmos.start("gpu", skip_confirm=True)
for i, num_gpus in enumerate([1, 1, 2, 2, 3]):
task = workflow.add_task(
use_cuda_device, dict(some_arg=i, num_gpus=num_gpus), gpu_req=num_gpus, uid=str(i),
)
workflow.run(
max_gpus=len(os.environ["COSMOS_LOCAL_GPU_DEVICES"].split(",")), cmd_wrapper=py_call,
)
if __name__ == "__main__":
with environment_variables(COSMOS_LOCAL_GPU_DEVICES="0,1,3"):
main()
|
LPM-HMS/COSMOS2
|
examples/local_gpus.py
|
Python
|
gpl-3.0
| 895
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import os
import logging
from stat import ST_MODE
import xattr
# project
from kiwi.command import Command
log = logging.getLogger('kiwi')
class DataSync:
"""
**Sync data from a source directory to a target directory
using the rsync protocol**
:param str source_dir: source directory path name
:param str target_dir: target directory path name
"""
def __init__(self, source_dir, target_dir):
self.source_dir = source_dir
self.target_dir = target_dir
def sync_data(self, options=None, exclude=None):
"""
Sync data from source to target using rsync
:param list options: rsync options
:param list exclude: file patterns to exclude
"""
target_entry_permissions = None
exclude_options = []
rsync_options = []
if options:
rsync_options = options
if not self.target_supports_extended_attributes():
warn_me = False
if '-X' in rsync_options:
rsync_options.remove('-X')
warn_me = True
if '-A' in rsync_options:
rsync_options.remove('-A')
warn_me = True
if warn_me:
log.warning(
'Extended attributes not supported for target: %s',
self.target_dir
)
if exclude:
for item in exclude:
exclude_options.append('--exclude')
exclude_options.append(
'/' + item
)
if os.path.exists(self.target_dir):
target_entry_permissions = os.stat(self.target_dir)[ST_MODE]
Command.run(
['rsync'] + rsync_options + exclude_options + [
self.source_dir, self.target_dir
]
)
if target_entry_permissions:
            # rsync also applies the permissions of the source directory
            # to the target directory, which is unwanted: only the
            # permissions of the files and directories inside the source
            # directory should be transferred, not those of the source
            # directory itself. Therefore the permission bits the target
            # directory had before the sync are restored afterwards to
            # ensure they have not changed.
os.chmod(self.target_dir, target_entry_permissions)
def target_supports_extended_attributes(self):
"""
Check if the target directory supports extended filesystem
attributes
:return: True or False
:rtype: bool
"""
try:
xattr.getxattr(self.target_dir, 'user.mime_type')
except Exception as e:
if format(e).startswith('[Errno 95]'):
# libc interface [Errno 95] Operation not supported:
return False
return True
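# --- Hedged usage sketch (not part of the original module) ---
# Shows how the DataSync API documented above is typically driven: options
# are plain rsync flags, and '-X'/'-A' are dropped automatically when the
# target filesystem does not support extended attributes. The paths and the
# flag selection below are illustrative assumptions, not kiwi defaults.
if __name__ == '__main__':
    sync = DataSync('/var/tmp/image-root/', '/mnt/target')
    sync.sync_data(
        options=['--archive', '--hard-links', '-X', '-A', '--one-file-system'],
        exclude=['dev', 'proc', 'sys']
    )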
|
b1-systems/kiwi
|
kiwi/utils/sync.py
|
Python
|
gpl-3.0
| 3,632
|
# ####################### BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mDistributionAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
|
insiderr/insiderr-app
|
app/modules/requests/packages/chardet/mbcharsetprober.py
|
Python
|
gpl-3.0
| 3,269
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Our local modules
from trepan.processor.command import base_subcmd as Mbase_subcmd
class SetPatSub(Mbase_subcmd.DebuggerSubcommand):
"""**set patsub** *from-re* *replace-string*
    Add a substitution pattern rule replacing *from-re* with
    *replace-string* anywhere it is found in source file names. If a
    substitution rule was previously set for *from-re*, the old rule is
    replaced by the new one.
    In the following example, suppose that in a docker container /mnt/project
    is the mount point for /home/rocky/project. You are running the code
    inside the docker container but debugging it from outside of it.
    Example:
    --------
        set patsub ^/mnt/project /home/rocky/project
"""
in_list = True
max_args = 2
min_abbrev = len("pats")
min_args = 2
short_help = "Set pattern substitution rule"
def run(self, args):
self.proc.add_remap_pat(args[0], args[1])
pass
if __name__ == "__main__":
from trepan.processor.command.set_subcmd import __demo_helper__ as Mhelper
Mhelper.demo_run(SetPatSub)
pass
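# --- Hedged illustration (not part of the original module) ---
# Conceptually a patsub rule behaves like a one-shot regular-expression
# substitution applied to every reported source file name; the real
# bookkeeping is done by self.proc.add_remap_pat() above. The helper and
# the sample paths below are illustrative only.
def _apply_patsub_example(from_re, replace_string, filename):
    """Approximate the effect of a single 'set patsub' rule on a file name."""
    import re
    return re.sub(from_re, replace_string, filename, count=1)
# _apply_patsub_example(r"^/mnt/project", "/home/rocky/project",
#                       "/mnt/project/app/main.py")
# -> "/home/rocky/project/app/main.py"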
|
rocky/python2-trepan
|
trepan/processor/command/set_subcmd/patsub.py
|
Python
|
gpl-3.0
| 1,775
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright 2015 Abhijit Menon-Sen <ams@2ndQuadrant.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import fcntl
import os
import pipes
import pty
import select
import subprocess
import time
from ansible import constants as C
from ansible.compat.six import PY3, text_type, binary_type
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.basic import BOOLEANS
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.boolean import boolean
from ansible.utils.path import unfrackpath, makedirs_safe
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
SSHPASS_AVAILABLE = None
class Connection(ConnectionBase):
''' ssh based connections '''
transport = 'ssh'
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
self.host = self._play_context.remote_addr
# The connection is created by running ssh/scp/sftp from the exec_command,
# put_file, and fetch_file methods, so we don't need to do any connection
# management here.
def _connect(self):
return self
@staticmethod
def _sshpass_available():
global SSHPASS_AVAILABLE
# We test once if sshpass is available, and remember the result. It
# would be nice to use distutils.spawn.find_executable for this, but
        # distutils isn't always available; shutil.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(b_command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
        but for now we do it the simple way.
'''
controlpersist = False
controlpath = False
for b_arg in (a.lower() for a in b_command):
if b'controlpersist' in b_arg:
controlpersist = True
elif b'controlpath' in b_arg:
controlpath = True
return controlpersist, controlpath
def _add_args(self, b_command, b_args, explanation):
"""
Adds arguments to the ssh command and displays a caller-supplied explanation of why.
:arg b_command: A list containing the command to add the new arguments to.
This list will be modified by this method.
:arg b_args: An iterable of new arguments to add. This iterable is used
more than once so it must be persistent (ie: a list is okay but a
StringIO would not)
        :arg explanation: A text string explaining why the arguments
were added. It will be displayed with a high enough verbosity.
.. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
"""
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self._play_context.remote_addr)
b_command += b_args
def _build_command(self, binary, *other_args):
'''
Takes a binary (ssh, scp, sftp) and optional extra arguments and returns
a command line as an array that can be passed to subprocess.Popen.
'''
b_command = []
#
# First, the command to invoke
#
# If we want to use password authentication, we have to set up a pipe to
# write the password to sshpass.
if self._play_context.password:
if not self._sshpass_available():
raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
self.sshpass_pipe = os.pipe()
b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
#
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
if binary == 'sftp' and C.DEFAULT_SFTP_BATCH_MODE:
if self._play_context.password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
b_command += [b'-b', b'-']
if self._play_context.verbosity > 3:
b_command.append(b'-vvv')
#
# Next, we add [ssh_connection]ssh_args from ansible.cfg.
#
if self._play_context.ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(self._play_context.ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
# Now we add various arguments controlled by configuration file settings
# (e.g. host_key_checking) or inventory variables (ansible_ssh_port) or
# a combination thereof.
if not C.HOST_KEY_CHECKING:
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
if self._play_context.port is not None:
b_args = (b"-o", b"Port=" + to_bytes(self._play_context.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
key = self._play_context.private_key_file
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
if not self._play_context.password:
self._add_args(
b_command, (
b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
b"-o", b"PasswordAuthentication=no"
),
u"ansible_password/ansible_ssh_pass not set"
)
user = self._play_context.remote_user
if user:
self._add_args(b_command,
(b"-o", b"User=" + to_bytes(self._play_context.remote_user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
self._add_args(b_command,
(b"-o", b"ConnectTimeout=" + to_bytes(self._play_context.timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
u"ANSIBLE_TIMEOUT/timeout set"
)
# Add in any common or binary-specific arguments from the PlayContext
# (i.e. inventory or task settings or overrides on the command line).
for opt in (u'ssh_common_args', u'{0}_extra_args'.format(binary)):
attr = getattr(self._play_context, opt, None)
if attr is not None:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
self._add_args(b_command, b_args, u"PlayContext set %s" % opt)
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
# already been set.
controlpersist, controlpath = self._persistence_controls(b_command)
if controlpersist:
self._persistent = True
if not controlpath:
cpdir = unfrackpath(C.ANSIBLE_SSH_CONTROL_PATH_DIR)
b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
# The directory must exist and be writable.
makedirs_safe(b_cpdir, 0o700)
if not os.access(b_cpdir, os.W_OK):
raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
b_args = (b"-o", b"ControlPath=" + to_bytes(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir), errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
# Finally, we add any caller-supplied extras.
if other_args:
b_command += [to_bytes(a) for a in other_args]
return b_command
def _send_initial_data(self, fh, in_data):
'''
Writes initial data to the stdin filehandle of the subprocess and closes
it. (The handle must be closed; otherwise, for example, "sftp -b -" will
just hang forever waiting for more commands.)
'''
display.debug('Sending initial data')
try:
fh.write(to_bytes(in_data))
fh.close()
except (OSError, IOError):
raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
display.debug('Sent initial data (%d bytes)' % len(in_data))
# Used by _run() to kill processes on failures
@staticmethod
def _terminate_process(p):
""" Terminate a process, ignoring errors """
try:
p.terminate()
except (OSError, IOError):
pass
# This is separate from _run() because we need to do the same thing for stdout
# and stderr.
def _examine_output(self, source, state, b_chunk, sudoable):
'''
Takes a string, extracts complete lines from it, tests to see if they
are a prompt, error message, etc., and sets appropriate flags in self.
Prompt and success lines are removed.
Returns the processed (i.e. possibly-edited) output and the unprocessed
remainder (to be processed with the next chunk) as strings.
'''
output = []
for b_line in b_chunk.splitlines(True):
display_line = to_text(b_line).rstrip('\r\n')
suppress_output = False
#display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
if self._play_context.prompt and self.check_password_prompt(b_line):
display.debug("become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_prompt'] = True
suppress_output = True
elif self._play_context.success_key and self.check_become_success(b_line):
display.debug("become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_success'] = True
suppress_output = True
elif sudoable and self.check_incorrect_password(b_line):
display.debug("become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_error'] = True
elif sudoable and self.check_missing_password(b_line):
display.debug("become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_nopasswd_error'] = True
if not suppress_output:
output.append(b_line)
# The chunk we read was most likely a series of complete lines, but just
# in case the last line was incomplete (and not a prompt, which we would
# have removed from the output), we retain it to be processed with the
# next chunk.
remainder = b''
if output and not output[-1].endswith(b'\n'):
remainder = output[-1]
output = output[:-1]
return b''.join(output), remainder
def _run(self, cmd, in_data, sudoable=True, checkrc=True):
'''
Starts the command and communicates with it until it ends.
'''
display_cmd = list(map(pipes.quote, map(to_text, cmd)))
display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
# pipelining data, or can't create a pty, we fall back to using plain
# old pipes.
p = None
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = list(map(to_bytes, cmd))
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
master, slave = pty.openpty()
if PY3 and self._play_context.password:
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'wb', 0)
os.close(slave)
except (OSError, IOError):
p = None
if not p:
if PY3 and self._play_context.password:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = p.stdin
# If we are using SSH password authentication, write the password into
# the pipe we opened in _build_command.
if self._play_context.password:
os.close(self.sshpass_pipe[0])
try:
os.write(self.sshpass_pipe[1], to_bytes(self._play_context.password) + b'\n')
except OSError as e:
# Ignore broken pipe errors if the sshpass process has exited.
if e.errno != errno.EPIPE or p.poll() is None:
raise
os.close(self.sshpass_pipe[1])
#
# SSH state machine
#
# Now we read and accumulate output from the running process until it
# exits. Depending on the circumstances, we may also need to write an
# escalation password and/or pipelined input to the process.
states = [
'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
]
# Are we requesting privilege escalation? Right now, we may be invoked
# to execute sftp/scp with sudoable=True, but we can request escalation
# only when using ssh. Otherwise we can send initial data straightaway.
state = states.index('ready_to_send')
if b'ssh' in cmd:
if self._play_context.prompt:
# We're requesting escalation with a password, so we have to
# wait for a password prompt.
state = states.index('awaiting_prompt')
display.debug(u'Initial state: %s: %s' % (states[state], self._play_context.prompt))
elif self._play_context.become and self._play_context.success_key:
# We're requesting escalation without a password, so we have to
# detect success/failure before sending any initial data.
state = states.index('awaiting_escalation')
display.debug(u'Initial state: %s: %s' % (states[state], self._play_context.success_key))
# We store accumulated stdout and stderr output from the process here,
# but strip any privilege escalation prompt/confirmation lines first.
# Output is accumulated into tmp_*, complete lines are extracted into
# an array, then checked and removed or copied to stdout or stderr. We
# set any flags based on examining the output in self._flags.
b_stdout = b_stderr = b''
b_tmp_stdout = b_tmp_stderr = b''
self._flags = dict(
become_prompt=False, become_success=False,
become_error=False, become_nopasswd_error=False
)
# select timeout should be longer than the connect timeout, otherwise
# they will race each other when we can't connect, and the connect
# timeout usually fails
timeout = 2 + self._play_context.timeout
rpipes = [p.stdout, p.stderr]
for fd in rpipes:
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# If we can send initial data without waiting for anything, we do so
# before we call select.
if states[state] == 'ready_to_send' and in_data:
self._send_initial_data(stdin, in_data)
state += 1
while True:
rfd, wfd, efd = select.select(rpipes, [], [], timeout)
# We pay attention to timeouts only while negotiating a prompt.
if not rfd:
if state <= states.index('awaiting_escalation'):
# If the process has already exited, then it's not really a
# timeout; we'll let the normal error handling deal with it.
if p.poll() is not None:
break
self._terminate_process(p)
raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
# Read whatever output is available on stdout and stderr, and stop
# listening to the pipe if it's been closed.
if p.stdout in rfd:
b_chunk = p.stdout.read()
if b_chunk == b'':
rpipes.remove(p.stdout)
# When ssh has ControlMaster (+ControlPath/Persist) enabled, the
# first connection goes into the background and we never see EOF
# on stderr. If we see EOF on stdout, lower the select timeout
# to reduce the time wasted selecting on stderr if we observe
                    # that the process has not yet exited after this EOF. Otherwise
# we may spend a long timeout period waiting for an EOF that is
# not going to arrive until the persisted connection closes.
timeout = 1
b_tmp_stdout += b_chunk
display.debug("stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
if p.stderr in rfd:
b_chunk = p.stderr.read()
if b_chunk == b'':
rpipes.remove(p.stderr)
b_tmp_stderr += b_chunk
display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
# We examine the output line-by-line until we have negotiated any
# privilege escalation prompt and subsequent success/error message.
# Afterwards, we can accumulate output without looking at it.
if state < states.index('ready_to_send'):
if b_tmp_stdout:
b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
b_stdout += b_output
b_tmp_stdout = b_unprocessed
if b_tmp_stderr:
b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
b_stderr += b_output
b_tmp_stderr = b_unprocessed
else:
b_stdout += b_tmp_stdout
b_stderr += b_tmp_stderr
b_tmp_stdout = b_tmp_stderr = b''
# If we see a privilege escalation prompt, we send the password.
# (If we're expecting a prompt but the escalation succeeds, we
# didn't need the password and can carry on regardless.)
if states[state] == 'awaiting_prompt':
if self._flags['become_prompt']:
display.debug('Sending become_pass in response to prompt')
stdin.write(to_bytes(self._play_context.become_pass) + b'\n')
self._flags['become_prompt'] = False
state += 1
elif self._flags['become_success']:
state += 1
# We've requested escalation (with or without a password), now we
# wait for an error message or a successful escalation.
if states[state] == 'awaiting_escalation':
if self._flags['become_success']:
display.debug('Escalation succeeded')
self._flags['become_success'] = False
state += 1
elif self._flags['become_error']:
display.debug('Escalation failed')
self._terminate_process(p)
self._flags['become_error'] = False
raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
elif self._flags['become_nopasswd_error']:
display.debug('Escalation requires password')
self._terminate_process(p)
self._flags['become_nopasswd_error'] = False
raise AnsibleError('Missing %s password' % self._play_context.become_method)
elif self._flags['become_prompt']:
# This shouldn't happen, because we should see the "Sorry,
# try again" message first.
display.debug('Escalation prompt repeated')
self._terminate_process(p)
self._flags['become_prompt'] = False
raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
# Once we're sure that the privilege escalation prompt, if any, has
# been dealt with, we can send any initial data and start waiting
# for output.
if states[state] == 'ready_to_send':
if in_data:
self._send_initial_data(stdin, in_data)
state += 1
# Now we're awaiting_exit: has the child process exited? If it has,
# and we've read all available output from it, we're done.
if p.poll() is not None:
if not rpipes or not rfd:
break
                # We should not see further writes to the stdout/stderr file
                # descriptors after the process has exited, so drop the select
                # timeout to zero and gather any last writes we may have missed.
timeout = 0
continue
# If the process has not yet exited, but we've already read EOF from
# its stdout and stderr (and thus removed both from rpipes), we can
# just wait for it to exit.
elif not rpipes:
p.wait()
break
# Otherwise there may still be outstanding data to read.
# close stdin after process is terminated and stdout/stderr are read
# completely (see also issue #848)
stdin.close()
if C.HOST_KEY_CHECKING:
if cmd[0] == b"sshpass" and p.returncode == 6:
raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
if p.returncode == 255 and in_data and checkrc:
raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
return (p.returncode, b_stdout, b_stderr)
def _exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_executable = self._play_context.ssh_executable
if not in_data and sudoable:
args = (ssh_executable, '-tt', self.host, cmd)
else:
args = (ssh_executable, self.host, cmd)
cmd = self._build_command(*args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
return (returncode, stdout, stderr)
def _file_transport_command(self, in_path, out_path, sftp_action):
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
# since this can be a non-bool now, we need to handle it correctly
scp_if_ssh = C.DEFAULT_SCP_IF_SSH
if not isinstance(scp_if_ssh, bool):
scp_if_ssh = scp_if_ssh.lower()
if scp_if_ssh in BOOLEANS:
scp_if_ssh = boolean(scp_if_ssh)
elif scp_if_ssh != 'smart':
raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
# create a list of commands to use based on config options
methods = ['sftp']
if scp_if_ssh == 'smart':
methods.append('scp')
elif scp_if_ssh:
methods = ['scp']
success = False
res = None
for method in methods:
if method == 'sftp':
cmd = self._build_command('sftp', to_bytes(host))
in_data = u"{0} {1} {2}\n".format(sftp_action, pipes.quote(in_path), pipes.quote(out_path))
elif method == 'scp':
cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, pipes.quote(out_path)))
in_data = None
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._run(cmd, in_data, checkrc=False)
# Check the return code and rollover to next method if failed
if returncode == 0:
success = True
break
else:
# If not in smart mode, the data will be printed by the raise below
if scp_if_ssh == 'smart':
display.warning(msg='%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
display.debug(msg='%s' % to_native(stdout))
display.debug(msg='%s' % to_native(stderr))
res = (returncode, stdout, stderr)
if not success:
raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}"\
.format(to_native(out_path), to_native(res[1]), to_native(res[2])))
#
# Main public methods
#
def exec_command(self, *args, **kwargs):
"""
Wrapper around _exec_command to retry in the case of an ssh failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* remaining_tries is <2
* retries limit reached
"""
remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1
cmd_summary = "%s..." % args[0]
for attempt in range(remaining_tries):
try:
return_tuple = self._exec_command(*args, **kwargs)
# 0 = success
# 1-254 = remote command return code
# 255 = failure from the ssh command itself
if return_tuple[0] != 255:
break
else:
raise AnsibleConnectionFailure("Failed to connect to the host via ssh: %s" % to_native(return_tuple[2]))
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
if isinstance(e, AnsibleConnectionFailure):
msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause)
else:
msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause)
display.vv(msg, host=self.host)
time.sleep(pause)
continue
return return_tuple
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
self._file_transport_command(in_path, out_path, 'put')
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
self._file_transport_command(in_path, out_path, 'get')
def close(self):
# If we have a persistent ssh connection (ControlPersist), we can ask it
# to stop listening. Otherwise, there's nothing to do here.
# TODO: reenable once winrm issues are fixed
# temporarily disabled as we are forced to currently close connections after every task because of winrm
# if self._connected and self._persistent:
# ssh_executable = self._play_context.ssh_executable
# cmd = self._build_command(ssh_executable, '-O', 'stop', self.host)
#
# cmd = map(to_bytes, cmd)
# p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout, stderr = p.communicate()
self._connected = False
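# --- Hedged sketch (not part of the original plugin) ---
# Isolates the retry/backoff policy documented in Connection.exec_command()
# above: retry while ssh itself fails (return code 255) or an exception
# escapes, sleeping 2**attempt - 1 seconds between attempts, capped at 30
# seconds. 'run_once' and 'retries' are illustrative placeholders.
def _retry_ssh_example(run_once, retries=3, sleep=time.sleep):
    for attempt in range(retries + 1):
        try:
            returncode, stdout, stderr = run_once()
            if returncode != 255:
                return (returncode, stdout, stderr)
        except Exception:
            if attempt == retries:
                raise
        if attempt == retries:
            raise AnsibleConnectionFailure("ssh returned 255 on every attempt")
        sleep(min(2 ** attempt - 1, 30))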
|
wenottingham/ansible
|
lib/ansible/plugins/connection/ssh.py
|
Python
|
gpl-3.0
| 32,355
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from mycroft.messagebus.message import Message
from mycroft.util.log import getLogger
__author__ = 'jdorleans'
LOGGER = getLogger(__name__)
class EnclosureAPI:
"""
This API is intended to be used to interface with the hardware
that is running Mycroft. It exposes all possible commands which
can be sent to a Mycroft enclosure implementation.
Different enclosure implementations may implement this differently
and/or may ignore certain API calls completely. For example,
    the eyes_color() API might be ignored on a Mycroft that uses simple
    LEDs which only turn on/off, or not supported at all on an
    implementation where there is no face at all.
"""
def __init__(self, ws):
self.ws = ws
def reset(self):
"""The enclosure should restore itself to a started state.
Typically this would be represented by the eyes being 'open'
and the mouth reset to its default (smile or blank).
"""
self.ws.emit(Message("enclosure.reset"))
def system_reset(self):
"""The enclosure hardware should reset any CPUs, etc."""
self.ws.emit(Message("enclosure.system.reset"))
def system_mute(self):
"""Turn off the system microphone (not listening for wakeword)."""
self.ws.emit(Message("enclosure.system.mute"))
def system_unmute(self):
"""Turn the system microphone on (listening for wakeword)."""
self.ws.emit(Message("enclosure.system.unmute"))
def system_blink(self, times):
"""The 'eyes' should blink the given number of times.
Args:
times (int): number of times to blink
"""
self.ws.emit(Message("enclosure.system.blink", {'times': times}))
def eyes_on(self):
"""Illuminate or show the eyes."""
self.ws.emit(Message("enclosure.eyes.on"))
def eyes_off(self):
"""Turn off or hide the eyes."""
self.ws.emit(Message("enclosure.eyes.off"))
def eyes_blink(self, side):
"""Make the eyes blink
Args:
side (str): 'r', 'l', or 'b' for 'right', 'left' or 'both'
"""
self.ws.emit(Message("enclosure.eyes.blink", {'side': side}))
def eyes_narrow(self):
"""Make the eyes look narrow, like a squint"""
self.ws.emit(Message("enclosure.eyes.narrow"))
def eyes_look(self, side):
"""Make the eyes look to the given side
Args:
side (str): 'r' for right
'l' for left
'u' for up
'd' for down
'c' for crossed
"""
self.ws.emit(Message("enclosure.eyes.look", {'side': side}))
def eyes_color(self, r=255, g=255, b=255):
"""Change the eye color to the given RGB color
Args:
r (int): 0-255, red value
g (int): 0-255, green value
b (int): 0-255, blue value
"""
self.ws.emit(Message("enclosure.eyes.color",
{'r': r, 'g': g, 'b': b}))
def eyes_brightness(self, level=30):
"""Set the brightness of the eyes in the display.
Args:
level (int): 1-30, bigger numbers being brighter
"""
self.ws.emit(Message("enclosure.eyes.level", {'level': level}))
def eyes_reset(self):
"""Restore the eyes to their default (ready) state."""
self.ws.emit(Message("enclosure.eyes.reset"))
def eyes_timed_spin(self, length):
"""Make the eyes 'roll' for the given time.
Args:
length (int): duration in milliseconds of roll, None = forever
"""
self.ws.emit(Message("enclosure.eyes.timedspin",
{'length': length}))
def eyes_volume(self, volume):
"""Indicate the volume using the eyes
Args:
volume (int): 0 to 11
"""
self.ws.emit(Message("enclosure.eyes.volume", {'volume': volume}))
def mouth_reset(self):
"""Restore the mouth display to normal (blank)"""
self.ws.emit(Message("enclosure.mouth.reset"))
def mouth_talk(self):
"""Show a generic 'talking' animation for non-synched speech"""
self.ws.emit(Message("enclosure.mouth.talk"))
def mouth_think(self):
"""Show a 'thinking' image or animation"""
self.ws.emit(Message("enclosure.mouth.think"))
def mouth_listen(self):
"""Show a 'thinking' image or animation"""
self.ws.emit(Message("enclosure.mouth.listen"))
def mouth_smile(self):
"""Show a 'smile' image or animation"""
self.ws.emit(Message("enclosure.mouth.smile"))
def mouth_viseme(self, code):
"""Display a viseme mouth shape for synched speech
Args:
code (int): 0 = shape for sounds like 'y' or 'aa'
1 = shape for sounds like 'aw'
2 = shape for sounds like 'uh' or 'r'
3 = shape for sounds like 'th' or 'sh'
4 = neutral shape for no sound
5 = shape for sounds like 'f' or 'v'
6 = shape for sounds like 'oy' or 'ao'
"""
self.ws.emit(Message("enclosure.mouth.viseme", {'code': code}))
def mouth_text(self, text=""):
"""Display text (scrolling as needed)
Args:
text (str): text string to display
"""
self.ws.emit(Message("enclosure.mouth.text", {'text': text}))
def weather_display(self, img_code, temp):
"""Show a weather icon (deprecated)"""
self.ws.emit(Message("enclosure.weather.display",
{'img_code': img_code, 'temp': temp}))
def activate_mouth_events(self):
"""Enable movement of the mouth with speech"""
self.ws.emit(Message('enclosure.mouth.events.activate'))
def deactivate_mouth_events(self):
"""Disable movement of the mouth with speech"""
self.ws.emit(Message('enclosure.mouth.events.deactivate'))
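# --- Hedged usage sketch (not part of the original module) ---
# Shows the intended call pattern for the EnclosureAPI described above. The
# WebsocketClient import path and starting it on a background thread are
# assumptions for illustration; in a skill the connected messagebus client
# is normally handed to you.
if __name__ == '__main__':
    from threading import Thread
    from mycroft.messagebus.client.ws import WebsocketClient
    ws = WebsocketClient()
    t = Thread(target=ws.run_forever)
    t.daemon = True
    t.start()
    enclosure = EnclosureAPI(ws)
    enclosure.eyes_color(r=0, g=128, b=255)  # tint the eyes blue
    enclosure.mouth_text("Hello from the enclosure API")
    enclosure.eyes_blink('b')                # blink both eyes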
|
jasonehines/mycroft-core
|
mycroft/client/enclosure/api.py
|
Python
|
gpl-3.0
| 6,761
|
import jmbitcoin as btc
import pytest
def test_bip21_decode():
    # These should raise an exception because they are not valid BIP21 URIs
with pytest.raises(ValueError):
btc.decode_bip21_uri('')
btc.decode_bip21_uri('nfdjksnfjkdsnfjkds')
btc.decode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
btc.decode_bip21_uri(
'175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=20.3')
btc.decode_bip21_uri('bitcoin:')
btc.decode_bip21_uri('bitcoin:?amount=20.3')
btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=')
btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=XYZ')
btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=100\'000')
btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=100,000')
btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=100000000')
assert(btc.decode_bip21_uri('bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
)['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
assert(btc.decode_bip21_uri('BITCOIN:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
)['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
assert(btc.decode_bip21_uri('BitCoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
)['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
parsed = btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?label=Luke-Jr')
assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
assert(parsed['label'] == 'Luke-Jr')
parsed = btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=20.3&label=Luke-Jr')
assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
assert(parsed['amount'] == 2030000000)
assert(parsed['label'] == 'Luke-Jr')
parsed = btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=50&label=Luke-Jr&message=Donation%20for%20project%20xyz')
assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
assert(parsed['amount'] == 5000000000)
assert(parsed['label'] == 'Luke-Jr')
assert(parsed['message'] == 'Donation for project xyz')
    # This should raise an exception because of unknown req-* parameters
with pytest.raises(ValueError):
btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?req-somethingyoudontunderstand=50&req-somethingelseyoudontget=999')
parsed = btc.decode_bip21_uri(
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?somethingyoudontunderstand=50&somethingelseyoudontget=999')
assert(parsed['address'] == '175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W')
assert(parsed['somethingyoudontunderstand'] == '50')
assert(parsed['somethingelseyoudontget'] == '999')
def test_bip21_encode():
assert(
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', {}) ==
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W'
)
assert(
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', {
'label': 'Luke-Jr'
}) ==
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?label=Luke-Jr'
)
# Both dictionary and list of tuples should work
assert(
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('label', 'Luke-Jr')
]) ==
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?label=Luke-Jr'
)
# Use list of tuples version for multiple parameter tests, as dicts don't
# have guaranteed ordering.
assert(
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('amount', 20.3),
('label', 'Luke-Jr')
]) ==
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=20.3&label=Luke-Jr'
)
assert(
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('amount', 50),
('label', 'Luke-Jr'),
('message', 'Donation for project xyz')
]) ==
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?amount=50&label=Luke-Jr&message=Donation%20for%20project%20xyz'
)
assert(
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('req-somethingyoudontunderstand', 50),
('req-somethingelseyoudontget', 999)
]) ==
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?req-somethingyoudontunderstand=50&req-somethingelseyoudontget=999'
)
assert(
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('somethingyoudontunderstand', 50),
('somethingelseyoudontget', 999)
]) ==
'bitcoin:175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W?somethingyoudontunderstand=50&somethingelseyoudontget=999'
)
# Invalid amounts must raise ValueError
with pytest.raises(ValueError):
# test dicts
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', {
'amount': ''
})
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', {
'amount': 'XYZ'
})
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', {
'amount': '100\'000'
})
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', {
'amount': '100,000'
})
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', {
'amount': '100000000'
})
# test list of tuples
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('amount', '')
])
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('amount', 'XYZ')
])
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('amount', '100\'000')
])
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('amount', '100,000')
])
btc.encode_bip21_uri('175tWpb8K1S7NmH4Zx6rewF9WQrcZv245W', [
('amount', '100000000')
])
|
undeath/joinmarket-clientserver
|
jmbitcoin/test/test_bip21.py
|
Python
|
gpl-3.0
| 6,062
|
#!/usr/bin/env python
import roslib
import rospy
import serial
import time
#COM.baudrate=115200
from sensor_msgs.msg import JointState
global COM
def callback(data):
rospy.loginfo(rospy.get_caller_id()+"I heard %s",data.position)
global COM
buffer=[]
j =[0.7,0.5,0.0,0.2,-0.4,1.5]
j[0]=data.position[0]
while 1:
A = COM.read(1)
print A
buffer.append(A)
if A == '\n':
if "A" in buffer:
#print "GOT an A: ", buffer
#print "data pos 0:", data.position
S = str(data.position[0]) + ',' + str(data.position[1]) + ',' + str(j[2]) + ',' + str(j[3]) + ',' + str(j[4]) + ',' + str(j[5]) + "\r\n"
print "SENDING:",S
COM.write(S)
return
print buffer
buffer = []
def listener():
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
rospy.init_node('listener', anonymous=True)
global COM
rospy.Subscriber("joint_angles", JointState, callback)
COM =serial.Serial('/dev/ttyUSB0',9600)
COM.parity = 'E'
COM.stopbits = 2
print COM
buffer = []
#r=rospy.Rate(10) # 10 hz
COM.write("1\r\n") # tells slave to init
r=rospy.Rate(10) # 10 hz
while rospy.is_shutdown()==False:
r.sleep()
# spin() simply keeps python from exiting until this node is stopped
#rospy.spinOnce()
if __name__ == '__main__':
listener()
if __name__=="__main__":
rospy.init_node('cyber_glove_teleop')
init_pos=[0.5,0.5,0.0,0.2,-0.4,1.5]
|
kuri-kustar/haptic_interface-project
|
manipulation/mitsubishi_arm_hardware_interface/scripts/writeJointRos.py
|
Python
|
gpl-3.0
| 1,782
|
# import bpy
# def draw_wire(context):
# area = context.area
# if context.area.type == 'VIEW_3D':
# target = bpy.context.area.spaces.active
# # target.draw_handler_add()
|
TriumphLLC/FashionProject
|
modules/draw/wires.py
|
Python
|
gpl-3.0
| 183
|
'''
SASMOL: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from unittest import main
from mocker import Mocker, MockerTestCase
import sasmol.system as system
import os
DataPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','data','sasmol','system')+os.path.sep
class Test_intg_system_Atom_setLoc(MockerTestCase):
def setUp(self):
self.o=system.Atom(3,'1CRN-3frames.pdb')
def test_1CRN_3frames(self):
'''
        test a regular pdb file with 3 frames
'''
#
expected = [' ']*327
#
self.o.setLoc(expected)
#
result = self.o.loc()
print result
#
self.assertEqual(expected, result)
def tearDown(self):
pass
if __name__ == '__main__':
main()
|
StevenCHowell/zazmol
|
src/python/test_sasmol/test_system/test_intg_system_Atom_setLoc.py
|
Python
|
gpl-3.0
| 1,425
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def from_election_to_elections(apps, schema_editor):
    # We can't import the models directly as they may be newer
    # versions than this migration expects. We use the historical versions.
Election = apps.get_model("elections", "Election")
Candidate = apps.get_model("elections", "Candidate")
for candidate in Candidate.objects.all():
candidate.elections.add(candidate.election)
class Migration(migrations.Migration):
dependencies = [
('elections', '0020_auto_20150821_2101'),
]
operations = [
migrations.AddField(
model_name='candidate',
name='elections',
field=models.ManyToManyField(related_name='candidates', null=True, to='elections.Election'),
),
migrations.RunPython(from_election_to_elections),
]
|
ciudadanointeligente/votainteligente-portal-electoral
|
elections/migrations/0021_auto_20151008_1526.py
|
Python
|
gpl-3.0
| 928
|
# -*- coding: utf-8 -*-
from pyknyx.common.singleton import *
import unittest
import six  # add_metaclass is used below; import it explicitly
# Mute logger
from pyknyx.services.logger import logging
logger = logging.getLogger(__name__)
logging.getLogger("pyknyx").setLevel(logging.ERROR)
@six.add_metaclass(Singleton)
class SingletonTest(object):
pass
class SingletonTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_constructor(self):
s1 = SingletonTest()
s2 = SingletonTest()
self.assertIs(s1, s2)
|
knxd/pKNyX
|
tests/common/singleton.py
|
Python
|
gpl-3.0
| 617
|
##############################################################################
#
# Copyright (C) Zenoss, Inc. 2015, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
from Products.ZenTestCase.BaseTestCase import BaseTestCase
from ZenPacks.zenoss.ZenJMX.datasources.JMXDataSource import JMXDataSource
class TestJMXDataSource(BaseTestCase):
def afterSetUp(self):
self.ds = JMXDataSource(id='1')
def test_getDescription(self):
self.assertEqual(self.ds.getDescription(), '${dev/id}')
def test_getProtocols(self):
self.assertEqual(self.ds.getProtocols(), ['REMOTING-JMX', 'RMI', 'JMXMP'])
def test_zmanage_editProperties(self):
with self.assertRaises(AttributeError):
self.ds.zmanage_editProperties()
|
krull/docker-zenoss4
|
init_fs/usr/local/zenoss/ZenPacks/ZenPacks.zenoss.ZenJMX-3.12.1.egg/ZenPacks/zenoss/ZenJMX/tests/test_JMXDataSource.py
|
Python
|
gpl-3.0
| 960
|
""" VirtualMachineMonitorAgent plays the role of the watch dog for the Virtual Machine
"""
import os
import time
import glob
# DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, rootPath
from DIRAC.ConfigurationSystem.Client.Helpers import Operations
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities import List, Network
# VMDIRAC
from VMDIRAC.WorkloadManagementSystem.Client.ServerUtils import virtualMachineDB
__RCSID__ = "$Id$"
class VirtualMachineMonitorAgent( AgentModule ):
def __getCSConfig( self ):
if not self.runningPod:
return S_ERROR( "/LocalSite/RunningPod is not defined" )
#Variables coming from the vm
imgPath = "/Cloud/%s" % self.runningPod
for csOption, csDefault, varName in ( ( "MinWorkingLoad", 0.01, "vmMinWorkingLoad" ),
( "LoadAverageTimespan", 60, "vmLoadAvgTimespan" ),
( "HaltPeriod", 600, "haltPeriod" ),
( "HaltBeforeMargin", 300, "haltBeforeMargin" ),
( "HeartBeatPeriod", 300, "heartBeatPeriod" ),
):
path = "%s/%s" % ( imgPath, csOption )
value = self.op.getValue( path, csDefault )
if not value > 0:
return S_ERROR( "%s has an incorrect value, must be > 0" % path )
setattr( self, varName, value )
for csOption, csDefault, varName in (
( "JobWrappersLocation", "/scratch", "vmJobWrappersLocation" ),
):
path = "%s/%s" % ( imgPath, csOption )
value = gConfig.getValue( path, csDefault )
if not value :
return S_ERROR( "%s points to an empty string, cannot be!" % path )
setattr( self, varName, value )
self.haltBeforeMargin = max( self.haltBeforeMargin, int( self.am_getPollingTime() ) + 5 )
self.haltPeriod = max( self.haltPeriod, int( self.am_getPollingTime() ) + 5 )
self.heartBeatPeriod = max( self.heartBeatPeriod, int( self.am_getPollingTime() ) + 5 )
self.log.info( "** VM Info **" )
self.log.info( "Name : %s" % self.runningPod )
self.log.info( "Min Working Load : %f" % self.vmMinWorkingLoad )
self.log.info( "Load Avg Timespan : %d" % self.vmLoadAvgTimespan )
self.log.info( "Job wrappers location : %s" % self.vmJobWrappersLocation )
self.log.info( "Halt Period : %d" % self.haltPeriod )
self.log.info( "Halt Before Margin : %d" % self.haltBeforeMargin )
self.log.info( "HeartBeat Period : %d" % self.heartBeatPeriod )
if self.vmID:
self.log.info( "DIRAC ID : %s" % self.vmID )
if self.uniqueID:
self.log.info( "Unique ID : %s" % self.uniqueID )
self.log.info( "*************" )
return S_OK()
def __declareInstanceRunning( self ):
#Connect to VM monitor and register as running
retries = 3
sleepTime = 30
for i in range( retries ):
result = virtualMachineDB.declareInstanceRunning( self.uniqueID, self.ipAddress )
if result[ 'OK' ]:
self.log.info( "Declared instance running" )
return result
self.log.error( "Could not declare instance running", result[ 'Message' ] )
if i < retries - 1 :
self.log.info( "Sleeping for %d seconds and retrying" % sleepTime )
time.sleep( sleepTime )
return S_ERROR( "Could not declare instance running after %d retries" % retries )
def initialize( self ):
self.am_disableMonitoring()
#Init vars
self.runningPod = gConfig.getValue( '/LocalSite/RunningPod' )
self.log.info( "Running pod name of the image is %s" % self.runningPod )
self.vmID = gConfig.getValue( '/LocalSite/VMID' )
self.__loadHistory = []
self.vmMinWorkingLoad = None
self.vmLoadAvgTimespan = None
self.vmJobWrappersLocation = None
self.haltPeriod = None
self.haltBeforeMargin = None
self.heartBeatPeriod = None
self.am_setOption( "MaxCycles", 0 )
self.am_setOption( "PollingTime", 60 )
#Discover net address
netData = Network.discoverInterfaces()
for iface in sorted( netData ):
if iface.find( "eth" ) == 0:
self.ipAddress = netData[ iface ][ 'ip' ]
break
self.log.info( "IP Address is %s" % self.ipAddress )
#getting the stop policy
self.op = Operations.Operations()
    self.vmStopPolicy = self.op.getValue( "Cloud/%s/VMStopPolicy" % self.runningPod, 'elastic' )
self.log.info( "vmStopPolicy is %s" % self.vmStopPolicy )
#Declare instance running
self.uniqueID = ''
result = virtualMachineDB.getUniqueIDByName( self.vmID )
if result['OK']:
self.uniqueID = result['Value']
result = self.__declareInstanceRunning()
if not result[ 'OK' ]:
self.log.error( "Could not declare instance running", result[ 'Message' ] )
self.__haltInstance()
return S_ERROR( "Halting!" )
self.__instanceInfo = result[ 'Value' ]
#Get the cs config
result = self.__getCSConfig()
if not result[ 'OK' ]:
return result
return S_OK()
def __getLoadAvg( self ):
result = self.__getCSConfig()
if not result[ 'OK' ]:
return result
with open( "/proc/loadavg", "r" ) as fd:
data = [ float( v ) for v in List.fromChar( fd.read(), " " )[:3] ]
self.__loadHistory.append( data )
numRequiredSamples = max( self.vmLoadAvgTimespan / self.am_getPollingTime(), 1 )
while len( self.__loadHistory ) > numRequiredSamples:
self.__loadHistory.pop( 0 )
self.log.info( "Load averaged over %d seconds" % self.vmLoadAvgTimespan )
self.log.info( " %d/%s required samples to average load" % ( len( self.__loadHistory ),
numRequiredSamples ) )
avgLoad = 0
for f in self.__loadHistory:
avgLoad += f[0]
return avgLoad / len( self.__loadHistory ), len( self.__loadHistory ) == numRequiredSamples
def __getNumJobWrappers( self ):
if not os.path.isdir( self.vmJobWrappersLocation ):
return 0
self.log.info( "VM job wrappers path: %s" % self.vmJobWrappersLocation )
jdlList = glob.glob( os.path.join( self.vmJobWrappersLocation, "*", "*.jdl" ) )
return len( jdlList )
def execute( self ):
#Get load
avgLoad, avgRequiredSamples = self.__getLoadAvg()
self.log.info( "Load Average is %.2f" % avgLoad )
if not avgRequiredSamples:
self.log.info( " Not all required samples yet there" )
#Do we need to send heartbeat?
with open( "/proc/uptime" ) as fd:
uptime = float( List.fromChar( fd.read().strip(), " " )[0] )
hours = int( uptime / 3600 )
minutes = int( uptime - hours * 3600 ) / 60
seconds = uptime - hours * 3600 - minutes * 60
self.log.info( "Uptime is %.2f (%d:%02d:%02d)" % ( uptime, hours, minutes, seconds ) )
#Num jobs
numJobs = self.__getNumJobWrappers()
self.log.info( "There are %d job wrappers" % numJobs )
if uptime % self.heartBeatPeriod <= self.am_getPollingTime():
#Heartbeat time!
self.log.info( "Sending hearbeat..." )
result = virtualMachineDB.instanceIDHeartBeat( self.uniqueID, avgLoad, numJobs, 0, 0, )
status = None
if result[ 'OK' ]:
self.log.info( " heartbeat sent!" )
status = result['Value']
else:
if "Transition" in result["Message"]:
self.log.error( "Error on service:", result[ 'Message' ] )
status = result['State']
else:
self.log.error("Connection error", result["Message"])
if status:
self.__processHeartBeatMessage( status, avgLoad )
#Do we need to check if halt?
if avgRequiredSamples and uptime % self.haltPeriod + self.haltBeforeMargin > self.haltPeriod:
self.log.info( "Load average is %s (minimum for working instance is %s)" % ( avgLoad,
self.vmMinWorkingLoad ) )
#current stop polices: elastic (load) and never
if self.vmStopPolicy == 'elastic':
#If load less than X, then halt!
if avgLoad < self.vmMinWorkingLoad:
self.__haltInstance( avgLoad )
if self.vmStopPolicy == 'never':
self.log.info( "VM stop policy is defined as never (until SaaS or site request)")
return S_OK()
def __processHeartBeatMessage( self, hbMsg, avgLoad = 0.0 ):
if hbMsg == 'stop':
#Write stop file for jobAgent
self.log.info( "Received STOP signal. Writing stop files..." )
for agentName in [ "WorkloadManagement/JobAgent" ]:
ad = os.path.join( *agentName.split( "/" ) )
stopDir = os.path.join( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), 'control', ad )
stopFile = os.path.join( stopDir, "stop_agent" )
try:
if not os.path.isdir( stopDir ):
os.makedirs( stopDir )
fd = open( stopFile, "w" )
fd.write( "stop!" )
fd.close()
self.log.info( "Wrote stop file %s for agent %s" % ( stopFile, agentName ) )
except Exception, e:
self.log.error( "Could not write stop agent file", stopFile )
if hbMsg == 'halt':
self.__haltInstance( avgLoad )
def __haltInstance( self, avgLoad = 0.0 ):
self.log.info( "Halting instance..." )
retries = 3
sleepTime = 10
for i in range( retries ):
result = virtualMachineDB.declareInstanceHalting( self.uniqueID, avgLoad )
if result[ 'OK' ]:
self.log.info( "Declared instance halting" )
break
self.log.error( "Could not send halting state:", result[ 'Message' ] )
if i < retries - 1 :
self.log.info( "Sleeping for %d seconds and retrying" % sleepTime )
time.sleep( sleepTime )
#self.log.info( "Executing system halt..." )
#os.system( "halt" )
|
atsareg/VMDIRAC
|
VMDIRAC/WorkloadManagementSystem/Agent/VirtualMachineMonitorAgent.py
|
Python
|
gpl-3.0
| 9,987
|
#!/usr/bin/env python
# --!-- coding: utf8 --!--
"""
The converters package provide functions to quickly convert on the fly from
one format to another. It is responsible to check what external library are
present, and do the job as best as possible with what we have in hand.
"""
from PyQt5.QtWidgets import QTextEdit
from manuskript.converters.abstractConverter import abstractConverter
from manuskript.converters.pandocConverter import pandocConverter
#from manuskript.converters.markdownConverter import markdownConverter
def HTML2MD(html):
# Convert using pandoc
if pandocConverter.isValid():
return pandocConverter.convert(html, _from="html", to="markdown")
# Convert to plain text using QTextEdit
return HTML2PlainText(html)
def HTML2PlainText(html):
"""
Convert from HTML to plain text.
"""
if pandocConverter.isValid():
return pandocConverter.convert(html, _from="html", to="plain")
# Last resort: probably resource inefficient
e = QTextEdit()
e.setHtml(html)
return e.toPlainText()
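# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original manuskript module): it shows
# how the two helpers above might be exercised. The sample HTML string is
# invented for illustration, and a QApplication is created because the
# QTextEdit fallback in HTML2PlainText needs one.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)  # required before constructing QTextEdit
    sample = "<p>He said <em>hello</em>.</p>"
    print(HTML2MD(sample))         # markdown if pandoc is available, else plain text
    print(HTML2PlainText(sample))  # plain text in either case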
|
gedakc/manuskript
|
manuskript/converters/__init__.py
|
Python
|
gpl-3.0
| 1,027
|
# -*- coding: latin-1 -*-
import re
import json
from .common import InfoExtractor
from ..utils import determine_ext
class HarkIE(InfoExtractor):
_VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
_TEST = {
u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
u'file': u'mmbzyhkgny.mp3',
u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
u'info_dict': {
u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
u'duration': 11,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
json_url = "http://www.hark.com/clips/%s.json" %(video_id)
info_json = self._download_webpage(json_url, video_id)
info = json.loads(info_json)
final_url = info['url']
return {'id': video_id,
'url' : final_url,
'title': info['name'],
'ext': determine_ext(final_url),
'description': info['description'],
'thumbnail': info['image_original'],
'duration': info['duration'],
}
|
lebabouin/CouchPotatoServer-develop
|
couchpotato/core/providers/trailer/vftrailers/youtube_dl/extractor/hark.py
|
Python
|
gpl-3.0
| 1,526
|
'''
main tuning script, LCLS
'''
import numpy as np
from ocelot.mint.mint import Optimizer, Action
from ocelot.mint.flash1_interface import FLASH1MachineInterface, FLASH1DeviceProperties, TestInterface
mi = FLASH1MachineInterface()
dp = FLASH1DeviceProperties()
#opt = Optimizer(mi, dp)
opt = Optimizer(TestInterface(), dp)
opt.debug = True
opt.logging = True
opt.log_file = 'test.log'
opt.timeout = 1.2
seq1 = [Action(func=opt.max_sase, args=[ ['H10SMATCH','H12SMATCH'], 'simplex'] ) ]
seq2 = [Action(func=opt.max_sase, args=[ ['V14SMATCH','V7SMATCH'], 'simplex' ] )]
seq3 = [Action(func=opt.max_sase, args=[ ['V14SMATCH','V7SMATCH','H10SMATCH','H12SMATCH'], 'simplex' ] )]
seq4 = [Action(func=opt.max_sase, args=[ ['Q13SMATCH','Q15SMATCH'], 'simplex' ] )]
seq5 = [Action(func=opt.max_sase, args=[ ['H3DBC3','V3DBC3'], 'simplex' ] )]
seq6 = [Action(func=opt.max_sase, args=[ ['H3DBC3','V3DBC3','H10ACC7','V10ACC7'], 'simplex' ] )]
seq7 = [Action(func=opt.max_sase, args=[ ['Q5UND1.3.5','Q5UND2.4'], 'simplex' ] )]
seq8 = [Action(func=opt.max_sase, args=[ ['H3UND1','H3UND3','H3UND4','H3UND5'], 'simplex' ] )]
seq9 = [Action(func=opt.max_sase, args=[ ['H8TCOL','V8TCOL'], 'simplex' ] )]
seq10 = [Action(func=opt.max_sase, args=[ ['H3DBC3'], 'simplex' ] )]
seq0 = [Action(func=opt.max_sase, args=[ ['H10SMATCH','H12SMATCH'], 'cg', {'maxiter':15}] ),
Action(func=opt.max_sase, args=[ ['H10SMATCH','H12SMATCH'], 'simplex', {'maxiter':25}] )]
opt.eval(seq1)
"""
#import json
def get_dict(lat, bpms):
dict_bpms = {}
for elem in lat.sequence:
if elem.type == "monitor" and elem.mi_id in bpms:
dict_bpms[elem.mi_id] = {}
dict_bpms[elem.mi_id]["x"] = elem.x
dict_bpms[elem.mi_id]["y"] = elem.y
return dict_bpms
#dp = FLASH1DeviceProperties()
def apply_bump(names, currents, dIs, alpha):
mi.set_value(names, currents+dIs*alpha)
cors = ['H3DBC3', 'H10ACC4','H9ACC5', 'H10ACC5', 'H9ACC6', 'H10ACC6', 'H10ACC7']
dI = np.array([-0.0114768844711, -0.183727960466, 0.325959042831, 0.318743893708, 0.15280311903, 0.130996600233, -0.831909116508])
currents = np.array([ -0.0229914523661, 0.0250000003725, 0.985000014305, 0.0, -1.17299997807, 0.0, 0.148000001907])
bump = {"correctors":cors, "dI": dI, "currents":currents}
alpha = 0.1
seq_bump = [Action(func=opt.max_sase_bump, args=[ bump, alpha, 'simplex' ] )]
orbit = {}
orbit["correctors"] = ['H3SFELC', 'H4SFELC', 'H10SMATCH', 'D11SMATCH', 'H12SMATCH']
setup = log.MachineSetup()
#setup.save_lattice(lat, "init.txt")
lat_all = MagneticLattice(lattice)
setup.load_lattice("init.txt", lat_all)
orbit["bpms"] = get_dict(lat, bpms)
seq_min_orb = [Action(func=opt.min_orbit, args=[orbit, 'simplex' ] )]
opt.eval(seq_bump)
apply_bump(cors, currents, dI, alpha=0.1)
"""
|
sserkez/ocelot
|
mint/flash_tune.py
|
Python
|
gpl-3.0
| 2,806
|
import numpy as np
from esdl.cube_provider import CubeSourceProvider
from esdl.cube_config import CubeConfig
class TestCubeSourceProvider(CubeSourceProvider):
"""
CubeSourceProvider implementation used for testing cube generation without any source files.
The following usage generates a cube with two variables ``test_1`` and ``test_2``:
cube-gen -c ./myconf.py ./mycube test:var=test_1 test:var=test_2
:param cube_config: Specifies the fixed layout and conventions used for the cube.
:param name: The provider's registration name. Defaults to ``"test"``.
    :param var: Name of a (float32) variable which will be filled with a constant value that increases by 0.1 with every computed time step.
"""
def __init__(self, cube_config: CubeConfig, name: str = 'test', var: str = 'test'):
super(TestCubeSourceProvider, self).__init__(cube_config, name)
self._variable_name = var
self._value = 0.0
def prepare(self):
pass
@property
def temporal_coverage(self):
return self.cube_config.start_time, self.cube_config.end_time
@property
def spatial_coverage(self):
return 0, 0, self.cube_config.grid_width, self.cube_config.grid_height
@property
def variable_descriptors(self):
return {
self._variable_name: {
'data_type': np.float32,
'fill_value': np.nan,
'scale_factor': 1.0,
'add_offset': 0.0,
}
}
def compute_variable_images(self, period_start, period_end):
self._value += 0.1
image_width = self.cube_config.grid_width
image_height = self.cube_config.grid_height
image_shape = (image_height, image_width)
return {
self._variable_name: np.full(image_shape, self._value, dtype=np.float32)
}
def close(self):
pass
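# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original provider). It assumes the
# CubeSourceProvider base constructor merely stores its arguments, and it
# substitutes a minimal stand-in object for a real CubeConfig; the grid size
# (3 x 4) and the variable name "test_1" are invented for illustration.
if __name__ == "__main__":
    class _StubCubeConfig:
        grid_width = 4
        grid_height = 3
        start_time = None
        end_time = None
    provider = TestCubeSourceProvider(_StubCubeConfig(), var="test_1")
    images = provider.compute_variable_images(None, None)
    print(images["test_1"].shape)   # (3, 4), every cell equal to 0.1
    print(provider.variable_descriptors["test_1"]["data_type"])  # float32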
|
CAB-LAB/cablab-core
|
esdl/providers/test_provider.py
|
Python
|
gpl-3.0
| 1,856
|
from sqlalchemy.schema import (
Table,
Column,
MetaData,
ForeignKey)
from sqlalchemy.types import (
Text,
JSON,
DateTime,
Integer,
String)
from collections import defaultdict
from uuid import uuid4
import datetime
class SchemaStore:
def __init__(self):
self.metadata = defaultdict(MetaData)
self.tables = defaultdict(list)
def _import_schema(self, schema_name):
def fkey(target):
return ForeignKey(schema_name + '.' + target)
def make_uuid():
return str(uuid4())
metadata = self.metadata[schema_name]
tables = self.tables[schema_name]
tables.append(Table(
"ealgis_metadata", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256), nullable=False),
Column('family', String(256), nullable=True),
Column('uuid', String(36), nullable=False, default=make_uuid),
Column('description', Text(), nullable=False),
Column('date_created', DateTime(timezone=True), default=datetime.datetime.utcnow, nullable=False),
Column('date_published', DateTime(timezone=True), nullable=False),
schema=schema_name))
tables.append(Table(
"dependencies", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256), nullable=False),
Column('uuid', String(36), nullable=False),
schema=schema_name))
tables.append(Table(
"table_info", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(256)),
Column('metadata_json', JSON()),
schema=schema_name))
tables.append(Table(
"column_info", metadata,
Column('id', Integer, primary_key=True),
Column('table_info_id', Integer, fkey('table_info.id'), nullable=False),
Column('name', String(256)),
Column('schema_name', String(256)),
Column('metadata_json', JSON()),
schema=schema_name))
tables.append(Table(
"geometry_source", metadata,
Column('id', Integer, primary_key=True),
Column('table_info_id', Integer, fkey('table_info.id'), nullable=False),
Column('gid_column', String(256)),
Column('geometry_type', String(256)),
schema=schema_name))
tables.append(Table(
"geometry_source_projection", metadata,
Column('id', Integer, primary_key=True),
Column('geometry_source_id', Integer, fkey('table_info.id'), nullable=False),
Column('geometry_column', String(256)),
Column('srid', Integer),
schema=schema_name))
tables.append(Table(
"geometry_linkage", metadata,
Column('id', Integer, primary_key=True),
# in the source schema: may not be the same schema as this Table instance
Column('geometry_source_schema_name', String, nullable=False),
Column('geometry_source_id', Integer, nullable=False),
# these must be in this schema
Column('attr_table_id', Integer, fkey('table_info.id'), nullable=False),
Column('attr_column', String(256)),
schema=schema_name))
tables.append(Table(
"mailbox", metadata,
Column('id', Integer, primary_key=True),
Column('from', String(256)),
Column('to', String(256)),
Column('message', JSON()),
schema=schema_name))
def load_schema(self, schema_name):
if schema_name not in self.metadata:
self._import_schema(schema_name)
return self.metadata[schema_name], self.tables[schema_name]
store = SchemaStore()
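# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The schema name
# "demo_schema" is invented for illustration; load_schema() builds the table
# definitions lazily on first use and returns the cached MetaData afterwards.
if __name__ == "__main__":
    metadata, tables = store.load_schema("demo_schema")
    assert store.load_schema("demo_schema")[0] is metadata  # cached on repeat calls
    for table in tables:
        print(table.fullname)  # e.g. demo_schema.ealgis_metadata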
|
grahame/ealgis
|
django/ealgis/dataschema/schema_v1.py
|
Python
|
gpl-3.0
| 3,861
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
from zope.interface import Interface
class IIndexableByStartDateTime(Interface):
def getAdjustedStartDate(self):
"""
Returns a tz-aware datetime
"""
class IIndexableByEndDateTime(Interface):
def getAdjustedEndDate(self):
"""
Returns a tz-aware datetime
"""
class IIndexableByArbitraryDateTime(Interface):
def getIndexingDateTime():
"""
Return an arbitrary tz-aware datetime (class will decide what)
"""
class IIndexableById(Interface):
def getId():
"""
Return the id of the object
"""
|
pferreir/indico-backup
|
indico/core/index/adapter.py
|
Python
|
gpl-3.0
| 1,381
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as a attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so that
        # checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs so
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
      When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
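# Illustrative sketch (added for clarity, not part of the original test suite):
# a one-shot decode using the decoder's default lengths I=1, O=1 documented in
# the class docstring above.  The expected value mirrors the first entry of
# StatefulIncrementalDecoderTest.test_cases below.
def _demo_stateful_decoder():
    d = StatefulIncrementalDecoder()
    # Each input byte becomes a one-byte word, padded/truncated to one output
    # character and terminated with a period.
    assert d.decode(b'abcd', final=True) == 'a.b.c.d.'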
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
# Issue #17106
# Crash when decoder returns non-string
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupterd_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupterd_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupterd_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupterd_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
mancoast/CPythonPyc_test
|
cpython/278_test_io.py
|
Python
|
gpl-3.0
| 115,068
|
import os
import re
import json
import importlib
# Django Libraries
from django.http import HttpResponse, HttpResponseServerError
# CloudScape Libraries
from cloudscape.common import config
from cloudscape.common import logger
from cloudscape.common.vars import T_BASE
from cloudscape.engine.api.base import APIBase
from cloudscape.common.utils import JSONTemplate
from cloudscape.engine.api.auth.key import APIKey
from cloudscape.engine.api.auth.acl import ACLGateway
from cloudscape.common.utils import valid, invalid
from cloudscape.engine.api.auth.token import APIToken
from cloudscape.engine.api.app.auth.models import DBAuthEndpoints
from cloudscape.engine.api.app.user.models import DBUserDetails
# Configuration / Logger
CONF = config.parse()
LOG = logger.create('cloudscape.engine.api.core.request', CONF.server.log)
def dispatch(request):
"""
The entry point for all API requests. Called for all endpoints from the Django
URLs file. Creates a new instance of the EndpointManager class, and returns any
HTTP response to the client that opened the API request.
:param request: The Django request object
:type request: object
:rtype: object
"""
try:
# Return the response from the endpoint handler
return EndpointManager(request).handler()
# Critical server error
except Exception as e:
LOG.exception('Internal server error: %s' % str(e))
# Return a 500 error
return HttpResponseServerError('Internal server error, please contact your administrator.')
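# Illustrative sketch (added for clarity, not from this project): dispatch() is
# typically wired into a Django URLconf as a catch-all view so that every API
# path is funnelled through EndpointManager.  The import style and URL pattern
# below are assumptions and depend on the Django version in use.
#
#     # urls.py (example only)
#     from django.conf.urls import url
#     from cloudscape.engine.api.core.request import dispatch
#     urlpatterns = [url(r'^.*$', dispatch)]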
class EndpointManager:
"""
    The endpoint request manager class. Serves as the entry point for all API requests,
both for authentication requests, and already authenticated requests. Constructs
the base API class, loads API utilities, and performs a number of other functions
to prepare the API for the incoming request.
The EndpointManager class is instantiated by the dispatch method, which is called
by the Django URLs module file. It is initialized with the Django request object.
"""
def __init__(self, request):
self.request_raw = request
# Request properties
self.method = None
self.request = None
self.endpoint = None
self.action = None
self.path = None
# Request endpoint handler
self.handler_obj = None
# API parameters
self.api_name = None
self.api_mod = None
self.api_class = None
self.api_user = None
self.api_group = None
# API base object
self.api_base = None
# Request error
def _req_error(self, err):
err_response = {
            'message': 'An error occurred when processing the API request',
'endpoint': self.endpoint,
'error': err
}
LOG.error('%s:%s' % (self.endpoint, err))
return HttpResponse(json.dumps(err_response), content_type='application/json', status=400)
def _authenticate(self):
"""
Authenticate the API request.
"""
# Set the API user and group
self.api_user = self.request['api_user']
self.api_group = None if not ('api_group' in self.request) else self.request['api_group']
LOG.info('Authenticating API user: %s, group=%s' % (self.api_user, repr(self.api_group)))
# Authenticate key for token requests
if self.endpoint == 'auth/get':
auth_status = APIKey().validate(self.request)
if not auth_status['valid']:
return self._req_error(auth_status['content'])
            LOG.info('API key authentication successful for user: %s' % self.api_user)
# Authenticate token for API requests
else:
if not APIToken().validate(self.request):
return self._req_error('Failed to validate API token for user \'%s\'' % self.api_user)
            LOG.info('API token authentication successful for user: %s' % self.api_user)
# Check for a user account
if DBUserDetails.objects.filter(username=self.api_user).count():
# If no API group was supplied
if not self.api_group:
                return self._req_error('User accounts must supply a group UUID via the <api_group> parameter when making a request')
# Make sure the group exists and the user is a member
is_member = False
for group in DBUserDetails.objects.get(username=self.api_user).get_groups():
if group['uuid'] == self.api_group:
is_member = True
break
# If the user is not a member of the group
if not is_member:
return self._req_error('User account <%s> is not a member of group <%s>' % (self.api_user, self.api_group))
# Validate the request
def _validate(self):
# Request body / method
self.request = json.loads(self.request_raw.body)
self.method = self.request_raw.META['REQUEST_METHOD']
# Make sure a request action is set
if not 'action' in self.request:
return self._req_error('Request body requires an <action> parameter for endpoint pathing')
self.action = self.request['action']
# Get the request path
self.path = re.compile('^\/(.*$)').sub(r'\g<1>', self.request_raw.META['PATH_INFO'])
# Set the request endpoint
self.endpoint = '%s/%s' % (self.path, self.action)
# Map the path to a module, class, and API name
self.handler_obj = EndpointMapper(self.endpoint, self.method).handler()
if not self.handler_obj['valid']:
return self._req_error(self.handler_obj['content'])
# Validate the request body
request_err = JSONTemplate(self.handler_obj['content']['api_map']).validate(self.request)
if request_err:
return self._req_error(request_err)
# Set the handler objects
self.api_name = self.handler_obj['content']['api_name']
self.api_mod = self.handler_obj['content']['api_mod']
self.api_class = self.handler_obj['content']['api_class']
self.api_utils = self.handler_obj['content']['api_utils']
def handler(self):
"""
The endpoint manager request handler. Performs a number of validation steps before
passing off the request to the API utility class.
1.) Looks for the base required request parameters
2.) Maps the endpoint and request action to an API utility and validates the request body
3.) Authenticates the user and API key/token
4.) Initializes any required Socket.IO connections for web clients
5.) Launches the API utility class to process the request
6.) Returns either an HTTP response with the status of the request
"""
# Parse the request
try:
validate_err = self._validate()
if validate_err:
return validate_err
except Exception as e:
LOG.exception('Exception while validating request: %s' % str(e))
return self._req_error('Internal server error, failed to validate the request')
# Authenticate the request
try:
auth_err = self._authenticate()
if auth_err:
return auth_err
except Exception as e:
LOG.exception('Exception while authenticating the request: %s' % str(e))
return self._req_error('Internal server error, failed to authenticate the request')
# Check the request against ACLs
acl_gateway = ACLGateway(self.request, self.endpoint, self.api_user)
# If the user is not authorized for this endpoint/object combination
if not acl_gateway.authorized:
return self._req_error(acl_gateway.auth_error)
# Set up the API base
try:
# Create an instance of the APIBase and run the constructor
api_obj = APIBase(
name = self.api_name,
endpoint = self.endpoint,
utils = self.api_utils,
acl = acl_gateway
).construct(self.request_raw)
# Make sure the construct ran successfully
if not api_obj['valid']:
return self._req_error(api_obj['content'])
# Set the API base object for endpoint utilities
self.api_base = api_obj['content']
# Failed to setup the APIBase
except Exception as e:
LOG.exception('Failed to set up API base: %s' % str(e))
            return self._req_error('Internal server error, failed to set up API base')
# Load the handler module and class
handler_mod = importlib.import_module(self.api_mod)
handler_class = getattr(handler_mod, self.api_class)
handler_inst = handler_class(self.api_base)
# Launch the request handler and return the response
try:
response = handler_inst.launch()
# Critical error when running handler
except Exception as e:
            LOG.exception('Exception while running API handler: %s' % str(e))
return self._req_error('Encountered API handler error')
# Close any open SocketIO connections
self.api_base.socket.disconnect()
# Return either a valid or invalid request response
if response['valid']:
return self.api_base.log.success(response['content'], response['data'])
return self.api_base.log.error(code=response['code'], log_msg=response['content'])
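# Illustrative sketch (added for clarity; the values are placeholders): the
# minimal request body shape that _validate()/_authenticate() above expect for
# an already-authenticated endpoint.  'action', 'api_user' and 'api_token' are
# required by the merged request map; 'api_group' is optional, but mandatory
# for user accounts.
#
#     {
#         "action":    "get",
#         "api_user":  "someuser",
#         "api_token": "<token returned by the auth/get endpoint>",
#         "api_group": "<group uuid>"
#     }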
class EndpointMapper:
"""
    API class used to construct the endpoint map. Loads the endpoint definitions
    stored in the database to build a map used to load the required utilities and
    modules, as well as to validate the request body for each endpoint. Each map
    also contains ACL parameters used when constructing the ACL database tables.
"""
def __init__(self, endpoint=None, method=None):
"""
Construct the EndpointMapper class.
@param endpoint: The endpoint path
@type endpoint: str
@param method: The request method
@type method: str
"""
self.endpoint = endpoint
self.method = method
self.map = {}
def _merge_auth(self,j,e):
"""
Helper method used to merge token authentication parameters into the endpoint
request map. Mainly so I don't have to redundantly include the same code in
every map. Also makes modification much easier.
"""
# Ignore the authentication endpoint, as this is used to retrieve the token
if e == 'auth/get':
return
# Required / optional connection parameters
j['root']['_required'].extend(['api_user', 'api_token', 'action'])
j['root']['_optional'].extend(['api_group'])
# Connection parameter types
t = { 'api_user': 'str', 'api_token': 'str', 'action': 'str', 'api_group': 'uuid4' }
for k,v in t.iteritems():
j['root']['_children'][k] = { '_type': v }
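    # Illustrative sketch (added for clarity): given a minimal endpoint map such
    # as {'root': {'_required': ['name'], '_optional': [], '_children': {}}}, the
    # merge above yields, for any endpoint other than 'auth/get':
    #
    #     {'root': {
    #         '_required': ['name', 'api_user', 'api_token', 'action'],
    #         '_optional': ['api_group'],
    #         '_children': {
    #             'api_user':  {'_type': 'str'},
    #             'api_token': {'_type': 'str'},
    #             'action':    {'_type': 'str'},
    #             'api_group': {'_type': 'uuid4'}
    #         }
    #     }}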
def _merge_socket(self,j):
"""
        Merge request parameters for web socket requests. Used for handling connections
being passed along by the Socket.IO API proxy.
"""
# Load the socket request validator map
sv = json.loads(open('%s/socket.json' % T_BASE, 'r').read())
# Make sure the '_children' key exists
if not '_children' in j['root']:
j['root']['_children'] = {}
# Merge the socket parameters map
j['root']['_children']['socket'] = sv
j['root']['_optional'].append('socket')
def _build_map(self):
"""
Load all endpoint definitions.
"""
for endpoint in list(DBAuthEndpoints.objects.all().values()):
# Try to load the request map
try:
endpoint_rmap = json.loads(endpoint['rmap'])
# Map base object
rmap_base = {
'root': endpoint_rmap
}
# Merge the web socket request validator
self._merge_socket(rmap_base)
# Merge the authentication request validation parameters
self._merge_auth(rmap_base, endpoint['name'])
# Load the endpoint request handler module string
self.map[endpoint['name']] = {
'module': endpoint['mod'],
'class': endpoint['cls'],
'name': endpoint['name'],
'desc': endpoint['desc'],
'method': endpoint['method'],
'utils': None if not endpoint['utils'] else json.loads(endpoint['utils']),
'json': rmap_base
}
# Error constructing request map, skip to next endpoint map
except Exception as e:
LOG.exception('Failed to load request map for endpoint <%s>: %s ' % (endpoint['name'], str(e)))
continue
# All template maps constructed
return valid(LOG.info('Constructed API template map'))
def handler(self):
"""
Main method for constructing and returning the endpoint map.
@return valid|invalid
"""
map_rsp = self._build_map()
if not map_rsp['valid']:
return map_rsp
# Request path missing
if not self.endpoint:
return invalid(LOG.error('Missing request endpoint'))
# Invalid request path
if not self.endpoint in self.map:
return invalid(LOG.error('Unsupported request endpoint: <%s>' % self.endpoint))
# Verify the request method
if self.method != self.map[self.endpoint]['method']:
return invalid(LOG.error('Unsupported request method <%s> for endpoint <%s>' % (self.method, self.endpoint)))
# Get the API module, class handler, and name
self.handler_obj = {
'api_mod': self.map[self.endpoint]['module'],
'api_class': self.map[self.endpoint]['class'],
'api_name': self.map[self.endpoint]['name'],
'api_utils': self.map[self.endpoint]['utils'],
'api_map': self.map[self.endpoint]['json']
}
LOG.info('Parsed handler object for API endpoint <%s>: %s' % (self.endpoint, self.handler_obj))
# Return the handler module path
return valid(self.handler_obj)
|
djtaylor/cloudscape-DEPRECATED
|
python/cloudscape/engine/api/core/request.py
|
Python
|
gpl-3.0
| 15,200
|
# -*- coding: utf-8 -*-
# strsync - Automatically translate and synchronize .strings files from defined base language.
# Copyright (c) 2015 metasmile cyrano905@gmail.com (github.com/metasmile)
from __future__ import print_function
import strparser, strparser_intentdefinition, strlocale, strtrans
import time, os, sys, argparse, codecs, csv
from os.path import expanduser
from fuzzywuzzy import fuzz
from colorama import init
from colorama import Fore, Back, Style
import unicodedata2
init(autoreset=True)
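# Count user-perceived characters: normalize to NFC so that decomposed character
# sequences are composed before taking the length.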
def len_unicode(ustr):
return len(unicodedata2.normalize('NFC', ustr.decode('utf-8')))
def resolve_file_path(file):
return os.path.join(os.path.dirname(__file__), file)
def join_path_all(target_dir, target_files):
return map(lambda f: os.path.join(target_dir, f), target_files)
def rget(dictionary, key):
items = []
if key in dictionary:
items.append(dictionary[key])
for dict_value in [value for value in dictionary.values() if isinstance(value, dict)]:
items += rget(dict_value, key)
return items
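# Illustrative usage (added for clarity): rget() gathers the value of a key at
# every nesting level, recursing depth-first into dict-typed values, e.g.
#     rget({'a': 1, 'b': {'a': 2}}, 'a')  ->  [1, 2]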
def main():
parser = argparse.ArgumentParser(
description='Automatically translate and synchronize .strings files from defined base language.')
parser.add_argument('-b', '--base-lang-name',
help='A base(or source) localizable resource name.(default=\'Base\'), (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')',
default='Base', required=False)
parser.add_argument('-x', '--excluding-lang-names', type=str,
help='A localizable resource name that you want to exclude. (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')',
default=[], required=False, nargs='+')
parser.add_argument('-f', '--force-translate-keys', type=str,
help='Keys in the strings to update and translate by force. (input nothing for all keys.)',
default=[], required=False, nargs='*')
    parser.add_argument('-o', '--following-base-keys', type=str, help='Keys in the strings to follow from "Base".',
default=[], required=False, nargs='+')
    parser.add_argument('-w', '--following-base-if-not-exists', type=str, help='With this option, all keys will be followed up with base values if they do not exist.',
default=None, required=False, nargs='*')
parser.add_argument('-l', '--cutting-length-ratio-with-base', type=float,
                        help='A float ratio used to compare the length of translated values against the "Base" value',
default=[], required=False, nargs='+')
parser.add_argument('-c', '--ignore-comments', help='Allows ignoring comment synchronization.', default=None,
required=False, nargs='*')
parser.add_argument('-v', '--verify-results', help='Verify translated results via reversed results', default=None,
required=False, nargs='*')
parser.add_argument('-s', '--include-secondary-languages', help='Include Additional Secondary Languages. (+63 language codes)', default=None,
required=False, nargs='*')
parser.add_argument('-i', '--ignore-unverified-results',
help='Allows ignoring unverified results when appending them.', default=None, required=False,
nargs='*')
parser.add_argument('target path', help='Target localization resource path. (root path of Base.lproj, default=./)',
default='./', nargs='?')
    parser.add_argument('only for keys', help='Some specified keys for exclusive work. All operations will work only for those keys; other keys will be ignored. Not specified by default. (default=None)',
default=None, nargs='*')
args = vars(parser.parse_args())
reload(sys)
sys.setdefaultencoding('utf-8')
# configure arguments
__LOCALE_XCODE_BASE_LOWERCASE__ = 'base'
__DIR_SUFFIX__ = ".lproj"
__FILE_SUFFIX__ = ".strings"
__FILE_INTENT_SUFFIX__ = ".intentdefinition"
__FILE_DICT_SUFFIX__ = ".stringsdict"
__RESOURCE_PATH__ = expanduser(args['target path'])
__ONLY_FOR_KEYS__ = args['only for keys']
__BASE_LANG__ = args['base_lang_name']
__EXCLUDING_LANGS__ = args['excluding_lang_names']
__KEYS_FORCE_TRANSLATE__ = args['force_translate_keys']
__KEYS_FORCE_TRANSLATE_ALL__ = ('--force-translate-keys' in sys.argv or '-f' in sys.argv) and not __KEYS_FORCE_TRANSLATE__
__KEYS_FOLLOW_BASE__ = args['following_base_keys']
__CUTTING_LENGTH_RATIO__ = (args['cutting_length_ratio_with_base'] or [0])[0]
__FOLLOWING_ALL_KEYS_IFNOT_EXIST__ = args['following_base_if_not_exists'] is not None
__IGNORE_COMMENTS__ = args['ignore_comments'] is not None
__IGNORE_UNVERIFIED_RESULTS__ = args['ignore_unverified_results'] is not None
__RATIO_TO_IGNORE_UNVERIFIED_RESULTS__ = int(
args['ignore_unverified_results'][0]) if __IGNORE_UNVERIFIED_RESULTS__ and len(
args['ignore_unverified_results']) else 0
__VERIFY_TRANS_RESULTS__ = __IGNORE_UNVERIFIED_RESULTS__ or args['verify_results'] is not None
__INCLUDE_SECONDARY_LANGUAGES__ = args['include_secondary_languages'] is not None
# Locale settings
# [language designator] en, fr
# [language designator]_[region designator] en_GB, zh_HK
# [language designator]-[script designator] az-Arab, zh-Hans
# [language designator]-[script designator]_[region designator] zh-Hans_HK
print('(i) Initializing for supported languages ...')
__lang_codes = strlocale.default_supporting_xcode_lang_codes()
if __INCLUDE_SECONDARY_LANGUAGES__:
__lang_codes += strlocale.secondary_supporting_xcode_lang_codes()
__XCODE_LPROJ_SUPPORTED_LOCALES_MAP__ = strlocale.map_locale_codes(__lang_codes, strtrans.supported_locale_codes())
__XCODE_LPROJ_SUPPORTED_LOCALES__ = __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__.keys()
print(Fore.WHITE + '(i) Supported numbers of locale code :', str(len(__XCODE_LPROJ_SUPPORTED_LOCALES__)),
Style.RESET_ALL)
print(__XCODE_LPROJ_SUPPORTED_LOCALES__)
# handle base
if __BASE_LANG__.endswith(__DIR_SUFFIX__):
__BASE_RESOUCE_DIR__ = __BASE_LANG__
__BASE_LANG__ = __BASE_LANG__.split(__DIR_SUFFIX__)[0]
else:
__BASE_RESOUCE_DIR__ = __BASE_LANG__ + __DIR_SUFFIX__
if not __BASE_LANG__.lower() == __LOCALE_XCODE_BASE_LOWERCASE__:
__BASE_LANG__ = strlocale.lang(__BASE_LANG__)
# setup Translator & langs
# read ios langs
print(Fore.WHITE + '(i) Fetching supported locale codes for ios9 ...', Style.RESET_ALL)
__IOS9_CODES__ = [lang_row[0] for lang_row in
csv.reader(open(resolve_file_path('lc_ios9.tsv'), 'rb'), delimiter='\t')]
print(Fore.WHITE + '(i) Supported numbers of locale code :', len(__IOS9_CODES__), Style.RESET_ALL)
global_result_logs = {}
def merge_two_dicts(x, y):
'''Given two dicts, merge them into a new dict as a shallow copy.'''
z = x.copy()
z.update(y)
return z
# core function
def synchronize(target_file, lc): #add,remove, update (translate or copy from base)
# parse target file
target_kv = {}
target_kc = {}
target_error_lines = []
if not notexist_or_empty_file(target_file):
parsed_strings = strparser.parse_strings(filename=target_file)
for item in parsed_strings:
k, e = item['key'], item['error']
# line error
if e:
target_error_lines.append(e)
if not target_error_lines:
target_kv[k] = item['value']
target_kc[k] = item['comment']
# parsing complete or return.
if target_error_lines:
print('(!) Syntax error - Skip')
return False, None, None, target_error_lines
# base
base_content = base_dict[os.path.basename(target_file)]
base_kv = {}
base_kc = {}
for item in base_content:
k, e = item['key'], item['error']
# line error
if e:
print('(!) WARNING : Syntax error from Base -> ', k, ':', e)
base_kv[k] = item['value']
base_kc[k] = item['comment']
force_adding_keys = base_kv.keys() if __KEYS_FORCE_TRANSLATE_ALL__ else __KEYS_FORCE_TRANSLATE__
adding_keys = list(
((set(base_kv.keys()) - set(target_kv.keys())) | (set(base_kv.keys()) & set(force_adding_keys))) \
- set(base_kv.keys() if __FOLLOWING_ALL_KEYS_IFNOT_EXIST__ else __KEYS_FOLLOW_BASE__) \
)
removing_keys = list(set(target_kv.keys()) - set(base_kv.keys()))
existing_keys = list(set(base_kv.keys()) - (set(adding_keys) | set(removing_keys)))
# Filter if __ONLY_FOR_KEYS__ option activated
if __ONLY_FOR_KEYS__:
adding_keys = list(set(adding_keys) & set(__ONLY_FOR_KEYS__))
removing_keys = list(set(removing_keys) & set(__ONLY_FOR_KEYS__))
existing_keys = list(set(existing_keys) & set(__ONLY_FOR_KEYS__))
updated_keys = []
"""
perform translate
"""
translated_kv = {}
reversed_matched_kv = {} # {"ratio":float, "ignored":True|False}
reversed_translated_kv = {}
if len(adding_keys):
print('Translating...')
translated_kv = dict(zip(adding_keys, strtrans.translate_strs([base_kv[k] for k in adding_keys], lc)))
if __VERIFY_TRANS_RESULTS__:
print('Reversing results and matching...')
reversed_translated_kv = dict(
zip(adding_keys, strtrans.translate_strs([translated_kv[_ak] for _ak in adding_keys], 'en')))
for bk in adding_keys:
if bk in reversed_translated_kv:
ratio = fuzz.partial_ratio(base_kv[bk], reversed_translated_kv[bk])
should_ignore = __IGNORE_UNVERIFIED_RESULTS__ and ratio <= __RATIO_TO_IGNORE_UNVERIFIED_RESULTS__
if should_ignore:
translated_kv[bk] = base_kv[bk] # copy from base set
reversed_matched_kv[bk] = {"ratio": ratio, "ignored": should_ignore}
updated_content = []
for item in base_content:
k = item['key']
newitem = dict.fromkeys(item.keys())
newitem['key'] = k
target_value, target_comment = target_kv.get(k), target_kc.get(k)
newitem['value'] = target_value or item['value']
newitem['comment'] = target_comment if __IGNORE_COMMENTS__ else target_comment or base_kc[k]
needs_update_comment = False if __IGNORE_COMMENTS__ else not target_comment and base_kc[k]
# added
if k in adding_keys:
if k in translated_kv:
newitem['value'] = translated_kv[k]
if not newitem['comment']:
newitem['comment'] = 'Translated from: {0}'.format(base_kv[k])
reversed_matched_msg = ''
if k in reversed_matched_kv:
reversed_matched_msg = Fore.CYAN + "({}% Matched{}: \'{}\' <- \'{}\' <- \'{}\')".format(
reversed_matched_kv[k]["ratio"],
", So ignored [X]" if reversed_matched_kv[k]["ignored"] else "", reversed_translated_kv[k],
newitem['value'], base_kv[k]) + Style.RESET_ALL
print('[Add] "{0}" = "{1}" <- {2}'.format(k, newitem['value'], base_kv[k]), reversed_matched_msg)
else:
newitem['value'] = target_kv[k]
if not newitem['comment']:
newitem['comment'] = 'Translate failed from: {0}'.format(base_kv[k])
print(Fore.RED + '[Error] "{0}" = "{1}" X <- {2}'.format(k, newitem['value'],
base_kv[k]) + Style.RESET_ALL)
# exists
elif k in existing_keys:
if k != "Base" and __CUTTING_LENGTH_RATIO__>0:
if target_value != base_kv[k] \
and len_unicode(target_value) > float(len_unicode(base_kv[k]))*__CUTTING_LENGTH_RATIO__ \
or needs_update_comment:
                        print(Fore.YELLOW + '(!) Length of "', target_value, '" is longer than "', base_kv[k], '" as',
len(target_value), '>', len(base_kv[k]), Style.RESET_ALL)
newitem['value'] = base_kv[k]
updated_keys.append(k)
if not lc in global_result_logs:
global_result_logs[lc] = {}
global_result_logs[lc][k] = (target_value, base_kv[k])
else:
newitem['value'] = target_value or base_kv[k]
elif k in __KEYS_FOLLOW_BASE__:
newitem['value'] = base_kv[k]
if target_value != base_kv[k] or needs_update_comment:
updated_keys.append(k)
else:
newitem['value'] = target_value or base_kv[k]
if not target_value or needs_update_comment:
updated_keys.append(k)
updated_content.append(newitem)
# removed or wrong
for k in removing_keys:
print(Fore.RED + '[Remove]', k, Style.RESET_ALL)
if len(adding_keys) or len(updated_keys) or len(removing_keys):
print(Fore.WHITE + '(i) Changed Keys: Added {0}, Updated {1}, Removed {2}'.format(len(adding_keys),
len(updated_keys),
len(removing_keys)),
Style.RESET_ALL)
# check verification failed items
target_verified_items = None
if len(reversed_matched_kv):
target_verified_items = {
k: {'ratio': reversed_matched_kv[k]["ratio"], 'original': base_kv[k],
'reversed': reversed_translated_kv[k],
'translated': translated_kv[k]} for k in reversed_matched_kv.keys()}
return updated_content and (len(adding_keys) > 0 or len(updated_keys) > 0 or len(
removing_keys) > 0), updated_content, translated_kv, target_error_lines, target_verified_items
def write_file(target_file, parsed_list):
        suc = False
        f = None
        try:
f = codecs.open(target_file, "w", "utf-8")
contents = ''
for content in parsed_list:
if content['comment']:
contents += '/*{0}*/'.format(content['comment']) + '\n'
contents += '"{0}" = "{1}";'.format(content['key'], content['value']) + '\n'
f.write(contents)
suc = True
except IOError:
print('IOError to open', target_file)
        finally:
            if f is not None:
                f.close()
return suc
def remove_file(target_file):
try:
os.rename(target_file, target_file + '.deleted')
return True
except IOError:
print('IOError to rename', target_file)
return False
def create_file(target_file):
open(target_file, 'a').close()
def notexist_or_empty_file(target_file):
return not os.path.exists(target_file) or os.path.getsize(target_file) == 0
def resolve_file_names(target_file_names):
return map(lambda f: f.decode('utf-8'), filter(lambda f: f.endswith(__FILE_SUFFIX__) or f.endswith(__FILE_INTENT_SUFFIX__), target_file_names))
base_dict = {}
results_dict = {}
# Get Base Language Specs
walked = list(os.walk(__RESOURCE_PATH__, topdown=True))
# Init with Base.lproj
for dir, subdirs, files in walked:
if os.path.basename(dir) == __BASE_RESOUCE_DIR__:
for _file in resolve_file_names(files):
f = os.path.join(dir, _file)
if notexist_or_empty_file(f):
continue
parsed_objs = None
# parse .strings
if f.endswith(__FILE_SUFFIX__):
parsed_objs = strparser.parse_strings(filename=f)
# parse .intentdefinition
elif f.endswith(__FILE_INTENT_SUFFIX__):
print('[i] Found "{0}" in {1}. Parse ....'.format(os.path.basename(f), __BASE_RESOUCE_DIR__))
parsed_objs = strparser_intentdefinition.parse_strings(f)
                    # replace with destination extension .strings
_file = _file.replace(__FILE_INTENT_SUFFIX__, __FILE_SUFFIX__)
# write original .strings file to local
write_file(os.path.join(dir, _file), parsed_objs)
if not parsed_objs:
continue
base_dict[_file] = parsed_objs
if not base_dict:
        print('[!] "{0}" not found in target path "{1}"'.format(__BASE_RESOUCE_DIR__, __RESOURCE_PATH__))
sys.exit(0)
# Exist or Create supporting lproj dirs.
    print('Check and verify resources ...')
current_lproj_names = [os.path.splitext(os.path.basename(lproj_path))[0] for lproj_path in
filter(lambda d: d.endswith(__DIR_SUFFIX__), [dir for dir, subdirs, files in walked])]
notexisted_lproj_names = list(set(__XCODE_LPROJ_SUPPORTED_LOCALES__) - set(current_lproj_names))
creating_lproj_dirs = [expanduser(os.path.join(__RESOURCE_PATH__, ln + __DIR_SUFFIX__)) for ln in
notexisted_lproj_names]
if creating_lproj_dirs:
        print('The following lproj dirs do not exist. Creating ...')
for d in creating_lproj_dirs:
print('Created', d)
os.mkdir(d)
# Start to sync localizable files.
print('Start synchronizing...')
for file in base_dict:
print('Target:', file)
for dir, subdirs, files in walked:
files = resolve_file_names(files)
if dir.endswith((__DIR_SUFFIX__)):
lproj_name = os.path.basename(dir).split(__DIR_SUFFIX__)[0]
if lproj_name == __BASE_LANG__:
continue
                if lproj_name not in __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__:
                    print('Not supported: ', lproj_name)
continue
lc = __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__[lproj_name]
if strlocale.matched_locale_code(lc, __EXCLUDING_LANGS__):
print('Skip: ', lc)
continue
results_dict[lc] = {
'deleted_files': [],
'added_files': [],
'updated_files': [],
'skipped_files': [],
'translated_files_lines': {},
'error_lines_kv': {},
'verified_result': {}
}
# if not supported_lang(lc):
# print('Does not supported: ', lc)
# results_dict[lc]['skipped_files'] = join_path_all(dir, files)
# continue
                print('\n', 'Analyzing localizables... {1} (at {0})'.format(dir, lc))
added_files = list(set(base_dict.keys()) - set(files))
removed_files = list(set(files) - set(base_dict.keys()))
existing_files = list(set(files) - (set(added_files) | set(removed_files)))
added_files = join_path_all(dir, added_files)
removed_files = join_path_all(dir, removed_files)
existing_files = join_path_all(dir, existing_files)
added_cnt, updated_cnt, removed_cnt = 0, 0, 0
translated_files_lines = results_dict[lc]['translated_files_lines']
error_files = results_dict[lc]['error_lines_kv']
# remove - file
for removed_file in removed_files:
print('Removing File... {0}'.format(removed_file))
if remove_file(removed_file):
removed_cnt += 1
# add - file
for added_file in added_files:
print('Adding File... {0}'.format(added_file))
create_file(added_file)
u, c, t, e, m = synchronize(added_file, lc)
# error
if e:
error_files[added_file] = e
# normal
elif u and write_file(added_file, c):
added_cnt += 1
translated_files_lines[added_file] = t
# verify failed
for k in (m or {}):
results_dict[lc]['verified_result'][k] = m[k]
# exist - lookup lines
for ext_file in existing_files:
u, c, t, e, m = synchronize(ext_file, lc)
# error
if e:
error_files[ext_file] = e
# normal
elif u:
print('Updating File... {0}'.format(ext_file))
if write_file(ext_file, c):
                            updated_cnt += 1
translated_files_lines[ext_file] = t
# verify failed
for k in (m or {}):
results_dict[lc]['verified_result'][k] = m[k]
if added_cnt or updated_cnt or removed_cnt or error_files:
print(Fore.WHITE + '(i) Changed Files : Added {0}, Updated {1}, Removed {2}, Error {3}'.format(
added_cnt, updated_cnt, removed_cnt, len(error_files.keys())), Style.RESET_ALL)
else:
print('Nothing to translate or add.')
"""
Results
"""
results_dict[lc]['deleted_files'] = removed_files
results_dict[lc]['added_files'] = list(set(added_files) & set(translated_files_lines.keys()))
results_dict[lc]['updated_files'] = list(set(existing_files) & set(translated_files_lines.keys()))
if error_files:
print(error_files)
results_dict[lc]['error_lines_kv'] = error_files
# print(total Results)
print('')
    t_file_cnt = \
        t_line_cnt = \
        file_add_cnt = \
        file_remove_cnt = \
        file_update_cnt = \
        file_skip_cnt = \
        0
for lc in results_dict.keys():
result_lc = results_dict[lc]
file_add_cnt += len(result_lc['added_files'])
file_remove_cnt += len(result_lc['deleted_files'])
file_update_cnt += len(result_lc['updated_files'])
file_skip_cnt += len(result_lc['skipped_files'])
for f in result_lc['added_files']: print('Added', f)
for f in result_lc['deleted_files']: print('Removed', f)
for f in result_lc['updated_files']: print('Updated', f)
        for f in result_lc['skipped_files']: print('Skipped', f)
tfiles = result_lc['translated_files_lines']
if tfiles:
# print('============ Results for langcode : {0} ============='.format(lc))
for f in tfiles:
t_file_cnt += 1
if len(tfiles[f]):
# print('', f)
for key in tfiles[f]:
t_line_cnt += 1
# print(key, ' = ', tfiles[f][key])
for lc in global_result_logs.keys():
print(lc)
for t in global_result_logs[lc].keys():
o, b = global_result_logs[lc][t]
print(o.decode('utf-8'), ' -> ', b)
print('')
# WARN
    found_warnings = filter(lambda i: i or None, rget(results_dict, 'error_lines_kv'))
    if found_warnings:
        print(
            Fore.YELLOW + '\n[!] WARNING: Found strings that contain syntax errors. Please confirm.' + Style.RESET_ALL)
        for a in found_warnings:
for k in a:
print('at', k)
for i in a[k]:
print(' ', i)
# VERIFY FAILED
verified_results = filter(lambda i: i or None, rget(results_dict, 'verified_result'))
if verified_results and len(verified_results):
print(
Fore.GREEN + '\n[i] VERIFIED RESULTS: Matched ratio via reversed translation results. Please confirm.' + Style.RESET_ALL)
for lc in results_dict:
print(lc)
vr = results_dict[lc]['verified_result']
for k in vr:
vd = vr[k]
status_msg = Fore.RED + '(Ignored) ' + Style.RESET_ALL if __IGNORE_UNVERIFIED_RESULTS__ and vd[
'ratio'] <= __RATIO_TO_IGNORE_UNVERIFIED_RESULTS__ else ''
print(' {}{}: {} -> {} -> {}, Matched: {}%'.format(status_msg, k
, vd['original']
, vd['translated']
, vd['reversed']
, str(vd['ratio'])))
print('')
if file_add_cnt or file_update_cnt or file_remove_cnt or file_skip_cnt:
print('Total New Translated Strings : {0}'.format(t_line_cnt))
print('Changed Files Total : Added {0}, Updated {1}, Removed {2}, Skipped {3}'.format(file_add_cnt,
file_update_cnt,
file_remove_cnt,
file_skip_cnt))
print("Synchronized.")
else:
print("All strings are already synchronized. Nothing to translate or add.")
return
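# Illustrative sketch (not part of the original tool): the reverse-translation check in
# synchronize() reduces to a fuzzy-match ratio threshold. The helper name and the default
# threshold below are assumptions added for demonstration only.
def _passes_reverse_check(original, reversed_translation, min_ratio=70):
    return fuzz.partial_ratio(original, reversed_translation) >= min_ratio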
|
metasmile/strsync
|
strsync/strsync.py
|
Python
|
gpl-3.0
| 26,146
|
../../../../../../share/pyshared/papyon/gnet/proxy/SOCKS5.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/papyon/gnet/proxy/SOCKS5.py
|
Python
|
gpl-3.0
| 60
|
"""
BIANA: Biologic Interactions and Network Analysis
Copyright (C) 2009 Javier Garcia-Garcia, Emre Guney, Baldo Oliva
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import ExternalEntity
import ExternalEntityAttribute
class Ontology(ExternalEntity.ExternalEntity):
"""
Class to represent a general ontology
    The Ontology is an external entity itself. Each of its elements is also an ExternalEntity object
    Each ontology has a "linkedAttribute", which is the primary attribute to represent the ontology (for example, taxID for taxonomy), and a "descriptionAttribute", which is an attribute that describes the external entity element
"""
def __init__(self, source_database, name, linkedAttribute, descriptionAttribute, id=None, levelAttribute=None ):
"""
"source_database" is the source database id where this entity is described
"name" is the name for the ontology. It must be UNIQUE! There cannot be different ontologies with the same name
"linkedAttribute" is the attribute_identifier for the primary attribute of the ontology (for example, taxID for taxonomy ontology)
"descriptionAttribute" is the attribute_identifier for representing a human readable description of the element. This attribute is used when showing the ontolgy to users
"id" is the UNIQUE identifier in the database for this external entity (as the ontology is an External Entity)
"""
self.name = name
self.trees = {} # is_a in the first position, is_part_of in the second one
self.hierarchy = {} # vertical hierarchy
self.root_ids = set()
self.linked_attribute = linkedAttribute
self.level_attribute = levelAttribute
self.linked_attribute_values = {}
self._attrID2id = {}
self.description_attribute = descriptionAttribute
self.externalEntityObjs = {}
self.all_ids = set()
self.precalculated_descendants = {}
ExternalEntity.ExternalEntity.__init__(self, source_database = source_database, type="ontology", id=id)
def add_element(self, ontologyElementID, isA=[], isPartOf=[], linkedAttributeValue=None):
"""
Adds an element to the ontology.
"ontologyElementID": externalEntityID that identifies the externalEntityObject belonging to the ontology
"isA": list with the externalEntityIDs of the parents of this element
"isPartOf": list with the externalEntityIDs of the elements to which this element is part of
"linkedAttributeValue" is the value of the main attribute of the added external entity. Not mandatory.
"""
self.all_ids.add(ontologyElementID)
self.linked_attribute_values[ontologyElementID] = linkedAttributeValue
self._attrID2id[linkedAttributeValue]=ontologyElementID
self.hierarchy.setdefault(ontologyElementID,[])
if( len(isA)==0 ):
self.root_ids.add(ontologyElementID)
self.trees[ontologyElementID] = (isA,isPartOf)
for current_parent in isA:
self.hierarchy.setdefault(current_parent,[]).append(ontologyElementID)
# Adding part_of to the hierarchy
for current_parent in isPartOf:
self.hierarchy.setdefault(current_parent,[]).append(ontologyElementID)
def _set_external_entities_dict(self, externalEntitiesDict):
"""
Sets the external entity objects corresponding to the elements of the ontology
"externalEntitiesDict": Dictionary with all the external entities. Key: externalEntityID. Value: externalEntity Object
Objects are only required for printing the ontology
"""
self.externalEntityObjs = externalEntitiesDict
def get_all_external_entity_ids(self):
return self.all_ids
def linkedAttrID2ID(self, attr_id):
return self._attrID2id[attr_id]
def get_descendants(self, ontologyElementID):
"""
Gets all the descendants, using the "is_a" relation
"""
        if ontologyElementID in self.precalculated_descendants:
return self.precalculated_descendants[ontologyElementID]
result = set()
#result = []
for current_descendant_id in self.hierarchy[ontologyElementID]:
if current_descendant_id == ontologyElementID:
sys.stderr.write("Ontology has a loop. An element %s [%s] is a child of itself?\n" %(current_descendant_id,self.linked_attribute_values[current_descendant_id]))
return result
else:
if current_descendant_id not in result:
result.add(current_descendant_id)
result.update(self.get_descendants(current_descendant_id))
# result.update(self.get_descendants(current_descendant_id))
else:
sys.stderr.write("Ontology has a loop, between %s [%s] and %s [%s]\n" %(current_descendant_id,
self.linked_attribute_values[current_descendant_id],
ontologyElementID,
self.linked_attribute_values[ontologyElementID]))
self.precalculated_descendants[ontologyElementID] = result
return result
def get_linked_attr_and_description_tuples(self, value_seperator=", "):
"""
Returns a list of tuples with the format: (linked_attr, descriptive_attr)
"""
if len(self.externalEntityObjs) == 0:
sys.stderr.write("External Entities have not been retrieved when Ontology is loaded! Information not available\n")
return []
return [ ( self.linked_attribute_values[x], value_seperator.join([ y.value for y in self.externalEntityObjs[x].get_attribute(self.description_attribute)]) ) for x in self.linked_attribute_values ]
def get_all_linked_attributes(self):
"""
Returns a list with the main attribute for all the elements in the ontology
"""
return self.linked_attribute_values.values()
def get_all_external_entity_ids(self):
"""
Returns a list with the external entity ids of all the elements in the ontology
"""
return self.linked_attribute_values.keys()
def has_element(self, linkedAttributeID):
"""
Returns a boolean indicating if an external entity with this attribute is found in the ontology
"""
return linkedAttributeID in self.linked_attribute_values
def get_parents_ids(self, elementID):
"""
Returns a list with the parents of the element with this externalEntityID (using the relation is_a)
"""
return self.trees[elementID][0]
def get_part_parents_ids(self, elementID):
"""
Returns a list with the parents of the element with this externalEntityID (using the relation is_part_of)
"""
return self.trees[elementID][1]
def _recursive_tree_print(self, id, outmethod, depth=0):
"""
Prints recursively in stdout a tree representing the ontology, using the external entity id.
Only for testing purposes
"""
for x in xrange(depth):
outmethod("\t")
#outmethod(str(id))
outmethod("%s [%s]" %('|'.join([ x.value for x in self.externalEntityObjs[id].get_attribute(self.description_attribute) ]),self.linked_attribute_values[id]))
outmethod("\n")
depth = depth+1
for current_descendant_id in self.hierarchy[id]:
self._recursive_tree_print(current_descendant_id, outmethod, depth)
def print_tree(self, outmethod=sys.stdout.write):
"""
Prints recursively in stdout a tree representing the ontology, using the external entity id.
Only for testing purposes
"""
#print self.root_ids
for x in self.root_ids:
self._recursive_tree_print( id = x,
depth = 0,
outmethod = outmethod )
def _recursive_tree_xml(self, id):
nodes_xml = [ "<node ontologyNodeID=\"%s\" id=\"%s\">" %(self.linked_attribute_values[id],
'|'.join([ x.value for x in self.externalEntityObjs[id].get_attribute(self.description_attribute) ])) ]
for current_descendant_id in self.hierarchy[id]:
nodes_xml.extend(self._recursive_tree_xml(current_descendant_id))
nodes_xml.append("</node>")
return nodes_xml
def get_xml(self):
"""
Returns a String with an XML representation of the ontology
        To execute this method, it is necessary to have loaded all the external entity objects into the ontology
"""
nodes_xml = ["<ontology name=\"%s\">" %self.name]
for x in self.root_ids:
nodes_xml.extend(self._recursive_tree_xml( id = x) )
nodes_xml.append("</ontology>")
return "\n".join(nodes_xml)
def _traverse_tree_for_leafs(self, id):
"""
Helper function to reach leafs traversing the tree
"""
if len(self.hierarchy[id]) == 0:
#print self.linked_attribute_values[id], self.hierarchy[id], self.trees[id]
nodes = [ ", ".join([ x.value for x in self.externalEntityObjs[id].get_attribute(self.description_attribute) ]) ]
else:
nodes = []
for current_descendant_id in self.hierarchy[id]:
nodes.extend(self._traverse_tree_for_leafs(current_descendant_id))
return nodes
def get_leafs(self):
"""
Returns a list of leafs in the ontology tree
        To execute this method, it is necessary to have loaded all the external entity objects into the ontology
"""
leaf_nodes = []
for x in self.root_ids:
leaf_nodes.extend(self._traverse_tree_for_leafs( id = x) )
return leaf_nodes
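# --- Illustrative sketch (not part of BIANA) ---
# A self-contained mirror of the traversal performed by Ontology.get_descendants(),
# operating directly on a {parent_id: [child_ids]} hierarchy dictionary. The function
# name is an assumption added for demonstration only.
def _collect_descendants(hierarchy, element_id, _seen=None):
    seen = set() if _seen is None else _seen
    for child_id in hierarchy.get(element_id, []):
        if child_id not in seen:
            seen.add(child_id)
            _collect_descendants(hierarchy, child_id, seen)
    return seen
# Example: _collect_descendants({1: [2, 3], 2: [4], 3: [], 4: []}, 1) -> {2, 3, 4}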
|
emreg00/biana
|
biana/BianaObjects/Ontology.py
|
Python
|
gpl-3.0
| 10,887
|
import argparse
from pyfastaq import tasks
def run(description):
parser = argparse.ArgumentParser(
description = 'Prints the number of sequences in input file to stdout',
usage = 'fastaq count_sequences <infile>')
parser.add_argument('infile', help='Name of input file')
options = parser.parse_args()
print(tasks.count_sequences(options.infile))
|
martinghunt/Fastaq
|
pyfastaq/runners/count_sequences.py
|
Python
|
gpl-3.0
| 379
|
# -*- coding: utf-8 -*-
#
# Copyright © 2014 René Samselnig
#
# This file is part of Database Navigator.
#
# Database Navigator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Database Navigator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Database Navigator. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import logging
import pdb
import urllib2
import json
import ijson
from dbmanagr.writer import Writer
from dbmanagr import logger as log
from dbmanagr.jsonable import from_json
COMMANDS = {
'dbdiff': 'differ',
'dbexec': 'executer',
'dbexport': 'exporter',
'dbgraph': 'grapher',
'dbnav': 'navigator'
}
class Wrapper(object):
def __init__(self, options=None):
self.options = options
def write(self):
try:
sys.stdout.write(Writer.write(self.run()))
except BaseException as e:
log.logger.exception(e)
return -1
return 0
def execute(self): # pragma: no cover
"""To be overridden by sub classes"""
pass
def run(self):
try:
if (
self.options is not None
and self.options.daemon): # pragma: no cover
log.logger.debug('Executing remotely')
return self.executer(*sys.argv)
log.logger.debug('Executing locally')
return self.execute()
except BaseException as e:
log.logger.exception(e)
if log.logger.getEffectiveLevel() <= logging.DEBUG:
# Start post mortem debugging only when debugging is enabled
if os.getenv('UNITTEST', 'False') == 'True':
raise
if self.options.trace: # pragma: no cover
pdb.post_mortem(sys.exc_info()[2])
else:
# Show the error message if log level is INFO or higher
log.log_error(e) # pragma: no cover
def executer(self, *args): # pragma: no cover
"""Execute remotely"""
options = self.options
try:
# from dbmanagr import daemon
# if not daemon.is_running(options):
# daemon.start_server(options)
url = 'http://{host}:{port}/{path}'.format(
host=options.host,
port=options.port,
path=COMMANDS[options.prog])
request = json.dumps(args[1:])
log.logger.debug('Request to %s:\n%s', url, request)
response = urllib2.urlopen(url, request)
for i in ijson.items(response, 'item'):
yield from_json(i)
except urllib2.HTTPError as e:
raise from_json(json.load(e))
except urllib2.URLError as e:
log.logger.error('Daemon not available: %s', e)
except BaseException as e:
log.logger.exception(e)
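# --- Illustrative sketch (not part of dbnavigator) ---
# A minimal Wrapper subclass: with options=None the daemon branch in run() is skipped,
# so run() simply returns whatever execute() produces. The class name and return value
# below are assumptions added for demonstration only.
class _EchoWrapper(Wrapper):
    def execute(self):
        return ['hello from execute()']
# Example (assumed usage): _EchoWrapper(options=None).run() -> ['hello from execute()']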
|
resamsel/dbnavigator
|
src/dbmanagr/wrapper.py
|
Python
|
gpl-3.0
| 3,349
|
# -*- coding: utf-8 -*-
import unittest
from test.basetestcases import PluginLoadingMixin
class StatisticsLoadingTest (PluginLoadingMixin, unittest.TestCase):
def getPluginDir(self):
"""
        Must return the path to the directory of the plugin under test
"""
return "../plugins/statistics"
def getPluginName(self):
"""
        Must return the name of the plugin, by which it can be
        found in PluginsLoader
"""
return "Statistics"
|
unreal666/outwiker
|
src/test/plugins/statistics/test_loading.py
|
Python
|
gpl-3.0
| 587
|
#!/usr/bin/env python
import argparse
import json
import csv
import sys
sys.path.append('python')
import plotting
import utils
from opener import opener
parser = argparse.ArgumentParser()
parser.add_argument('-b', action='store_true') # passed on to ROOT when plotting
parser.add_argument('--outdir', required=True)
parser.add_argument('--plotdirs', required=True)
parser.add_argument('--names', required=True)
parser.add_argument('--stats', default='')
parser.add_argument('--no-errors', action='store_true')
parser.add_argument('--plot-performance', action='store_true')
parser.add_argument('--scale-errors')
parser.add_argument('--rebin', type=int)
parser.add_argument('--colors')
parser.add_argument('--linestyles')
parser.add_argument('--datadir', default='data/imgt')
parser.add_argument('--leaves-per-tree')
parser.add_argument('--linewidths')
parser.add_argument('--markersizes')
parser.add_argument('--dont-calculate-mean-info', action='store_true')
parser.add_argument('--normalize', action='store_true')
parser.add_argument('--graphify', action='store_true')
parser.add_argument('--strings-to-ignore') # remove this string from the plot names in each dir (e.g. '-mean-bins') NOTE replaces '_' with '-'
args = parser.parse_args()
if args.strings_to_ignore is not None:
args.strings_to_ignore = args.strings_to_ignore.replace('_', '-')
args.plotdirs = utils.get_arg_list(args.plotdirs)
args.scale_errors = utils.get_arg_list(args.scale_errors)
args.colors = utils.get_arg_list(args.colors, intify=True)
args.linestyles = utils.get_arg_list(args.linestyles, intify=True)
args.names = utils.get_arg_list(args.names)
args.leaves_per_tree = utils.get_arg_list(args.leaves_per_tree, intify=True)
args.strings_to_ignore = utils.get_arg_list(args.strings_to_ignore)
args.markersizes = utils.get_arg_list(args.markersizes, intify=True)
args.linewidths = utils.get_arg_list(args.linewidths, intify=True)
for iname in range(len(args.names)):
args.names[iname] = args.names[iname].replace('@', ' ')
assert len(args.plotdirs) == len(args.names)
with opener('r')(args.datadir + '/v-meta.json') as json_file: # get location of <begin> cysteine in each v region
args.cyst_positions = json.load(json_file)
with opener('r')(args.datadir + '/j_tryp.csv') as csv_file: # get location of <end> tryptophan in each j region (TGG)
tryp_reader = csv.reader(csv_file)
args.tryp_positions = {row[0]:row[1] for row in tryp_reader} # WARNING: this doesn't filter out the header line
plotting.compare_directories(args)
|
psathyrella/partis-deprecated
|
python/compare.py
|
Python
|
gpl-3.0
| 2,531
|
# browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <johann@browsershots.org>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
ShotFactory library.
"""
__revision__ = "$Rev: 2006 $"
__date__ = "$Date: 2007-08-20 06:02:52 +0530 (Mon, 20 Aug 2007) $"
__author__ = "$Author: johann $"
|
mintuhouse/shotfactory
|
shotfactory04/__init__.py
|
Python
|
gpl-3.0
| 924
|
from __future__ import absolute_import
import urlparse
import urllib
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode, smart_str
from .managers import RecentSearchManager
from .classes import SearchModel
class RecentSearch(models.Model):
"""
Keeps a list of the n most recent search keywords for a given user
"""
user = models.ForeignKey(User, verbose_name=_(u'user'), editable=False)
query = models.TextField(verbose_name=_(u'query'), editable=False)
datetime_created = models.DateTimeField(verbose_name=_(u'datetime created'), editable=False)
hits = models.IntegerField(verbose_name=_(u'hits'), editable=False)
objects = RecentSearchManager()
def __unicode__(self):
document_search = SearchModel.get('documents.Document')
query_dict = urlparse.parse_qs(urllib.unquote_plus(smart_str(self.query)))
if self.is_advanced():
# Advanced search
advanced_string = []
for key, value in query_dict.items():
search_field = document_search.get_search_field(key)
advanced_string.append(u'%s: %s' % (search_field.label, smart_unicode(' '.join(value))))
display_string = u', '.join(advanced_string)
else:
# Is a simple search
display_string = smart_unicode(' '.join(query_dict['q']))
return u'%s (%s)' % (display_string, self.hits)
def save(self, *args, **kwargs):
self.datetime_created = datetime.now()
super(RecentSearch, self).save(*args, **kwargs)
def url(self):
view = 'results' if self.is_advanced() else 'search'
return '%s?%s' % (reverse(view), self.query)
def is_advanced(self):
return 'q' not in urlparse.parse_qs(self.query)
class Meta:
ordering = ('-datetime_created',)
verbose_name = _(u'recent search')
verbose_name_plural = _(u'recent searches')
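# --- Illustrative sketch (not part of Mayan) ---
# is_advanced() above treats any stored query string without a 'q' parameter as an
# advanced search. A standalone illustration of the same urlparse-based check; the
# function name is an assumption added for demonstration only.
def _query_is_advanced(query_string):
    return 'q' not in urlparse.parse_qs(query_string)
# _query_is_advanced('q=invoice')           -> False (simple search)
# _query_is_advanced('label=draft&page=2')  -> True  (advanced search)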
|
appsembler/mayan_appsembler
|
apps/dynamic_search/models.py
|
Python
|
gpl-3.0
| 2,130
|
import sys
import logging
import numpy as np
from aravis import Camera
if __name__ == "__main__":
#cam = ar.get_camera("Prosilica-02-2130A-06106")
#cam = Camera("AT-Automation Technology GmbH-20805103")
cam = Camera(loglevel=logging.DEBUG)
if len(sys.argv) > 1:
path = sys.argv[1]
else:
path = "frame.npy"
#cam.start_acquisition_trigger()
cam.start_acquisition_continuous()
frame = cam.pop_frame()
print("Saving frame to ", path)
np.save(path, frame)
cam.stop_acquisition()
|
oroulet/python-aravis
|
examples/save-frame.py
|
Python
|
gpl-3.0
| 538
|
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansDevanagari-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0065) #uni0961
glyphs.append(0x0178) #glyph00376
glyphs.append(0x02D8) #glyph00728
glyphs.append(0x0179) #glyph00377
glyphs.append(0x02EF) #four
glyphs.append(0x024D) #glyph00589
glyphs.append(0x0310) #uniFEFF
glyphs.append(0x0176) #glyph00374
glyphs.append(0x01AC) #glyph00428
glyphs.append(0x01AD) #glyph00429
glyphs.append(0x01AA) #glyph00426
glyphs.append(0x01AB) #glyph00427
glyphs.append(0x01A8) #glyph00424
glyphs.append(0x0177) #glyph00375
glyphs.append(0x01A6) #glyph00422
glyphs.append(0x01A7) #glyph00423
glyphs.append(0x01A4) #glyph00420
glyphs.append(0x01A5) #glyph00421
glyphs.append(0x030A) #quotedblright
glyphs.append(0x0354) #uni1CEC
glyphs.append(0x0172) #glyph00370
glyphs.append(0x01A9) #glyph00425
glyphs.append(0x02A7) #glyph00679
glyphs.append(0x0183) #glyph00387
glyphs.append(0x0182) #glyph00386
glyphs.append(0x0181) #glyph00385
glyphs.append(0x0180) #glyph00384
glyphs.append(0x017F) #glyph00383
glyphs.append(0x0173) #glyph00371
glyphs.append(0x017D) #glyph00381
glyphs.append(0x017C) #glyph00380
glyphs.append(0x0185) #glyph00389
glyphs.append(0x0184) #glyph00388
glyphs.append(0x008D) #glyph00141
glyphs.append(0x008C) #glyph00140
glyphs.append(0x008F) #glyph00143
glyphs.append(0x008E) #glyph00142
glyphs.append(0x0091) #glyph00145
glyphs.append(0x0090) #glyph00144
glyphs.append(0x0093) #glyph00147
glyphs.append(0x0092) #glyph00146
glyphs.append(0x0095) #glyph00149
glyphs.append(0x0094) #glyph00148
glyphs.append(0x0277) #glyph00631
glyphs.append(0x0276) #glyph00630
glyphs.append(0x027D) #glyph00637
glyphs.append(0x027C) #glyph00636
glyphs.append(0x027B) #glyph00635
glyphs.append(0x027A) #glyph00634
glyphs.append(0x02DC) #glyph00732
glyphs.append(0x02DD) #glyph00733
glyphs.append(0x02DA) #glyph00730
glyphs.append(0x02DB) #glyph00731
glyphs.append(0x02C4) #glyph00708
glyphs.append(0x0353) #uni1CEB
glyphs.append(0x0136) #glyph00310
glyphs.append(0x02EB) #zero
glyphs.append(0x0137) #glyph00311
glyphs.append(0x0374) #glyph00884
glyphs.append(0x0375) #glyph00885
glyphs.append(0x0376) #glyph00886
glyphs.append(0x0139) #glyph00313
glyphs.append(0x0370) #glyph00880
glyphs.append(0x0371) #glyph00881
glyphs.append(0x0372) #glyph00882
glyphs.append(0x0373) #glyph00883
glyphs.append(0x013A) #glyph00314
glyphs.append(0x0378) #glyph00888
glyphs.append(0x0379) #glyph00889
glyphs.append(0x02E2) #quotesingle
glyphs.append(0x013C) #glyph00316
glyphs.append(0x013D) #glyph00317
glyphs.append(0x0307) #quoteleft
glyphs.append(0x0239) #glyph00569
glyphs.append(0x0238) #glyph00568
glyphs.append(0x0237) #glyph00567
glyphs.append(0x0236) #glyph00566
glyphs.append(0x0235) #glyph00565
glyphs.append(0x0234) #glyph00564
glyphs.append(0x0233) #glyph00563
glyphs.append(0x0232) #glyph00562
glyphs.append(0x0231) #glyph00561
glyphs.append(0x0230) #glyph00560
glyphs.append(0x00CE) #glyph00206
glyphs.append(0x00CF) #glyph00207
glyphs.append(0x00CC) #glyph00204
glyphs.append(0x00CD) #glyph00205
glyphs.append(0x00CA) #glyph00202
glyphs.append(0x00CB) #glyph00203
glyphs.append(0x00C8) #glyph00200
glyphs.append(0x00C9) #glyph00201
glyphs.append(0x0169) #glyph00361
glyphs.append(0x0168) #glyph00360
glyphs.append(0x016B) #glyph00363
glyphs.append(0x016A) #glyph00362
glyphs.append(0x016D) #glyph00365
glyphs.append(0x016C) #glyph00364
glyphs.append(0x00D0) #glyph00208
glyphs.append(0x00D1) #glyph00209
glyphs.append(0x021C) #glyph00540
glyphs.append(0x030D) #divide
glyphs.append(0x02BF) #glyph00703
glyphs.append(0x0303) #asciitilde
glyphs.append(0x037C) #glyph00892
glyphs.append(0x0206) #glyph00518
glyphs.append(0x0207) #glyph00519
glyphs.append(0x0377) #glyph00887
glyphs.append(0x0200) #glyph00512
glyphs.append(0x0201) #glyph00513
glyphs.append(0x01FE) #glyph00510
glyphs.append(0x01FF) #glyph00511
glyphs.append(0x0204) #glyph00516
glyphs.append(0x0205) #glyph00517
glyphs.append(0x0202) #glyph00514
glyphs.append(0x0203) #glyph00515
glyphs.append(0x000C) #uni0908
glyphs.append(0x000D) #uni0909
glyphs.append(0x0008) #uni0904
glyphs.append(0x0009) #uni0905
glyphs.append(0x000A) #uni0906
glyphs.append(0x000B) #uni0907
glyphs.append(0x0004) #uni0900
glyphs.append(0x0005) #uni0901
glyphs.append(0x0006) #uni0902
glyphs.append(0x0007) #uni0903
glyphs.append(0x01A3) #glyph00419
glyphs.append(0x0244) #glyph00580
glyphs.append(0x0011) #uni090D
glyphs.append(0x0012) #uni090E
glyphs.append(0x0013) #uni090F
glyphs.append(0x000E) #uni090A
glyphs.append(0x000F) #uni090B
glyphs.append(0x0010) #uni090C
glyphs.append(0x02A5) #glyph00677
glyphs.append(0x02A4) #glyph00676
glyphs.append(0x02A3) #glyph00675
glyphs.append(0x02A2) #glyph00674
glyphs.append(0x02A1) #glyph00673
glyphs.append(0x02A0) #glyph00672
glyphs.append(0x029F) #glyph00671
glyphs.append(0x029E) #glyph00670
glyphs.append(0x02FE) #asciicircum
glyphs.append(0x01A2) #glyph00418
glyphs.append(0x02A6) #glyph00678
glyphs.append(0x01D7) #glyph00471
glyphs.append(0x01D6) #glyph00470
glyphs.append(0x01D9) #glyph00473
glyphs.append(0x01D8) #glyph00472
glyphs.append(0x01DB) #glyph00475
glyphs.append(0x01DA) #glyph00474
glyphs.append(0x01DD) #glyph00477
glyphs.append(0x01DC) #glyph00476
glyphs.append(0x01DF) #glyph00479
glyphs.append(0x01DE) #glyph00478
glyphs.append(0x02F4) #nine
glyphs.append(0x02FC) #backslash
glyphs.append(0x017E) #glyph00382
glyphs.append(0x025A) #glyph00602
glyphs.append(0x025B) #glyph00603
glyphs.append(0x0258) #glyph00600
glyphs.append(0x0259) #glyph00601
glyphs.append(0x025E) #glyph00606
glyphs.append(0x025F) #glyph00607
glyphs.append(0x025C) #glyph00604
glyphs.append(0x025D) #glyph00605
glyphs.append(0x0260) #glyph00608
glyphs.append(0x0261) #glyph00609
glyphs.append(0x030E) #minus
glyphs.append(0x0225) #glyph00549
glyphs.append(0x021A) #glyph00538
glyphs.append(0x021B) #glyph00539
glyphs.append(0x0145) #glyph00325
glyphs.append(0x0144) #glyph00324
glyphs.append(0x0147) #glyph00327
glyphs.append(0x0146) #glyph00326
glyphs.append(0x0141) #glyph00321
glyphs.append(0x0140) #glyph00320
glyphs.append(0x0143) #glyph00323
glyphs.append(0x0142) #glyph00322
glyphs.append(0x0149) #glyph00329
glyphs.append(0x0148) #glyph00328
glyphs.append(0x0224) #glyph00548
glyphs.append(0x02BB) #glyph00699
glyphs.append(0x02BA) #glyph00698
glyphs.append(0x027E) #glyph00638
glyphs.append(0x031F) #uniA8E3
glyphs.append(0x02B3) #glyph00691
glyphs.append(0x02B2) #glyph00690
glyphs.append(0x02B5) #glyph00693
glyphs.append(0x02B4) #glyph00692
glyphs.append(0x02B7) #glyph00695
glyphs.append(0x02B6) #glyph00694
glyphs.append(0x02B9) #glyph00697
glyphs.append(0x02B8) #glyph00696
glyphs.append(0x02F1) #six
glyphs.append(0x02FF) #underscore
glyphs.append(0x0279) #glyph00633
glyphs.append(0x02E7) #comma
glyphs.append(0x0214) #glyph00532
glyphs.append(0x0278) #glyph00632
glyphs.append(0x0215) #glyph00533
glyphs.append(0x0044) #uni0940
glyphs.append(0x0045) #uni0941
glyphs.append(0x0046) #uni0942
glyphs.append(0x0047) #uni0943
glyphs.append(0x0048) #uni0944
glyphs.append(0x0049) #uni0945
glyphs.append(0x004A) #uni0946
glyphs.append(0x004B) #uni0947
glyphs.append(0x004C) #uni0948
glyphs.append(0x004D) #uni0949
glyphs.append(0x00FD) #glyph00253
glyphs.append(0x00FC) #glyph00252
glyphs.append(0x00FF) #glyph00255
glyphs.append(0x00FE) #glyph00254
glyphs.append(0x0101) #glyph00257
glyphs.append(0x0100) #glyph00256
glyphs.append(0x0315) #uniA833
glyphs.append(0x0314) #uniA832
glyphs.append(0x0313) #uniA831
glyphs.append(0x0312) #uniA830
glyphs.append(0x0319) #uniA837
glyphs.append(0x0318) #uniA836
glyphs.append(0x0317) #uniA835
glyphs.append(0x0316) #uniA834
glyphs.append(0x031D) #uniA8E1
glyphs.append(0x031C) #uniA8E0
glyphs.append(0x031B) #uniA839
glyphs.append(0x031A) #uniA838
glyphs.append(0x0321) #uniA8E5
glyphs.append(0x0320) #uniA8E4
glyphs.append(0x0323) #uniA8E7
glyphs.append(0x0322) #uniA8E6
glyphs.append(0x02F9) #greater
glyphs.append(0x0326) #uniA8EA
glyphs.append(0x0328) #uniA8EC
glyphs.append(0x0327) #uniA8EB
glyphs.append(0x032A) #uniA8EE
glyphs.append(0x0329) #uniA8ED
glyphs.append(0x021D) #glyph00541
glyphs.append(0x004E) #uni094A
glyphs.append(0x004F) #uni094B
glyphs.append(0x0050) #uni094C
glyphs.append(0x0051) #uni094D
glyphs.append(0x0052) #uni094E
glyphs.append(0x0053) #uni094F
glyphs.append(0x01B3) #glyph00435
glyphs.append(0x007D) #uni0979
glyphs.append(0x01B5) #glyph00437
glyphs.append(0x01B4) #glyph00436
glyphs.append(0x01AF) #glyph00431
glyphs.append(0x01AE) #glyph00430
glyphs.append(0x01B1) #glyph00433
glyphs.append(0x007C) #uni0978
glyphs.append(0x02C5) #glyph00709
glyphs.append(0x0108) #glyph00264
glyphs.append(0x024F) #glyph00591
glyphs.append(0x0109) #glyph00265
glyphs.append(0x010A) #glyph00266
glyphs.append(0x024C) #glyph00588
glyphs.append(0x010B) #glyph00267
glyphs.append(0x0075) #uni0971
glyphs.append(0x0106) #glyph00262
glyphs.append(0x0105) #glyph00261
glyphs.append(0x00AE) #glyph00174
glyphs.append(0x00AF) #glyph00175
glyphs.append(0x00B0) #glyph00176
glyphs.append(0x00B1) #glyph00177
glyphs.append(0x00AA) #glyph00170
glyphs.append(0x00AB) #glyph00171
glyphs.append(0x00AC) #glyph00172
glyphs.append(0x00AD) #glyph00173
glyphs.append(0x0286) #glyph00646
glyphs.append(0x0287) #glyph00647
glyphs.append(0x0284) #glyph00644
glyphs.append(0x0107) #glyph00263
glyphs.append(0x00B2) #glyph00178
glyphs.append(0x00B3) #glyph00179
glyphs.append(0x0280) #glyph00640
glyphs.append(0x0281) #glyph00641
glyphs.append(0x01E8) #glyph00488
glyphs.append(0x01E9) #glyph00489
glyphs.append(0x02D3) #glyph00723
glyphs.append(0x02D2) #glyph00722
glyphs.append(0x02D5) #glyph00725
glyphs.append(0x02D4) #glyph00724
glyphs.append(0x02D7) #glyph00727
glyphs.append(0x02D6) #glyph00726
glyphs.append(0x01E0) #glyph00480
glyphs.append(0x01E1) #glyph00481
glyphs.append(0x01E2) #glyph00482
glyphs.append(0x01E3) #glyph00483
glyphs.append(0x01E4) #glyph00484
glyphs.append(0x01E5) #glyph00485
glyphs.append(0x01E6) #glyph00486
glyphs.append(0x01E7) #glyph00487
glyphs.append(0x0308) #quoteright
glyphs.append(0x02E3) #parenleft
glyphs.append(0x0120) #glyph00288
glyphs.append(0x0121) #glyph00289
glyphs.append(0x011E) #glyph00286
glyphs.append(0x011F) #glyph00287
glyphs.append(0x011C) #glyph00284
glyphs.append(0x011D) #glyph00285
glyphs.append(0x011A) #glyph00282
glyphs.append(0x011B) #glyph00283
glyphs.append(0x0118) #glyph00280
glyphs.append(0x0119) #glyph00281
glyphs.append(0x0383) #glyph00899
glyphs.append(0x0382) #glyph00898
glyphs.append(0x022C) #glyph00556
glyphs.append(0x037D) #glyph00893
glyphs.append(0x0166) #glyph00358
glyphs.append(0x037B) #glyph00891
glyphs.append(0x037A) #glyph00890
glyphs.append(0x0381) #glyph00897
glyphs.append(0x0380) #glyph00896
glyphs.append(0x037F) #glyph00895
glyphs.append(0x0167) #glyph00359
glyphs.append(0x01CB) #glyph00459
glyphs.append(0x01CA) #glyph00458
glyphs.append(0x0000) #.notdef
glyphs.append(0x003E) #uni093A
glyphs.append(0x015E) #glyph00350
glyphs.append(0x0040) #uni093C
glyphs.append(0x003F) #uni093B
glyphs.append(0x0042) #uni093E
glyphs.append(0x0041) #uni093D
glyphs.append(0x0043) #uni093F
glyphs.append(0x0250) #glyph00592
glyphs.append(0x0251) #glyph00593
glyphs.append(0x024E) #glyph00590
glyphs.append(0x022A) #glyph00554
glyphs.append(0x0254) #glyph00596
glyphs.append(0x0255) #glyph00597
glyphs.append(0x0252) #glyph00594
glyphs.append(0x0253) #glyph00595
glyphs.append(0x0256) #glyph00598
glyphs.append(0x0257) #glyph00599
glyphs.append(0x00D7) #glyph00215
glyphs.append(0x00D6) #glyph00214
glyphs.append(0x00D9) #glyph00217
glyphs.append(0x00D8) #glyph00216
glyphs.append(0x00D3) #glyph00211
glyphs.append(0x00D2) #glyph00210
glyphs.append(0x00D5) #glyph00213
glyphs.append(0x00D4) #glyph00212
glyphs.append(0x0162) #glyph00354
glyphs.append(0x0163) #glyph00355
glyphs.append(0x0164) #glyph00356
glyphs.append(0x0165) #glyph00357
glyphs.append(0x00DB) #glyph00219
glyphs.append(0x00DA) #glyph00218
glyphs.append(0x0160) #glyph00352
glyphs.append(0x0161) #glyph00353
glyphs.append(0x0362) #glyph00866
glyphs.append(0x0363) #glyph00867
glyphs.append(0x0360) #glyph00864
glyphs.append(0x0361) #glyph00865
glyphs.append(0x035F) #glyph00863
glyphs.append(0x01C6) #glyph00454
glyphs.append(0x0364) #glyph00868
glyphs.append(0x0365) #glyph00869
glyphs.append(0x01FD) #glyph00509
glyphs.append(0x01FC) #glyph00508
glyphs.append(0x0352) #uni1CEA
glyphs.append(0x01F5) #glyph00501
glyphs.append(0x01F4) #glyph00500
glyphs.append(0x01F7) #glyph00503
glyphs.append(0x01F6) #glyph00502
glyphs.append(0x01F9) #glyph00505
glyphs.append(0x01F8) #glyph00504
glyphs.append(0x01FB) #glyph00507
glyphs.append(0x01FA) #glyph00506
glyphs.append(0x003D) #uni0939
glyphs.append(0x003C) #uni0938
glyphs.append(0x0035) #uni0931
glyphs.append(0x0034) #uni0930
glyphs.append(0x0037) #uni0933
glyphs.append(0x0036) #uni0932
glyphs.append(0x0039) #uni0935
glyphs.append(0x0038) #uni0934
glyphs.append(0x003B) #uni0937
glyphs.append(0x003A) #uni0936
glyphs.append(0x0357) #uni1CEF
glyphs.append(0x0088) #glyph00136
glyphs.append(0x0089) #glyph00137
glyphs.append(0x008A) #glyph00138
glyphs.append(0x008B) #glyph00139
glyphs.append(0x0346) #uni1CDE
glyphs.append(0x0345) #uni1CDD
glyphs.append(0x0347) #uni1CDF
glyphs.append(0x0342) #uni1CDA
glyphs.append(0x0344) #uni1CDC
glyphs.append(0x0343) #uni1CDB
glyphs.append(0x01BC) #glyph00444
glyphs.append(0x01BD) #glyph00445
glyphs.append(0x01BE) #glyph00446
glyphs.append(0x01BF) #glyph00447
glyphs.append(0x01B8) #glyph00440
glyphs.append(0x015F) #glyph00351
glyphs.append(0x01BA) #glyph00442
glyphs.append(0x01BB) #glyph00443
glyphs.append(0x0227) #glyph00551
glyphs.append(0x01C0) #glyph00448
glyphs.append(0x01C1) #glyph00449
glyphs.append(0x031E) #uniA8E2
glyphs.append(0x02DF) #quotedbl
glyphs.append(0x02E0) #numbersign
glyphs.append(0x035C) #uni1CF4
glyphs.append(0x02F3) #eight
glyphs.append(0x0301) #bar
glyphs.append(0x030C) #multiply
glyphs.append(0x0356) #uni1CEE
glyphs.append(0x0263) #glyph00611
glyphs.append(0x0262) #glyph00610
glyphs.append(0x0265) #glyph00613
glyphs.append(0x0264) #glyph00612
glyphs.append(0x0087) #uni25CC
glyphs.append(0x0266) #glyph00614
glyphs.append(0x0269) #glyph00617
glyphs.append(0x0268) #glyph00616
glyphs.append(0x026B) #glyph00619
glyphs.append(0x026A) #glyph00618
glyphs.append(0x033F) #uni1CD7
glyphs.append(0x033E) #uni1CD6
glyphs.append(0x0339) #uni1CD1
glyphs.append(0x0338) #uni1CD0
glyphs.append(0x033B) #uni1CD3
glyphs.append(0x033A) #uni1CD2
glyphs.append(0x013E) #glyph00318
glyphs.append(0x013F) #glyph00319
glyphs.append(0x0082) #uni097E
glyphs.append(0x0081) #uni097D
glyphs.append(0x0138) #glyph00312
glyphs.append(0x0083) #uni097F
glyphs.append(0x007E) #uni097A
glyphs.append(0x013B) #glyph00315
glyphs.append(0x0080) #uni097C
glyphs.append(0x007F) #uni097B
glyphs.append(0x02EC) #one
glyphs.append(0x020D) #glyph00525
glyphs.append(0x02F7) #less
glyphs.append(0x0384) #glyph00900
glyphs.append(0x035B) #uni1CF3
glyphs.append(0x035A) #uni1CF2
glyphs.append(0x0359) #uni1CF1
glyphs.append(0x0358) #uni1CF0
glyphs.append(0x035E) #uni1CF6
glyphs.append(0x0221) #glyph00545
glyphs.append(0x0220) #glyph00544
glyphs.append(0x0223) #glyph00547
glyphs.append(0x0222) #glyph00546
glyphs.append(0x010C) #glyph00268
glyphs.append(0x010D) #glyph00269
glyphs.append(0x021F) #glyph00543
glyphs.append(0x021E) #glyph00542
glyphs.append(0x0079) #uni0975
glyphs.append(0x0078) #uni0974
glyphs.append(0x007B) #uni0977
glyphs.append(0x007A) #uni0976
glyphs.append(0x0104) #glyph00260
glyphs.append(0x0074) #uni0970
glyphs.append(0x0077) #uni0973
glyphs.append(0x0076) #uni0972
glyphs.append(0x035D) #uni1CF5
glyphs.append(0x0190) #glyph00400
glyphs.append(0x0191) #glyph00401
glyphs.append(0x0192) #glyph00402
glyphs.append(0x0193) #glyph00403
glyphs.append(0x0194) #glyph00404
glyphs.append(0x0195) #glyph00405
glyphs.append(0x0196) #glyph00406
glyphs.append(0x0197) #glyph00407
glyphs.append(0x0198) #glyph00408
glyphs.append(0x0199) #glyph00409
glyphs.append(0x0300) #braceleft
glyphs.append(0x0304) #macron
glyphs.append(0x0324) #uniA8E8
glyphs.append(0x0350) #uni1CE8
glyphs.append(0x0103) #glyph00259
glyphs.append(0x00A9) #glyph00169
glyphs.append(0x00A8) #glyph00168
glyphs.append(0x0309) #quotedblleft
glyphs.append(0x0102) #glyph00258
glyphs.append(0x00A3) #glyph00163
glyphs.append(0x00A2) #glyph00162
glyphs.append(0x00A1) #glyph00161
glyphs.append(0x00A0) #glyph00160
glyphs.append(0x00A7) #glyph00167
glyphs.append(0x00A6) #glyph00166
glyphs.append(0x00A5) #glyph00165
glyphs.append(0x00A4) #glyph00164
glyphs.append(0x0245) #glyph00581
glyphs.append(0x0351) #uni1CE9
glyphs.append(0x022B) #glyph00555
glyphs.append(0x0293) #glyph00659
glyphs.append(0x0292) #glyph00658
glyphs.append(0x028F) #glyph00655
glyphs.append(0x0228) #glyph00552
glyphs.append(0x0291) #glyph00657
glyphs.append(0x0290) #glyph00656
glyphs.append(0x028B) #glyph00651
glyphs.append(0x028A) #glyph00650
glyphs.append(0x028D) #glyph00653
glyphs.append(0x0229) #glyph00553
glyphs.append(0x02CA) #glyph00714
glyphs.append(0x02CB) #glyph00715
glyphs.append(0x02CC) #glyph00716
glyphs.append(0x02CD) #glyph00717
glyphs.append(0x02C6) #glyph00710
glyphs.append(0x0226) #glyph00550
glyphs.append(0x01F3) #glyph00499
glyphs.append(0x01F2) #glyph00498
glyphs.append(0x01F1) #glyph00497
glyphs.append(0x01F0) #glyph00496
glyphs.append(0x0084) #uni02BC
glyphs.append(0x01EE) #glyph00494
glyphs.append(0x01ED) #glyph00493
glyphs.append(0x01EC) #glyph00492
glyphs.append(0x01EB) #glyph00491
glyphs.append(0x01EA) #glyph00490
glyphs.append(0x00FB) #glyph00251
glyphs.append(0x00FA) #glyph00250
glyphs.append(0x02C7) #glyph00711
glyphs.append(0x012B) #glyph00299
glyphs.append(0x012A) #glyph00298
glyphs.append(0x0127) #glyph00295
glyphs.append(0x0126) #glyph00294
glyphs.append(0x0129) #glyph00297
glyphs.append(0x0128) #glyph00296
glyphs.append(0x0123) #glyph00291
glyphs.append(0x0122) #glyph00290
glyphs.append(0x0125) #glyph00293
glyphs.append(0x0124) #glyph00292
glyphs.append(0x02F5) #colon
glyphs.append(0x022E) #glyph00558
glyphs.append(0x028E) #glyph00654
glyphs.append(0x022F) #glyph00559
glyphs.append(0x02DE) #exclam
glyphs.append(0x00C4) #glyph00196
glyphs.append(0x00C5) #glyph00197
glyphs.append(0x00C2) #glyph00194
glyphs.append(0x00C3) #glyph00195
glyphs.append(0x00C0) #glyph00192
glyphs.append(0x00C1) #glyph00193
glyphs.append(0x00BE) #glyph00190
glyphs.append(0x00BF) #glyph00191
glyphs.append(0x02EE) #three
glyphs.append(0x00C6) #glyph00198
glyphs.append(0x00C7) #glyph00199
glyphs.append(0x0305) #endash
glyphs.append(0x0311) #glyph00785
glyphs.append(0x028C) #glyph00652
glyphs.append(0x0283) #glyph00643
glyphs.append(0x02C9) #glyph00713
glyphs.append(0x0171) #glyph00369
glyphs.append(0x0170) #glyph00368
glyphs.append(0x0003) #uni00A0
glyphs.append(0x00E4) #glyph00228
glyphs.append(0x00E5) #glyph00229
glyphs.append(0x0247) #glyph00583
glyphs.append(0x0246) #glyph00582
glyphs.append(0x0249) #glyph00585
glyphs.append(0x0248) #glyph00584
glyphs.append(0x024B) #glyph00587
glyphs.append(0x024A) #glyph00586
glyphs.append(0x00DC) #glyph00220
glyphs.append(0x00DD) #glyph00221
glyphs.append(0x00DE) #glyph00222
glyphs.append(0x00DF) #glyph00223
glyphs.append(0x00E0) #glyph00224
glyphs.append(0x00E1) #glyph00225
glyphs.append(0x00E2) #glyph00226
glyphs.append(0x00E3) #glyph00227
glyphs.append(0x0157) #glyph00343
glyphs.append(0x0156) #glyph00342
glyphs.append(0x0155) #glyph00341
glyphs.append(0x0154) #glyph00340
glyphs.append(0x015B) #glyph00347
glyphs.append(0x015A) #glyph00346
glyphs.append(0x0159) #glyph00345
glyphs.append(0x0158) #glyph00344
glyphs.append(0x015D) #glyph00349
glyphs.append(0x015C) #glyph00348
glyphs.append(0x036B) #glyph00875
glyphs.append(0x036A) #glyph00874
glyphs.append(0x036D) #glyph00877
glyphs.append(0x036C) #glyph00876
glyphs.append(0x0367) #glyph00871
glyphs.append(0x0366) #glyph00870
glyphs.append(0x0369) #glyph00873
glyphs.append(0x0368) #glyph00872
glyphs.append(0x036F) #glyph00879
glyphs.append(0x01EF) #glyph00495
glyphs.append(0x0302) #braceright
glyphs.append(0x0209) #glyph00521
glyphs.append(0x02F2) #seven
glyphs.append(0x0062) #uni095E
glyphs.append(0x01B2) #glyph00434
glyphs.append(0x02CE) #glyph00718
glyphs.append(0x0061) #uni095D
glyphs.append(0x02CF) #glyph00719
glyphs.append(0x016F) #glyph00367
glyphs.append(0x016E) #glyph00366
glyphs.append(0x0208) #glyph00520
glyphs.append(0x002A) #uni0926
glyphs.append(0x002B) #uni0927
glyphs.append(0x0028) #uni0924
glyphs.append(0x0029) #uni0925
glyphs.append(0x0026) #uni0922
glyphs.append(0x0027) #uni0923
glyphs.append(0x0024) #uni0920
glyphs.append(0x0025) #uni0921
glyphs.append(0x0216) #glyph00534
glyphs.append(0x0217) #glyph00535
glyphs.append(0x0218) #glyph00536
glyphs.append(0x0219) #glyph00537
glyphs.append(0x0212) #glyph00530
glyphs.append(0x0213) #glyph00531
glyphs.append(0x002C) #uni0928
glyphs.append(0x002D) #uni0929
glyphs.append(0x027F) #glyph00639
glyphs.append(0x0285) #glyph00645
glyphs.append(0x0033) #uni092F
glyphs.append(0x0031) #uni092D
glyphs.append(0x0032) #uni092E
glyphs.append(0x002F) #uni092B
glyphs.append(0x0030) #uni092C
glyphs.append(0x002E) #uni092A
glyphs.append(0x01C5) #glyph00453
glyphs.append(0x01C4) #glyph00452
glyphs.append(0x01C3) #glyph00451
glyphs.append(0x01C2) #glyph00450
glyphs.append(0x01C9) #glyph00457
glyphs.append(0x01C8) #glyph00456
glyphs.append(0x01C7) #glyph00455
glyphs.append(0x01B9) #glyph00441
glyphs.append(0x02C8) #glyph00712
glyphs.append(0x01B7) #glyph00439
glyphs.append(0x034F) #uni1CE7
glyphs.append(0x02BE) #glyph00702
glyphs.append(0x01B6) #glyph00438
glyphs.append(0x0186) #glyph00390
glyphs.append(0x0187) #glyph00391
glyphs.append(0x0188) #glyph00392
glyphs.append(0x0189) #glyph00393
glyphs.append(0x018A) #glyph00394
glyphs.append(0x018B) #glyph00395
glyphs.append(0x018C) #glyph00396
glyphs.append(0x018D) #glyph00397
glyphs.append(0x018E) #glyph00398
glyphs.append(0x018F) #glyph00399
glyphs.append(0x0134) #glyph00308
glyphs.append(0x0098) #glyph00152
glyphs.append(0x0099) #glyph00153
glyphs.append(0x0096) #glyph00150
glyphs.append(0x0097) #glyph00151
glyphs.append(0x009C) #glyph00156
glyphs.append(0x009D) #glyph00157
glyphs.append(0x009A) #glyph00154
glyphs.append(0x009B) #glyph00155
glyphs.append(0x009E) #glyph00158
glyphs.append(0x009F) #glyph00159
glyphs.append(0x0267) #glyph00615
glyphs.append(0x0270) #glyph00624
glyphs.append(0x0271) #glyph00625
glyphs.append(0x0272) #glyph00626
glyphs.append(0x0273) #glyph00627
glyphs.append(0x026C) #glyph00620
glyphs.append(0x026D) #glyph00621
glyphs.append(0x026E) #glyph00622
glyphs.append(0x026F) #glyph00623
glyphs.append(0x034A) #uni1CE2
glyphs.append(0x034B) #uni1CE3
glyphs.append(0x0348) #uni1CE0
glyphs.append(0x0349) #uni1CE1
glyphs.append(0x0274) #glyph00628
glyphs.append(0x0275) #glyph00629
glyphs.append(0x034C) #uni1CE4
glyphs.append(0x0333) #uniA8F7
glyphs.append(0x030F) #uni20B9
glyphs.append(0x0355) #uni1CED
glyphs.append(0x034D) #uni1CE5
glyphs.append(0x01B0) #glyph00432
glyphs.append(0x02E8) #hyphen
glyphs.append(0x037E) #glyph00894
glyphs.append(0x032B) #uniA8EF
glyphs.append(0x02E9) #period
glyphs.append(0x0085) #uni200C
glyphs.append(0x02F0) #five
glyphs.append(0x0135) #glyph00309
glyphs.append(0x0086) #uni200D
glyphs.append(0x0133) #glyph00307
glyphs.append(0x0132) #glyph00306
glyphs.append(0x0131) #glyph00305
glyphs.append(0x0130) #glyph00304
glyphs.append(0x012F) #glyph00303
glyphs.append(0x012E) #glyph00302
glyphs.append(0x012D) #glyph00301
glyphs.append(0x012C) #glyph00300
glyphs.append(0x032D) #uniA8F1
glyphs.append(0x0306) #emdash
glyphs.append(0x0211) #glyph00529
glyphs.append(0x02FD) #bracketright
glyphs.append(0x02E4) #parenright
glyphs.append(0x0023) #uni091F
glyphs.append(0x0022) #uni091E
glyphs.append(0x0021) #uni091D
glyphs.append(0x0020) #uni091C
glyphs.append(0x001F) #uni091B
glyphs.append(0x001E) #uni091A
glyphs.append(0x0210) #glyph00528
glyphs.append(0x0242) #glyph00578
glyphs.append(0x0243) #glyph00579
glyphs.append(0x023A) #glyph00570
glyphs.append(0x023B) #glyph00571
glyphs.append(0x023C) #glyph00572
glyphs.append(0x023D) #glyph00573
glyphs.append(0x023E) #glyph00574
glyphs.append(0x023F) #glyph00575
glyphs.append(0x0240) #glyph00576
glyphs.append(0x0241) #glyph00577
glyphs.append(0x0111) #glyph00273
glyphs.append(0x0110) #glyph00272
glyphs.append(0x006C) #uni0968
glyphs.append(0x006D) #uni0969
glyphs.append(0x0115) #glyph00277
glyphs.append(0x0114) #glyph00276
glyphs.append(0x0113) #glyph00275
glyphs.append(0x0112) #glyph00274
glyphs.append(0x0066) #uni0962
glyphs.append(0x0067) #uni0963
glyphs.append(0x0064) #uni0960
glyphs.append(0x0116) #glyph00278
glyphs.append(0x006A) #uni0966
glyphs.append(0x006B) #uni0967
glyphs.append(0x0068) #uni0964
glyphs.append(0x0069) #uni0965
glyphs.append(0x02F8) #equal
glyphs.append(0x02EA) #slash
glyphs.append(0x030B) #ellipsis
glyphs.append(0x0325) #uniA8E9
glyphs.append(0x01A1) #glyph00417
glyphs.append(0x01A0) #glyph00416
glyphs.append(0x019F) #glyph00415
glyphs.append(0x019E) #glyph00414
glyphs.append(0x019D) #glyph00413
glyphs.append(0x019C) #glyph00412
glyphs.append(0x019B) #glyph00411
glyphs.append(0x019A) #glyph00410
glyphs.append(0x006F) #uni096B
glyphs.append(0x0070) #uni096C
glyphs.append(0x006E) #uni096A
glyphs.append(0x0073) #uni096F
glyphs.append(0x0071) #uni096D
glyphs.append(0x0072) #uni096E
glyphs.append(0x02E6) #plus
glyphs.append(0x001D) #uni0919
glyphs.append(0x001C) #uni0918
glyphs.append(0x001B) #uni0917
glyphs.append(0x001A) #uni0916
glyphs.append(0x0019) #uni0915
glyphs.append(0x0018) #uni0914
glyphs.append(0x0017) #uni0913
glyphs.append(0x0016) #uni0912
glyphs.append(0x0015) #uni0911
glyphs.append(0x0014) #uni0910
glyphs.append(0x0341) #uni1CD9
glyphs.append(0x02ED) #two
glyphs.append(0x0294) #glyph00660
glyphs.append(0x0295) #glyph00661
glyphs.append(0x0296) #glyph00662
glyphs.append(0x0297) #glyph00663
glyphs.append(0x0298) #glyph00664
glyphs.append(0x0299) #glyph00665
glyphs.append(0x029A) #glyph00666
glyphs.append(0x029B) #glyph00667
glyphs.append(0x029C) #glyph00668
glyphs.append(0x029D) #glyph00669
glyphs.append(0x02BD) #glyph00701
glyphs.append(0x02BC) #glyph00700
glyphs.append(0x02C3) #glyph00707
glyphs.append(0x02C2) #glyph00706
glyphs.append(0x02C1) #glyph00705
glyphs.append(0x02C0) #glyph00704
glyphs.append(0x01CE) #glyph00462
glyphs.append(0x01CF) #glyph00463
glyphs.append(0x01CC) #glyph00460
glyphs.append(0x01CD) #glyph00461
glyphs.append(0x01D2) #glyph00466
glyphs.append(0x01D3) #glyph00467
glyphs.append(0x01D0) #glyph00464
glyphs.append(0x01D1) #glyph00465
glyphs.append(0x0002) #uni000D
glyphs.append(0x01D4) #glyph00468
glyphs.append(0x01D5) #glyph00469
glyphs.append(0x02FA) #question
glyphs.append(0x017A) #glyph00378
glyphs.append(0x020B) #glyph00523
glyphs.append(0x020A) #glyph00522
glyphs.append(0x0055) #uni0951
glyphs.append(0x0054) #uni0950
glyphs.append(0x017B) #glyph00379
glyphs.append(0x036E) #glyph00878
glyphs.append(0x020F) #glyph00527
glyphs.append(0x0001) #uni0000
glyphs.append(0x020E) #glyph00526
glyphs.append(0x00B9) #glyph00185
glyphs.append(0x00B8) #glyph00184
glyphs.append(0x00BB) #glyph00187
glyphs.append(0x00BA) #glyph00186
glyphs.append(0x00B5) #glyph00181
glyphs.append(0x00B4) #glyph00180
glyphs.append(0x00B7) #glyph00183
glyphs.append(0x00B6) #glyph00182
glyphs.append(0x020C) #glyph00524
glyphs.append(0x00BD) #glyph00189
glyphs.append(0x00BC) #glyph00188
glyphs.append(0x033D) #uni1CD5
glyphs.append(0x033C) #uni1CD4
glyphs.append(0x0340) #uni1CD8
glyphs.append(0x005D) #uni0959
glyphs.append(0x005C) #uni0958
glyphs.append(0x0288) #glyph00648
glyphs.append(0x02F6) #semicolon
glyphs.append(0x034E) #uni1CE6
glyphs.append(0x0289) #glyph00649
glyphs.append(0x0336) #uniA8FA
glyphs.append(0x0337) #uniA8FB
glyphs.append(0x0060) #uni095C
glyphs.append(0x005F) #uni095B
glyphs.append(0x005E) #uni095A
glyphs.append(0x0063) #uni095F
glyphs.append(0x00EF) #glyph00239
glyphs.append(0x00EE) #glyph00238
glyphs.append(0x00ED) #glyph00237
glyphs.append(0x00EC) #glyph00236
glyphs.append(0x00EB) #glyph00235
glyphs.append(0x00EA) #glyph00234
glyphs.append(0x00E9) #glyph00233
glyphs.append(0x00E8) #glyph00232
glyphs.append(0x00E7) #glyph00231
glyphs.append(0x00E6) #glyph00230
glyphs.append(0x0150) #glyph00336
glyphs.append(0x0151) #glyph00337
glyphs.append(0x014E) #glyph00334
glyphs.append(0x014F) #glyph00335
glyphs.append(0x014C) #glyph00332
glyphs.append(0x014D) #glyph00333
glyphs.append(0x014A) #glyph00330
glyphs.append(0x014B) #glyph00331
glyphs.append(0x0282) #glyph00642
glyphs.append(0x02E1) #percent
glyphs.append(0x0152) #glyph00338
glyphs.append(0x0153) #glyph00339
glyphs.append(0x02B0) #glyph00688
glyphs.append(0x02B1) #glyph00689
glyphs.append(0x02AA) #glyph00682
glyphs.append(0x02AB) #glyph00683
glyphs.append(0x02A8) #glyph00680
glyphs.append(0x02A9) #glyph00681
glyphs.append(0x02AE) #glyph00686
glyphs.append(0x02AF) #glyph00687
glyphs.append(0x02AC) #glyph00684
glyphs.append(0x02AD) #glyph00685
glyphs.append(0x02D1) #glyph00721
glyphs.append(0x02D0) #glyph00720
glyphs.append(0x02D9) #glyph00729
glyphs.append(0x02E5) #asterisk
glyphs.append(0x010F) #glyph00271
glyphs.append(0x010E) #glyph00270
glyphs.append(0x0331) #uniA8F5
glyphs.append(0x0057) #uni0953
glyphs.append(0x0056) #uni0952
glyphs.append(0x00F8) #glyph00248
glyphs.append(0x00F9) #glyph00249
glyphs.append(0x005B) #uni0957
glyphs.append(0x005A) #uni0956
glyphs.append(0x0059) #uni0955
glyphs.append(0x0058) #uni0954
glyphs.append(0x00F2) #glyph00242
glyphs.append(0x00F3) #glyph00243
glyphs.append(0x00F0) #glyph00240
glyphs.append(0x00F1) #glyph00241
glyphs.append(0x00F6) #glyph00246
glyphs.append(0x00F7) #glyph00247
glyphs.append(0x00F4) #glyph00244
glyphs.append(0x00F5) #glyph00245
glyphs.append(0x0174) #glyph00372
glyphs.append(0x0334) #uniA8F8
glyphs.append(0x0335) #uniA8F9
glyphs.append(0x0175) #glyph00373
glyphs.append(0x0330) #uniA8F4
glyphs.append(0x02FB) #bracketleft
glyphs.append(0x0332) #uniA8F6
glyphs.append(0x022D) #glyph00557
glyphs.append(0x032C) #uniA8F0
glyphs.append(0x0117) #glyph00279
glyphs.append(0x032E) #uniA8F2
glyphs.append(0x032F) #uniA8F3
return glyphs
|
davelab6/pyfontaine
|
fontaine/charsets/noto_glyphs/notosansdevanagari_regular.py
|
Python
|
gpl-3.0
| 38,139
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=relative-import
"""Generate template values for attributes.
Extends IdlType with property |constructor_type_name|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import idl_types
from idl_types import inherits_interface
from v8_globals import includes
import v8_types
import v8_utilities
from v8_utilities import (cpp_name_or_partial, capitalize, cpp_name, has_extended_attribute,
has_extended_attribute_value, scoped_name, strip_suffix,
uncapitalize, extended_attribute_value_as_list, is_unforgeable,
is_legacy_interface_type_checking)
def attribute_context(interface, attribute, interfaces):
"""Creates a Jinja template context for an attribute of an interface.
Args:
interface: An interface which |attribute| belongs to
attribute: An attribute to create the context for
interfaces: A dict which maps an interface name to the definition
which can be referred if needed
Returns:
A Jinja template context for |attribute|
"""
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
idl_type.add_includes_for_type(extended_attributes)
if idl_type.enum_values:
includes.add('core/inspector/ConsoleMessage.h')
# [CheckSecurity]
is_do_not_check_security = 'DoNotCheckSecurity' in extended_attributes
is_check_security_for_receiver = (
has_extended_attribute_value(interface, 'CheckSecurity', 'Receiver') and
not is_do_not_check_security)
is_check_security_for_return_value = (
has_extended_attribute_value(attribute, 'CheckSecurity', 'ReturnValue'))
if is_check_security_for_receiver or is_check_security_for_return_value:
includes.add('bindings/core/v8/BindingSecurity.h')
# [Constructor]
# TODO(yukishiino): Constructors are much like methods although constructors
# are not methods. Constructors must be data-type properties, and we can
    # support them as a kind of method.
constructor_type = idl_type.constructor_type_name if is_constructor_attribute(attribute) else None
# [CEReactions]
is_ce_reactions = 'CEReactions' in extended_attributes
if is_ce_reactions:
includes.add('core/dom/custom/CEReactionsScope.h')
# [CustomElementCallbacks], [Reflect]
is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
is_reflect = 'Reflect' in extended_attributes
if is_custom_element_callbacks or is_reflect:
includes.add('core/dom/custom/V0CustomElementProcessingStack.h')
# [ImplementedInPrivateScript]
is_implemented_in_private_script = 'ImplementedInPrivateScript' in extended_attributes
if is_implemented_in_private_script:
includes.add('bindings/core/v8/PrivateScriptRunner.h')
includes.add('core/frame/LocalFrame.h')
includes.add('platform/ScriptForbiddenScope.h')
# [OnlyExposedToPrivateScript]
is_only_exposed_to_private_script = 'OnlyExposedToPrivateScript' in extended_attributes
# [PerWorldBindings]
if 'PerWorldBindings' in extended_attributes:
assert idl_type.is_wrapper_type or 'LogActivity' in extended_attributes, '[PerWorldBindings] should only be used with wrapper types: %s.%s' % (interface.name, attribute.name)
# [SaveSameObject]
is_save_same_object = (
'SameObject' in attribute.extended_attributes and
'SaveSameObject' in attribute.extended_attributes)
if is_save_same_object:
includes.add('bindings/core/v8/V8PrivateProperty.h')
if (base_idl_type == 'EventHandler' and
interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/V8ErrorHandler.h')
cached_attribute_validation_method = extended_attributes.get('CachedAttribute')
keep_alive_for_gc = is_keep_alive_for_gc(interface, attribute)
if cached_attribute_validation_method or keep_alive_for_gc:
includes.add('bindings/core/v8/V8HiddenValue.h')
# [CachedAccessor]
is_cached_accessor = 'CachedAccessor' in extended_attributes
if is_cached_accessor:
includes.add('bindings/core/v8/V8PrivateProperty.h')
context = {
'access_control_list': access_control_list(interface, attribute),
'activity_logging_world_list_for_getter': v8_utilities.activity_logging_world_list(attribute, 'Getter'), # [ActivityLogging]
'activity_logging_world_list_for_setter': v8_utilities.activity_logging_world_list(attribute, 'Setter'), # [ActivityLogging]
'activity_logging_world_check': v8_utilities.activity_logging_world_check(attribute), # [ActivityLogging]
'argument_cpp_type': idl_type.cpp_type_args(used_as_rvalue_type=True),
'cached_attribute_validation_method': cached_attribute_validation_method,
'constructor_type': constructor_type,
'cpp_name': cpp_name(attribute),
'cpp_type': idl_type.cpp_type,
'cpp_type_initializer': idl_type.cpp_type_initializer,
'deprecate_as': v8_utilities.deprecate_as(attribute), # [DeprecateAs]
'enum_type': idl_type.enum_type,
'enum_values': idl_type.enum_values,
'exposed_test': v8_utilities.exposed(attribute, interface), # [Exposed]
'has_custom_getter': has_custom_getter(attribute),
'has_custom_setter': has_custom_setter(attribute),
'has_setter': has_setter(interface, attribute),
'idl_type': str(idl_type), # need trailing [] on array for Dictionary::ConversionContext::setConversionType
'is_cached_accessor': is_cached_accessor,
'is_call_with_execution_context': has_extended_attribute_value(attribute, 'CallWith', 'ExecutionContext'),
'is_call_with_script_state': has_extended_attribute_value(attribute, 'CallWith', 'ScriptState'),
'is_ce_reactions': is_ce_reactions,
'is_check_security_for_receiver': is_check_security_for_receiver,
'is_check_security_for_return_value': is_check_security_for_return_value,
'is_custom_element_callbacks': is_custom_element_callbacks,
# TODO(yukishiino): Make all DOM attributes accessor-type properties.
'is_data_type_property': not ('CachedAccessor' in extended_attributes) and is_data_type_property(interface, attribute),
'is_getter_raises_exception': # [RaisesException]
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in (None, 'Getter'),
'is_implemented_in_private_script': is_implemented_in_private_script,
'is_keep_alive_for_gc': keep_alive_for_gc,
'is_lenient_this': 'LenientThis' in extended_attributes,
'is_nullable': idl_type.is_nullable,
'is_explicit_nullable': idl_type.is_explicit_nullable,
'is_partial_interface_member':
'PartialInterfaceImplementedAs' in extended_attributes,
'is_per_world_bindings': 'PerWorldBindings' in extended_attributes,
'is_put_forwards': 'PutForwards' in extended_attributes,
'is_read_only': attribute.is_read_only,
'is_reflect': is_reflect,
'is_replaceable': 'Replaceable' in attribute.extended_attributes,
'is_save_same_object': is_save_same_object,
'is_static': attribute.is_static,
'is_url': 'URL' in extended_attributes,
'is_unforgeable': is_unforgeable(interface, attribute),
'on_instance': v8_utilities.on_instance(interface, attribute),
'on_interface': v8_utilities.on_interface(interface, attribute),
'on_prototype': v8_utilities.on_prototype(interface, attribute),
'origin_trial_enabled_function': v8_utilities.origin_trial_enabled_function_name(attribute), # [OriginTrialEnabled]
'origin_trial_feature_name': v8_utilities.origin_trial_feature_name(attribute), # [OriginTrialEnabled]
'use_output_parameter_for_result': idl_type.use_output_parameter_for_result,
'measure_as': v8_utilities.measure_as(attribute, interface), # [MeasureAs]
'name': attribute.name,
'only_exposed_to_private_script': is_only_exposed_to_private_script,
'private_script_v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'cppValue', bailout_return_value='false', isolate='scriptState->isolate()'),
'property_attributes': property_attributes(interface, attribute),
'reflect_empty': extended_attributes.get('ReflectEmpty'),
'reflect_invalid': extended_attributes.get('ReflectInvalid', ''),
'reflect_missing': extended_attributes.get('ReflectMissing'),
'reflect_only': extended_attribute_value_as_list(attribute, 'ReflectOnly'),
'runtime_enabled_function': v8_utilities.runtime_enabled_function_name(attribute), # [RuntimeEnabled]
'runtime_feature_name': v8_utilities.runtime_feature_name(attribute), # [RuntimeEnabled]
'secure_context_test': v8_utilities.secure_context(attribute, interface), # [SecureContext]
'should_be_exposed_to_script': not (is_implemented_in_private_script and is_only_exposed_to_private_script),
'cached_accessor_name': '%s%sCachedAccessor' % (interface.name, attribute.name.capitalize()),
'world_suffixes': (
['', 'ForMainWorld']
if 'PerWorldBindings' in extended_attributes
else ['']), # [PerWorldBindings]
}
if is_constructor_attribute(attribute):
update_constructor_attribute_context(interface, attribute, context)
if not has_custom_getter(attribute):
getter_context(interface, attribute, context)
if not has_custom_setter(attribute) and has_setter(interface, attribute):
setter_context(interface, attribute, interfaces, context)
return context
def filter_has_accessor_configuration(attributes):
return [attribute for attribute in attributes if
not (attribute['exposed_test'] or
attribute['secure_context_test'] or
attribute['origin_trial_enabled_function'] or
attribute['runtime_enabled_function']) and
not attribute['is_data_type_property'] and
attribute['should_be_exposed_to_script']]
def filter_has_data_attribute_configuration(attributes):
return [attribute for attribute in attributes if
not (attribute['exposed_test'] or
attribute['secure_context_test'] or
attribute['origin_trial_enabled_function'] or
attribute['runtime_enabled_function']) and
attribute['is_data_type_property'] and
attribute['should_be_exposed_to_script']]
def is_lazy_data_attribute(attribute):
return attribute['constructor_type'] and not attribute['needs_constructor_getter_callback']
def filter_has_attribute_configuration(attributes):
return [attribute for attribute in filter_has_data_attribute_configuration(attributes) if not is_lazy_data_attribute(attribute)]
def filter_has_lazy_data_attribute_configuration(attributes):
return [attribute for attribute in filter_has_data_attribute_configuration(attributes) if is_lazy_data_attribute(attribute)]
def filter_origin_trial_enabled(attributes):
return [attribute for attribute in attributes if
attribute['origin_trial_feature_name'] and
not attribute['exposed_test']]
def filter_purely_runtime_enabled(attributes):
return [attribute for attribute in attributes if
not (attribute['exposed_test'] or
attribute['secure_context_test']) and
attribute['runtime_feature_name']]
def attribute_filters():
return {'has_accessor_configuration': filter_has_accessor_configuration,
'has_attribute_configuration': filter_has_attribute_configuration,
'has_lazy_data_attribute_configuration': filter_has_lazy_data_attribute_configuration,
'origin_trial_enabled_attributes': filter_origin_trial_enabled,
'purely_runtime_enabled_attributes': filter_purely_runtime_enabled}
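# Illustrative sketch (not part of the original file): the filters above consume
# the plain context dicts produced by attribute_context(). The dict below is a
# hypothetical, minimal stand-in containing only the keys those filters read.
def _example_filter_usage():
    context = {
        'exposed_test': None,
        'secure_context_test': None,
        'origin_trial_enabled_function': None,
        'runtime_enabled_function': None,
        'is_data_type_property': False,
        'should_be_exposed_to_script': True,
    }
    # An unconditionally enabled accessor-type attribute is selected by the
    # accessor filter and skipped by the data-attribute filter.
    assert filter_has_accessor_configuration([context]) == [context]
    assert filter_has_data_attribute_configuration([context]) == []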
################################################################################
# Getter
################################################################################
def getter_context(interface, attribute, context):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
cpp_value = getter_expression(interface, attribute, context)
# Normally we can inline the function call into the return statement to
# avoid the overhead of using a Ref<> temporary, but for some cases
# (nullable types, EventHandler, [CachedAttribute], or if there are
# exceptions), we need to use a local variable.
# FIXME: check if compilers are smart enough to inline this, and if so,
# always use a local variable (for readability and CG simplicity).
if 'ImplementedInPrivateScript' in extended_attributes:
if (not idl_type.is_wrapper_type and
not idl_type.is_basic_type and
not idl_type.is_enum):
            raise Exception('Private scripts support only primitive types and DOM wrappers.')
context['cpp_value_original'] = cpp_value
cpp_value = 'result'
elif (idl_type.is_explicit_nullable or
base_idl_type == 'EventHandler' or
'CachedAttribute' in extended_attributes or
'ReflectOnly' in extended_attributes or
context['is_keep_alive_for_gc'] or
context['is_getter_raises_exception']):
context['cpp_value_original'] = cpp_value
cpp_value = 'cppValue'
def v8_set_return_value_statement(for_main_world=False):
if context['is_keep_alive_for_gc'] or 'CachedAttribute' in extended_attributes:
return 'v8SetReturnValue(info, v8Value)'
return idl_type.v8_set_return_value(
cpp_value, extended_attributes=extended_attributes, script_wrappable='impl',
for_main_world=for_main_world, is_static=attribute.is_static)
context.update({
'cpp_value': cpp_value,
'cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
cpp_value=cpp_value, creation_context='holder',
extended_attributes=extended_attributes),
'v8_set_return_value_for_main_world': v8_set_return_value_statement(for_main_world=True),
'v8_set_return_value': v8_set_return_value_statement(),
})
def getter_expression(interface, attribute, context):
arguments = []
this_getter_base_name = getter_base_name(interface, attribute, arguments)
getter_name = scoped_name(interface, attribute, this_getter_base_name)
if 'ImplementedInPrivateScript' in attribute.extended_attributes:
arguments.append('toLocalFrame(toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext()))')
arguments.append('impl')
arguments.append('&result')
arguments.extend(v8_utilities.call_with_arguments(
attribute.extended_attributes.get('CallWith')))
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
if ('PartialInterfaceImplementedAs' in attribute.extended_attributes and
'ImplementedInPrivateScript' not in attribute.extended_attributes and
not attribute.is_static):
arguments.append('*impl')
if attribute.idl_type.is_explicit_nullable:
arguments.append('isNull')
if context['is_getter_raises_exception']:
arguments.append('exceptionState')
if attribute.idl_type.use_output_parameter_for_result:
arguments.append('result')
expression = '%s(%s)' % (getter_name, ', '.join(arguments))
# Needed to handle getter expressions returning Type& as the
# use site for |expression| expects Type*.
if attribute.idl_type.is_interface_type and len(arguments) == 0:
return 'WTF::getPtr(%s)' % expression
return expression
CONTENT_ATTRIBUTE_GETTER_NAMES = {
'boolean': 'fastHasAttribute',
'long': 'getIntegralAttribute',
'unsigned long': 'getUnsignedIntegralAttribute',
}
def getter_base_name(interface, attribute, arguments):
extended_attributes = attribute.extended_attributes
if 'ImplementedInPrivateScript' in extended_attributes:
return '%sAttributeGetter' % uncapitalize(cpp_name(attribute))
if 'Reflect' not in extended_attributes:
return uncapitalize(cpp_name(attribute))
content_attribute_name = extended_attributes['Reflect'] or attribute.name.lower()
if content_attribute_name in ['class', 'id', 'name']:
# Special-case for performance optimization.
return 'get%sAttribute' % content_attribute_name.capitalize()
arguments.append(scoped_content_attribute_name(interface, attribute))
base_idl_type = attribute.idl_type.base_type
if base_idl_type in CONTENT_ATTRIBUTE_GETTER_NAMES:
return CONTENT_ATTRIBUTE_GETTER_NAMES[base_idl_type]
if 'URL' in attribute.extended_attributes:
return 'getURLAttribute'
return 'fastGetAttribute'
def is_keep_alive_for_gc(interface, attribute):
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
return (
# For readonly attributes, for performance reasons we keep the attribute
# wrapper alive while the owner wrapper is alive, because the attribute
# never changes.
(attribute.is_read_only and
idl_type.is_wrapper_type and
# There are some exceptions, however:
not(
# Node lifetime is managed by object grouping.
inherits_interface(interface.name, 'Node') or
inherits_interface(base_idl_type, 'Node') or
# A self-reference is unnecessary.
attribute.name == 'self' or
# FIXME: Remove these hard-coded hacks.
base_idl_type in ['EventTarget', 'Window'] or
base_idl_type.startswith(('HTML', 'SVG')))))
################################################################################
# Setter
################################################################################
def setter_context(interface, attribute, interfaces, context):
if 'PutForwards' in attribute.extended_attributes:
# Use target interface and attribute in place of original interface and
# attribute from this point onwards.
target_interface_name = attribute.idl_type.base_type
target_attribute_name = attribute.extended_attributes['PutForwards']
interface = interfaces[target_interface_name]
try:
attribute = next(candidate
for candidate in interface.attributes
if candidate.name == target_attribute_name)
except StopIteration:
            raise Exception('[PutForwards] target not found:\n'
'Attribute "%s" is not present in interface "%s"' %
(target_attribute_name, target_interface_name))
if ('Replaceable' in attribute.extended_attributes):
context['cpp_setter'] = 'v8CallBoolean(info.Holder()->CreateDataProperty(info.GetIsolate()->GetCurrentContext(), propertyName, v8Value))'
return
extended_attributes = attribute.extended_attributes
idl_type = attribute.idl_type
# [RaisesException], [RaisesException=Setter]
is_setter_raises_exception = (
'RaisesException' in extended_attributes and
extended_attributes['RaisesException'] in [None, 'Setter'])
# [LegacyInterfaceTypeChecking]
has_type_checking_interface = (
not is_legacy_interface_type_checking(interface, attribute) and
idl_type.is_wrapper_type)
context.update({
'has_setter_exception_state':
is_setter_raises_exception or has_type_checking_interface or
idl_type.v8_conversion_needs_exception_state,
'has_type_checking_interface': has_type_checking_interface,
'is_setter_call_with_execution_context': has_extended_attribute_value(
attribute, 'SetterCallWith', 'ExecutionContext'),
'is_setter_raises_exception': is_setter_raises_exception,
'private_script_cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
'cppValue', isolate='scriptState->isolate()',
creation_context='scriptState->context()->Global()'),
'v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
extended_attributes, 'v8Value', 'cppValue'),
})
# setter_expression() depends on context values we set above.
context['cpp_setter'] = setter_expression(interface, attribute, context)
def setter_expression(interface, attribute, context):
extended_attributes = attribute.extended_attributes
arguments = v8_utilities.call_with_arguments(
extended_attributes.get('SetterCallWith') or
extended_attributes.get('CallWith'))
this_setter_base_name = setter_base_name(interface, attribute, arguments)
setter_name = scoped_name(interface, attribute, this_setter_base_name)
# Members of IDL partial interface definitions are implemented in C++ as
# static member functions, which for instance members (non-static members)
# take *impl as their first argument
if ('PartialInterfaceImplementedAs' in extended_attributes and
'ImplementedInPrivateScript' not in extended_attributes and
not attribute.is_static):
arguments.append('*impl')
idl_type = attribute.idl_type
if 'ImplementedInPrivateScript' in extended_attributes:
arguments.append('toLocalFrame(toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext()))')
arguments.append('impl')
arguments.append('cppValue')
elif idl_type.base_type == 'EventHandler':
getter_name = scoped_name(interface, attribute, cpp_name(attribute))
context['event_handler_getter_expression'] = '%s(%s)' % (
getter_name, ', '.join(arguments))
if (interface.name in ['Window', 'WorkerGlobalScope'] and
attribute.name == 'onerror'):
includes.add('bindings/core/v8/V8ErrorHandler.h')
arguments.append(
'V8EventListenerHelper::ensureEventListener<V8ErrorHandler>(' +
'v8Value, true, ScriptState::forReceiverObject(info))')
else:
arguments.append(
'V8EventListenerHelper::getEventListener(' +
'ScriptState::forReceiverObject(info), v8Value, true, ' +
'ListenerFindOrCreate)')
else:
arguments.append('cppValue')
if context['is_setter_raises_exception']:
arguments.append('exceptionState')
return '%s(%s)' % (setter_name, ', '.join(arguments))
CONTENT_ATTRIBUTE_SETTER_NAMES = {
'boolean': 'setBooleanAttribute',
'long': 'setIntegralAttribute',
'unsigned long': 'setUnsignedIntegralAttribute',
}
def setter_base_name(interface, attribute, arguments):
if 'ImplementedInPrivateScript' in attribute.extended_attributes:
return '%sAttributeSetter' % uncapitalize(cpp_name(attribute))
if 'Reflect' not in attribute.extended_attributes:
return 'set%s' % capitalize(cpp_name(attribute))
arguments.append(scoped_content_attribute_name(interface, attribute))
base_idl_type = attribute.idl_type.base_type
if base_idl_type in CONTENT_ATTRIBUTE_SETTER_NAMES:
return CONTENT_ATTRIBUTE_SETTER_NAMES[base_idl_type]
return 'setAttribute'
def scoped_content_attribute_name(interface, attribute):
content_attribute_name = attribute.extended_attributes['Reflect'] or attribute.name.lower()
if interface.name.startswith('SVG'):
namespace = 'SVGNames'
else:
namespace = 'HTMLNames'
includes.add('core/%s.h' % namespace)
return '%s::%sAttr' % (namespace, content_attribute_name)
################################################################################
# Attribute configuration
################################################################################
# Property descriptor's {writable: boolean}
def is_writable(attribute):
return (not attribute.is_read_only or
'PutForwards' in attribute.extended_attributes or
'Replaceable' in attribute.extended_attributes)
def is_data_type_property(interface, attribute):
return (is_constructor_attribute(attribute) or
interface.name == 'Window' or
interface.name == 'Location')
# [PutForwards], [Replaceable]
def has_setter(interface, attribute):
if (is_data_type_property(interface, attribute) and
(is_constructor_attribute(attribute) or
'Replaceable' in attribute.extended_attributes)):
return False
return is_writable(attribute)
# [DoNotCheckSecurity], [Unforgeable]
def access_control_list(interface, attribute):
extended_attributes = attribute.extended_attributes
access_control = []
if 'DoNotCheckSecurity' in extended_attributes:
do_not_check_security = extended_attributes['DoNotCheckSecurity']
if do_not_check_security == 'Setter':
access_control.append('v8::ALL_CAN_WRITE')
else:
access_control.append('v8::ALL_CAN_READ')
if has_setter(interface, attribute):
access_control.append('v8::ALL_CAN_WRITE')
if is_unforgeable(interface, attribute):
access_control.append('v8::PROHIBITS_OVERWRITING')
return access_control or ['v8::DEFAULT']
# [NotEnumerable], [Unforgeable]
def property_attributes(interface, attribute):
extended_attributes = attribute.extended_attributes
property_attributes_list = []
if ('NotEnumerable' in extended_attributes or
is_constructor_attribute(attribute)):
property_attributes_list.append('v8::DontEnum')
if is_unforgeable(interface, attribute):
property_attributes_list.append('v8::DontDelete')
if not is_writable(attribute):
property_attributes_list.append('v8::ReadOnly')
return property_attributes_list or ['v8::None']
# [Custom], [Custom=Getter]
def has_custom_getter(attribute):
extended_attributes = attribute.extended_attributes
return ('Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Getter'])
# [Custom], [Custom=Setter]
def has_custom_setter(attribute):
extended_attributes = attribute.extended_attributes
return (not attribute.is_read_only and
'Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Setter'])
################################################################################
# Constructors
################################################################################
idl_types.IdlType.constructor_type_name = property(
# FIXME: replace this with a [ConstructorAttribute] extended attribute
lambda self: strip_suffix(self.base_type, 'Constructor'))
def is_constructor_attribute(attribute):
# FIXME: replace this with [ConstructorAttribute] extended attribute
return attribute.idl_type.name.endswith('Constructor')
def update_constructor_attribute_context(interface, attribute, context):
context['needs_constructor_getter_callback'] = context['measure_as'] or context['deprecate_as']
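# Illustrative sketch (not part of the original file): the 'Constructor' naming
# convention the helpers above rely on, demonstrated with a plain string rather
# than a real IdlType ('FontFaceConstructor' is a hypothetical type name).
def _constructor_naming_example():
    base_type = 'FontFaceConstructor'
    # A type name ending in 'Constructor' marks a constructor attribute ...
    assert base_type.endswith('Constructor')
    # ... and constructor_type_name is the same name with the suffix stripped.
    assert strip_suffix(base_type, 'Constructor') == 'FontFace'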
|
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Source/bindings/scripts/v8_attributes.py
|
Python
|
gpl-3.0
| 29,128
|
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TestInput(object):
"""Groups information about a test for easy passing of data."""
def __init__(self, test_name, timeout):
"""Holds the input parameters for a test.
Args:
            test_name: name of the test (not an absolute path!)
            timeout: Timeout in msecs the driver should use while running the test
"""
self.test_name = test_name
self.timeout = timeout
# TestInput objects are normally constructed by the manager and passed
# to the workers, but these two fields are set lazily in the workers
# because they require us to figure out if the test is a reftest or not
# and we want to be able to do that in parallel.
self.should_run_pixel_tests = None
self.reference_files = None
def __repr__(self):
return "TestInput('%s', %d, %s, %s)" % (self.test_name, self.timeout, self.should_run_pixel_tests, self.reference_files)
|
cs-au-dk/Artemis
|
WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
|
Python
|
gpl-3.0
| 2,580
|
# coding: utf-8
import os.path
from datetime import datetime
from django.conf import settings
from django.test.client import RequestFactory
from django.test.utils import override_settings
import pytest
from django_jinja.backend import Jinja2
from jinja2 import Markup
from mock import patch
from pyquery import PyQuery as pq
from bedrock.base.templatetags.helpers import static
from bedrock.mozorg.templatetags import misc
from bedrock.mozorg.tests import TestCase
from lib.l10n_utils.fluent import fluent_l10n
TEST_FILES_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test_files')
TEST_L10N_MEDIA_PATH = os.path.join(TEST_FILES_ROOT, 'media', '%s', 'l10n')
TEST_DONATE_LINK = ('https://donate.mozilla.org/{locale}/'
'?presets={presets}&amount={default}'
'&utm_source=mozilla.org&utm_medium=referral&utm_content={source}'
                    '&currency={currency}')
TEST_DONATE_PARAMS = {
'en-US': {
'currency': 'usd',
'presets': '100,50,25,15',
'default': '50'
},
'es-MX': {
'currency': 'eur',
'presets': '100,50,25,15',
'default': '15'
},
}
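# Illustrative sketch (not part of the original module): how the placeholders in
# TEST_DONATE_LINK line up with TEST_DONATE_PARAMS, matching the URL asserted in
# TestDonateUrl.test_donate_url_english below. Formatting the template directly
# is only for reference; the real URL is built by the donate_url() helper.
def _example_donate_link():
    params = TEST_DONATE_PARAMS['en-US']
    url = TEST_DONATE_LINK.format(locale='en-US', source='mozillaorg_footer',
                                  presets=params['presets'], default=params['default'],
                                  currency=params['currency'])
    assert url == ('https://donate.mozilla.org/en-US/'
                   '?presets=100,50,25,15&amount=50'
                   '&utm_source=mozilla.org&utm_medium=referral'
                   '&utm_content=mozillaorg_footer&currency=usd')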
TEST_FIREFOX_TWITTER_ACCOUNTS = {
'en-US': 'https://twitter.com/firefox',
'es-ES': 'https://twitter.com/firefox_es',
'pt-BR': 'https://twitter.com/firefoxbrasil',
}
TEST_FXA_ENDPOINT = 'https://accounts.firefox.com/'
TEST_FXA_MOZILLAONLINE_ENDPOINT = 'https://accounts.firefox.com.cn/'
jinja_env = Jinja2.get_default()
# Where should this function go?
def render(s, context=None):
t = jinja_env.from_string(s)
return t.render(context or {})
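# Illustrative sketch (not part of the original module): minimal use of the
# render() helper above; the template string and expected output are assumptions.
def _example_render_usage():
    assert render('{{ 1 + 1 }}') == '2'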
def test_convert_to_high_res():
assert misc.convert_to_high_res('/media/img/the.dude.png') == '/media/img/the.dude-high-res.png'
assert misc.convert_to_high_res('/media/thats-a-bummer-man.jpg') == '/media/thats-a-bummer-man-high-res.jpg'
@patch('bedrock.mozorg.templatetags.misc._l10n_media_exists')
@patch('django.conf.settings.LANGUAGE_CODE', 'en-US')
class TestImgL10n(TestCase):
rf = RequestFactory()
def _render(self, locale, url):
req = self.rf.get('/')
req.locale = locale
return render("{{{{ l10n_img('{0}') }}}}".format(url),
{'request': req})
def test_works_for_default_lang(self, media_exists_mock):
"""Should output correct path for default lang always."""
media_exists_mock.return_value = True
assert self._render('en-US', 'dino/head.png') == static('img/l10n/en-US/dino/head.png')
assert self._render('en-US', 'img/dino/head.png') == static('img/l10n/en-US/dino/head.png')
assert self._render('en-US', 'dino/does-not-exist.png') == static('img/l10n/en-US/dino/does-not-exist.png')
def test_works_for_other_lang(self, media_exists_mock):
"""Should use the request lang if file exists."""
media_exists_mock.return_value = True
assert self._render('de', 'dino/head.png') == static('img/l10n/de/dino/head.png')
assert self._render('de', 'img/dino/head.png') == static('img/l10n/de/dino/head.png')
def test_defaults_when_lang_file_missing(self, media_exists_mock):
"""Should use default lang when file doesn't exist for lang."""
media_exists_mock.return_value = False
assert self._render('is', 'dino/head.png') == static('img/l10n/en-US/dino/head.png')
def test_latam_spanishes_fallback_to_european_spanish(self, media_exists_mock):
"""Should use es-ES image when file doesn't exist for lang."""
media_exists_mock.side_effect = [False, True]
assert self._render('es-AR', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
media_exists_mock.reset_mock()
media_exists_mock.side_effect = [False, True]
assert self._render('es-CL', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
media_exists_mock.reset_mock()
media_exists_mock.side_effect = [False, True]
assert self._render('es-MX', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
media_exists_mock.reset_mock()
media_exists_mock.side_effect = [False, True]
assert self._render('es', 'dino/head.png') == static('img/l10n/es-ES/dino/head.png')
def test_file_not_checked_for_default_lang(self, media_exists_mock):
"""
Should not check filesystem for default lang, but should for others.
"""
assert self._render('en-US', 'dino/does-not-exist.png') == static('img/l10n/en-US/dino/does-not-exist.png')
assert not media_exists_mock.called
self._render('is', 'dino/does-not-exist.png')
media_exists_mock.assert_called_once_with('img', 'is', 'dino/does-not-exist.png')
@override_settings(DEBUG=False)
@patch('bedrock.mozorg.templatetags.misc._l10n_media_exists')
class TestL10nCSS(TestCase):
rf = RequestFactory()
static_url_dev = '/static/'
cdn_url = '//mozorg.cdn.mozilla.net'
static_url_prod = cdn_url + static_url_dev
markup = ('<link rel="stylesheet" media="screen,projection,tv" href='
'"%scss/l10n/%s/intl.css">')
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
return render('{{ l10n_css() }}', {'request': req})
@override_settings(DEV=True)
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_dev)
def test_dev_when_css_file_exists(self, media_exists_mock):
"""Should output a path to the CSS file if exists."""
media_exists_mock.return_value = True
assert self._render('de') == self.markup % (self.static_url_dev, 'de')
assert self._render('es-ES') == self.markup % (self.static_url_dev, 'es-ES')
@override_settings(DEV=True)
def test_dev_when_css_file_missing(self, media_exists_mock):
"""Should output nothing if the CSS file is missing."""
media_exists_mock.return_value = False
assert self._render('en-US') == ''
assert self._render('fr') == ''
@override_settings(DEV=False)
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_prod)
def test_prod_when_css_file_exists(self, media_exists_mock):
"""Should output a path to the CSS file if exists."""
media_exists_mock.return_value = True
assert self._render('de') == self.markup % (self.static_url_prod, 'de')
assert self._render('es-ES') == self.markup % (self.static_url_prod, 'es-ES')
@override_settings(DEV=False)
def test_prod_when_css_file_missing(self, media_exists_mock):
"""Should output nothing if the CSS file is missing."""
media_exists_mock.return_value = False
assert self._render('en-US') == ''
assert self._render('fr') == ''
class TestVideoTag(TestCase):
rf = RequestFactory()
# Video stubs
moz_video = 'http://videos.mozilla.org/serv/flux/example.%s'
nomoz_video = 'http://example.org/example.%s'
def get_l10n(self, locale):
return fluent_l10n([locale, 'en'], settings.FLUENT_DEFAULT_FILES)
def _render(self, template):
req = self.rf.get('/')
req.locale = 'en-US'
return render(template, {'request': req, 'fluent_l10n': self.get_l10n(req.locale)})
def test_empty(self):
# No video, no output.
assert render('{{ video() }}') == ''
def test_video(self):
# A few common variations
videos = [self.nomoz_video % ext for ext in ('ogv', 'mp4', 'webm')]
doc = pq(self._render("{{ video%s }}" % str(tuple(videos))))
# Tags generated?
assert doc('video').length == 1
assert doc('video source').length == 3
# Extensions in the right order?
extensions = [os.path.splitext(el.attrib['src'])[1] for el in doc('video source')]
assert extensions == ['.webm', '.ogv', '.mp4']
def test_prefix(self):
# Prefix should be applied to all videos.
doc = pq(self._render(
"{{ video('meh.mp4', 'meh.ogv', prefix='http://example.com/blah/') }}")
)
assert [el.attrib['src'] for el in doc('video source')] == [
'http://example.com/blah/meh.ogv',
'http://example.com/blah/meh.mp4',
]
def test_fileformats(self):
# URLs ending in strange extensions are ignored.
videos = [self.nomoz_video % ext for ext in
('ogv', 'exe', 'webm', 'txt')]
videos.append('http://example.net/noextension')
doc = pq(self._render("{{ video%s }}" % (str(tuple(videos)))))
assert doc('video source').length == 2
extensions = [os.path.splitext(el.attrib['src'])[1] for el in doc('video source')]
assert extensions == ['.webm', '.ogv']
@override_settings(STATIC_URL='/media/')
@patch('bedrock.mozorg.templatetags.misc.find_static', return_value=True)
class TestPlatformImg(TestCase):
rf = RequestFactory()
def _render(self, url, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ platform_img('{0}', {1}) }}}}".format(url, optional_attributes),
{'request': req})
def _render_l10n(self, url):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ l10n_img('{0}') }}}}".format(url),
{'request': req})
def test_platform_img_no_optional_attributes(self, find_static):
"""Should return expected markup without optional attributes"""
markup = self._render('test.png')
self.assertIn(u'data-src-windows="/media/test-windows.png"', markup)
self.assertIn(u'data-src-mac="/media/test-mac.png"', markup)
markup = self._render('img/test.png')
self.assertIn(u'data-src-windows="/media/img/test-windows.png"', markup)
self.assertIn(u'data-src-mac="/media/img/test-mac.png"', markup)
def test_platform_img_with_optional_attributes(self, find_static):
"""Should return expected markup with optional attributes"""
markup = self._render('test.png', {'data-test-attr': 'test'})
self.assertIn(u'data-test-attr="test"', markup)
def test_platform_img_with_high_res(self, find_static):
"""Should return expected markup with high resolution image attrs"""
markup = self._render('test.png', {'high-res': True})
self.assertIn(u'data-src-windows-high-res="/media/test-windows-high-res.png"', markup)
self.assertIn(u'data-src-mac-high-res="/media/test-mac-high-res.png"', markup)
self.assertIn(u'data-high-res="true"', markup)
markup = self._render('img/test.png', {'high-res': True})
self.assertIn(u'data-src-windows-high-res="/media/img/test-windows-high-res.png"', markup)
self.assertIn(u'data-src-mac-high-res="/media/img/test-mac-high-res.png"', markup)
self.assertIn(u'data-high-res="true"', markup)
def test_platform_img_with_l10n(self, find_static):
"""Should return expected markup with l10n image path"""
l10n_url_win = self._render_l10n('test-windows.png')
l10n_url_mac = self._render_l10n('test-mac.png')
markup = self._render('test.png', {'l10n': True})
self.assertIn(u'data-src-windows="' + l10n_url_win + '"', markup)
self.assertIn(u'data-src-mac="' + l10n_url_mac + '"', markup)
markup = self._render('/img/test.png', {'l10n': True})
self.assertIn(u'data-src-windows="' + l10n_url_win + '"', markup)
self.assertIn(u'data-src-mac="' + l10n_url_mac + '"', markup)
def test_platform_img_with_l10n_and_optional_attributes(self, find_static):
"""
Should return expected markup with l10n image path and optional
attributes
"""
l10n_url_win = self._render_l10n('test-windows.png')
l10n_url_mac = self._render_l10n('test-mac.png')
markup = self._render('test.png', {'l10n': True, 'data-test-attr': 'test'})
self.assertIn(u'data-src-windows="' + l10n_url_win + '"', markup)
self.assertIn(u'data-src-mac="' + l10n_url_mac + '"', markup)
self.assertIn(u'data-test-attr="test"', markup)
def test_platform_img_with_l10n_and_high_res(self, find_static):
"""
Should return expected markup with l10n image path and high resolution
attributes
"""
l10n_url_win = self._render_l10n('test-windows.png')
l10n_hr_url_win = misc.convert_to_high_res(l10n_url_win)
l10n_url_mac = self._render_l10n('test-mac.png')
l10n_hr_url_mac = misc.convert_to_high_res(l10n_url_mac)
markup = self._render('test.png', {'l10n': True, 'high-res': True})
self.assertIn(u'data-src-windows-high-res="' + l10n_hr_url_win + '"', markup)
self.assertIn(u'data-src-mac-high-res="' + l10n_hr_url_mac + '"', markup)
self.assertIn(u'data-high-res="true"', markup)
class TestPressBlogUrl(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
return render("{{{{ press_blog_url() }}}}".format('/'),
{'request': req})
def test_press_blog_url_no_locale(self):
"""No locale, fallback to default press blog"""
assert self._render('') == 'https://blog.mozilla.org/press/'
def test_press_blog_url_english(self):
"""en-US locale, default press blog"""
assert self._render('en-US') == 'https://blog.mozilla.org/press/'
def test_press_blog_url_europe(self):
"""Major European locales have their own blog"""
assert self._render('es-ES') == 'https://blog.mozilla.org/press-es/'
assert self._render('fr') == 'https://blog.mozilla.org/press-fr/'
assert self._render('de') == 'https://blog.mozilla.org/press-de/'
assert self._render('pl') == 'https://blog.mozilla.org/press-pl/'
assert self._render('it') == 'https://blog.mozilla.org/press-it/'
assert self._render('en-GB') == 'https://blog.mozilla.org/press-uk/'
def test_press_blog_url_latam(self):
"""South American Spanishes use the es-ES blog"""
assert self._render('es-AR') == 'https://blog.mozilla.org/press-es/'
assert self._render('es-CL') == 'https://blog.mozilla.org/press-es/'
assert self._render('es-MX') == 'https://blog.mozilla.org/press-es/'
def test_press_blog_url_brazil(self):
"""Brazilian Portuguese has its own br blog"""
assert self._render('pt-BR') == 'https://blog.mozilla.org/press-br/'
def test_press_blog_url_other_locale(self):
"""No blog for locale, fallback to default press blog"""
assert self._render('oc') == 'https://blog.mozilla.org/press/'
@override_settings(
DONATE_LINK=TEST_DONATE_LINK,
DONATE_PARAMS=TEST_DONATE_PARAMS,
)
class TestDonateUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, source=''):
req = self.rf.get('/')
req.locale = locale
return render("{{{{ donate_url('{0}') }}}}".format(source),
{'request': req})
def test_donate_url_no_locale(self):
"""No locale, fallback to default page"""
assert self._render('', 'mozillaorg_footer') == (
'https://donate.mozilla.org//'
'?presets=100,50,25,15&amount=50'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=usd')
def test_donate_url_english(self):
"""en-US locale, default page"""
assert self._render('en-US', 'mozillaorg_footer') == (
'https://donate.mozilla.org/en-US/'
'?presets=100,50,25,15&amount=50'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=usd')
def test_donate_url_spanish(self):
"""es-MX locale, a localized page"""
assert self._render('es-MX', 'mozillaorg_footer') == (
'https://donate.mozilla.org/es-MX/'
'?presets=100,50,25,15&amount=15'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=eur')
def test_donate_url_other_locale(self):
"""No page for locale, fallback to default page"""
assert self._render('pt-PT', 'mozillaorg_footer') == (
'https://donate.mozilla.org/pt-PT/'
'?presets=100,50,25,15&amount=50'
'&utm_source=mozilla.org&utm_medium=referral'
'&utm_content=mozillaorg_footer&currency=usd')
@override_settings(FIREFOX_TWITTER_ACCOUNTS=TEST_FIREFOX_TWITTER_ACCOUNTS)
class TestFirefoxTwitterUrl(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
return render('{{ firefox_twitter_url() }}', {'request': req})
def test_firefox_twitter_url_no_locale(self):
"""No locale, fallback to default account"""
assert self._render('') == 'https://twitter.com/firefox'
def test_firefox_twitter_url_english(self):
"""en-US locale, default account"""
assert self._render('en-US') == 'https://twitter.com/firefox'
def test_firefox_twitter_url_spanish(self):
"""es-ES locale, a local account"""
assert self._render('es-ES') == 'https://twitter.com/firefox_es'
def test_firefox_twitter_url_portuguese(self):
"""pt-BR locale, a local account"""
assert self._render('pt-BR') == 'https://twitter.com/firefoxbrasil'
def test_firefox_twitter_url_other_locale(self):
"""No account for locale, fallback to default account"""
assert self._render('es-AR') == 'https://twitter.com/firefox'
assert self._render('es-CL') == 'https://twitter.com/firefox'
assert self._render('es-MX') == 'https://twitter.com/firefox'
assert self._render('pt-PT') == 'https://twitter.com/firefox'
@override_settings(STATIC_URL='/media/')
class TestHighResImg(TestCase):
rf = RequestFactory()
def _render(self, url, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ high_res_img('{0}', {1}) }}}}".format(url, optional_attributes),
{'request': req})
def _render_l10n(self, url):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ l10n_img('{0}') }}}}".format(url),
{'request': req})
def test_high_res_img_no_optional_attributes(self):
"""Should return expected markup without optional attributes"""
expected = (
u'<img class="" src="/media/img/test.png" '
u'srcset="/media/img/test-high-res.png 1.5x">')
markup = self._render('img/test.png')
self.assertEqual(markup, expected)
def test_high_res_img_with_optional_attributes(self):
"""Should return expected markup with optional attributes"""
markup = self._render('img/test.png', {'data-test-attr': 'test', 'class': 'logo'})
expected = (
u'<img class="logo" src="/media/img/test.png" '
u'srcset="/media/img/test-high-res.png 1.5x" '
u'data-test-attr="test">')
self.assertEqual(markup, expected)
def test_high_res_img_with_l10n(self):
"""Should return expected markup with l10n image path"""
l10n_url = self._render_l10n('test.png')
l10n_hr_url = misc.convert_to_high_res(l10n_url)
markup = self._render('test.png', {'l10n': True})
expected = (
u'<img class="" src="' + l10n_url + '" '
u'srcset="' + l10n_hr_url + ' 1.5x">')
self.assertEqual(markup, expected)
l10n_url = self._render_l10n('img/test.png')
l10n_hr_url = misc.convert_to_high_res(l10n_url)
markup = self._render('test.png', {'l10n': True})
expected = (
u'<img class="" src="' + l10n_url + '" '
u'srcset="' + l10n_hr_url + ' 1.5x">')
self.assertEqual(markup, expected)
def test_high_res_img_with_l10n_and_optional_attributes(self):
"""Should return expected markup with l10n image path"""
l10n_url = self._render_l10n('test.png')
l10n_hr_url = misc.convert_to_high_res(l10n_url)
markup = self._render('test.png', {'l10n': True, 'data-test-attr': 'test'})
expected = (
u'<img class="" src="' + l10n_url + '" '
u'srcset="' + l10n_hr_url + ' 1.5x" data-test-attr="test">')
self.assertEqual(markup, expected)
@override_settings(STATIC_URL='/media/')
class TestLazyImg(TestCase):
rf = RequestFactory()
def _render(self, image_url, placeholder_url, include_highres_image=False, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ lazy_img('{0}', '{1}', {2}, {3}) }}}}".format(image_url, placeholder_url, include_highres_image, optional_attributes),
{'request': req})
def test_lazy_img(self):
"""Should return expected markup"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png',
include_highres_image=True, optional_attributes={'class': 'the-dude', 'alt': 'abides', 'width': '300'})
expected = (
u'<div class="lazy-image-container">'
u'<img class="the-dude" src="/media/img/placeholder.png" data-src="/media/img/test.png" '
u'data-srcset="/media/img/test-high-res.png 2x" alt="abides" width="300">'
u'<noscript><img class="the-dude" src="/media/img/test.png" '
u'data-srcset="/media/img/test-high-res.png 2x" alt="abides" width="300"></noscript>'
u'</div>')
self.assertEqual(markup, expected)
def test_lazy_img_no_highres_image(self):
"""Should return no highres image"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png')
self.assertIn(u'src="/media/img/placeholder.png"', markup)
self.assertIn(u'data-src="/media/img/test.png"', markup)
self.assertNotIn(u'data-srcset="/media/img/test-high-res.png 2x"', markup)
def test_lazy_img_no_optional_attributes(self):
"""Should return default class and alt values if no optional attributes are provided"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png')
self.assertIn(u'class="lazy-image"', markup)
self.assertIn(u'alt=""', markup)
def test_lazy_img_optional_attributes(self):
"""Should return expected optional attributes"""
markup = self._render(image_url='img/test.png', placeholder_url='img/placeholder.png',
optional_attributes={'class': 'the-dude', 'alt': 'abides', 'width': '300'})
self.assertNotIn(u'class="lazy-image"', markup)
self.assertIn(u'class="the-dude"', markup)
self.assertIn(u'alt="abides"', markup)
self.assertIn(u'width="300"', markup)
def test_lazy_img_external(self):
"""Should allow an external image and ignore include_highres_image"""
markup = self._render(image_url='https://www.test.com/test.png', placeholder_url='img/placeholder.png',
include_highres_image=True)
self.assertIn(u'src="/media/img/placeholder.png"', markup)
self.assertIn(u'data-src="https://www.test.com/test.png"', markup)
self.assertNotIn(u'data-srcset="', markup)
class TestAbsoluteURLFilter(TestCase):
rf = RequestFactory()
static_url_dev = '/static/'
static_url_prod = '//mozorg.cdn.mozilla.net/static/'
static_url_full = 'https://mozorg.cdn.mozilla.net/static/'
image_path = 'img/mozorg/mozilla-256.jpg'
inline_template = "{{ static('%s')|absolute_url }}" % image_path
block_template = "{% filter absolute_url %}{% block page_image %}" + \
"{{ static('%s') }}" % image_path + "{% endblock %}{% endfilter %}"
def _render(self, template):
return render(template, {'request': self.rf.get('/')})
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_dev)
def test_image_dev(self):
"""Should return a fully qualified URL including a protocol"""
expected = settings.CANONICAL_URL + self.static_url_dev + self.image_path
assert self._render(self.inline_template) == expected
assert self._render(self.block_template) == expected
@patch('django.contrib.staticfiles.storage.staticfiles_storage.base_url', static_url_prod)
def test_image_prod(self):
"""Should return a fully qualified URL including a protocol"""
expected = 'https:' + self.static_url_prod + self.image_path
assert self._render(self.inline_template) == expected
assert self._render(self.block_template) == expected
@override_settings(DEV=False)
def test_urls(self):
"""Should return a fully qualified URL including a protocol"""
expected = 'https://www.mozilla.org/en-US/firefox/new/'
assert misc.absolute_url('/en-US/firefox/new/') == expected
assert misc.absolute_url('//www.mozilla.org/en-US/firefox/new/') == expected
assert misc.absolute_url('https://www.mozilla.org/en-US/firefox/new/') == expected
class TestFirefoxIOSURL(TestCase):
rf = RequestFactory()
def _render(self, locale, ct_param=None):
req = self.rf.get('/')
req.locale = locale
if ct_param:
return render("{{ firefox_ios_url('%s') }}" % ct_param,
{'request': req})
return render("{{ firefox_ios_url() }}", {'request': req})
def test_firefox_ios_url_no_locale(self):
"""No locale, fallback to default URL"""
assert (
self._render('') == 'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926')
def test_firefox_ios_url_default(self):
"""should fallback to default URL"""
assert (
self._render('ar') == 'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926')
assert (
self._render('zu') == 'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926')
def test_firefox_ios_url_localized(self):
"""should return localized URL"""
assert (
self._render('en-US') == 'https://itunes.apple.com/us'
'/app/firefox-private-safe-browser/id989804926')
assert (
self._render('es-ES') == 'https://itunes.apple.com/es'
'/app/firefox-private-safe-browser/id989804926')
assert (
self._render('ja') == 'https://itunes.apple.com/jp'
'/app/firefox-private-safe-browser/id989804926')
def test_firefox_ios_url_param(self):
"""should return default or localized URL with ct param"""
assert self._render('', 'mozorg') == (
'https://itunes.apple.com'
'/app/firefox-private-safe-browser/id989804926?ct=mozorg')
assert self._render('en-US', 'mozorg') == (
'https://itunes.apple.com/us'
'/app/firefox-private-safe-browser/id989804926?ct=mozorg')
assert self._render('es-ES', 'mozorg') == (
'https://itunes.apple.com/es'
'/app/firefox-private-safe-browser/id989804926?ct=mozorg')
# from jingo
def test_f():
s = render('{{ "{0} : {z}"|f("a", z="b") }}')
assert s == 'a : b'
def test_f_unicode():
s = render('{{ "foo {0}"|f(bar) }}', {'bar': u'bar\xe9'})
assert s == u'foo bar\xe9'
s = render('{{ t|f(bar) }}', {'t': u'\xe9 {0}', 'bar': 'baz'})
assert s == u'\xe9 baz'
format_string = 'Hello <b>{0}</b>'
format_markup = Markup(format_string)
val_string = '<em>Steve</em>'
val_markup = Markup(val_string)
@pytest.mark.parametrize('f, v', [
(format_string, val_string),
(format_string, val_markup),
(format_markup, val_string),
(format_markup, val_markup),
])
def test_f_markup(f, v):
expect = 'Hello <b><em>Steve</em></b>'
s = render('{{ fmt|f(val) }}', {'fmt': f, 'val': v})
assert expect == s
def test_datetime():
time = datetime(2009, 12, 25, 10, 11, 12)
s = render('{{ d|datetime }}', {'d': time})
assert s == 'December 25, 2009'
s = render('{{ d|datetime("%Y-%m-%d %H:%M:%S") }}', {'d': time})
assert s == '2009-12-25 10:11:12'
s = render('{{ None|datetime }}')
assert s == ''
def test_datetime_unicode():
fmt = u"%Y 年 %m 月 %e 日"
misc.datetime(datetime.now(), fmt)
def test_ifeq():
eq_context = {'a': 1, 'b': 1}
neq_context = {'a': 1, 'b': 2}
s = render('{{ a|ifeq(b, "<b>something</b>") }}', eq_context)
assert s == '<b>something</b>'
s = render('{{ a|ifeq(b, "<b>something</b>") }}', neq_context)
assert s == ''
def test_csrf():
s = render('{{ csrf() }}', {'csrf_token': 'fffuuu'})
csrf = '<input type="hidden" name="csrfmiddlewaretoken" value="fffuuu">'
assert csrf in s
class TestAppStoreURL(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
product = 'lockwise'
return render("{{ app_store_url('%s') }}" % product,
{'request': req})
def test_app_store_url_no_locale(self):
"""No locale, fallback to default URL"""
assert (self._render('') == 'https://itunes.apple.com/app/id1314000270?mt=8')
def test_app_store_url_default(self):
"""should fallback to default URL"""
assert (self._render('ar') == 'https://itunes.apple.com/app/id1314000270?mt=8')
assert (self._render('zu') == 'https://itunes.apple.com/app/id1314000270?mt=8')
def test_app_store_url_localized(self):
"""should return localized URL"""
assert (self._render('en-US') == 'https://itunes.apple.com/us/app/id1314000270?mt=8')
assert (self._render('es-ES') == 'https://itunes.apple.com/es/app/id1314000270?mt=8')
assert (self._render('de') == 'https://itunes.apple.com/de/app/id1314000270?mt=8')
class TestPlayStoreURL(TestCase):
rf = RequestFactory()
def _render(self, locale):
req = self.rf.get('/')
req.locale = locale
product = 'lockwise'
return render("{{ play_store_url('%s') }}" % product,
{'request': req})
def test_play_store_url_localized(self):
"""should return localized URL"""
assert (self._render('en-US') == 'https://play.google.com/store/apps/details?id=mozilla.lockbox&hl=en')
assert (self._render('es-ES') == 'https://play.google.com/store/apps/details?id=mozilla.lockbox&hl=es')
assert (self._render('de') == 'https://play.google.com/store/apps/details?id=mozilla.lockbox&hl=de')
class TestStructuredDataID(TestCase):
rf = RequestFactory()
def _render(self, locale, domain=None):
req = self.rf.get('/')
req.locale = locale
sd_id = 'firefoxbrowser'
if domain:
return render("{{{{ structured_data_id('{0}', '{1}') }}}}".format(
sd_id, domain), {'request': req})
return render("{{ structured_data_id('%s') }}" % sd_id,
{'request': req})
def test_structured_data_localized_id(self):
"""should return localized id"""
assert (self._render('en-US') == 'https://www.mozilla.org/#firefoxbrowser')
assert (self._render('es-ES') == 'https://www.mozilla.org/#firefoxbrowser-es-es')
assert (self._render('de') == 'https://www.mozilla.org/#firefoxbrowser-de')
def test_structured_data_custom_domain_id(self):
"""should return id for a custom domain"""
domain = 'https://foundation.mozilla.org'
assert (self._render('en-US', domain) == 'https://foundation.mozilla.org/#firefoxbrowser')
assert (self._render('es-ES', domain) == 'https://foundation.mozilla.org/#firefoxbrowser-es-es')
assert (self._render('de', domain) == 'https://foundation.mozilla.org/#firefoxbrowser-de')
class TestLangShort(TestCase):
rf = RequestFactory()
def _render(self, locale, domain=None):
req = self.rf.get('/')
req.locale = locale
return render("{{ lang_short() }}", {'request': req})
def test_shortened_locales(self):
"""should return a shortened locale code"""
assert (self._render('en-US') == 'en')
assert (self._render('es-ES') == 'es')
assert (self._render('de') == 'de')
class TestFirefoxAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ firefox_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ firefox_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_firefox_ios_adjust_url(self):
"""Firefox for mobile with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/2uo1qc?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Ffirefox-private-safe-browser%2Fid989804926'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_firefox_ios_adjust_url_creative(self):
"""Firefox for mobile with an App Store URL redirect and creative param"""
assert (
self._render('de', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/2uo1qc?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Ffirefox-private-safe-browser%2Fid989804926'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_firefox_android_adjust_url(self):
"""Firefox for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/2uo1qc?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dorg.mozilla.firefox'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_firefox_no_redirect_adjust_url(self):
"""Firefox for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/2uo1qc?'
'campaign=www.mozilla.org&adgroup=test-page')
class TestFocusAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ focus_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ focus_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_focus_ios_adjust_url(self):
"""Firefox Focus with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/b8s7qo?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Ffirefox-focus-privacy-browser%2Fid1055677337'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_focus_ios_adjust_url_creative(self):
"""Firefox Focus with an App Store URL redirect and creative param"""
assert (
self._render('fr', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/b8s7qo?'
'redirect=https%3A%2F%2Fitunes.apple.com%2Ffr%2Fapp%2Ffirefox-focus-privacy-browser%2Fid1055677337'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_focus_android_adjust_url(self):
"""Firefox Focus for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/b8s7qo?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dorg.mozilla.focus'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_focus_no_redirect_adjust_url(self):
"""Firefox Focus for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/b8s7qo?'
'campaign=www.mozilla.org&adgroup=test-page')
def test_klar_ios_adjust_url(self):
"""Firefox Klar with an App Store URL redirect"""
assert (
self._render('de', 'ios', 'test-page') == 'https://app.adjust.com/jfcx5x?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Fklar-by-firefox%2Fid1073435754'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_klar_android_adjust_url(self):
"""Firefox Klar for mobile with a Play Store redirect"""
assert (
self._render('de', 'android', 'test-page') == 'https://app.adjust.com/jfcx5x?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dorg.mozilla.klar'
'&campaign=www.mozilla.org&adgroup=test-page')
class TestLockwiseAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ lockwise_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ lockwise_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_lockwise_ios_adjust_url(self):
"""Firefox Lockwise for mobile with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/6tteyjo?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Fid1314000270%3Fmt%3D8'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_lockwise_ios_adjust_url_creative(self):
"""Firefox Lockwise for mobile with an App Store URL redirect and creative param"""
assert (
self._render('de', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/6tteyjo'
'?redirect=https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Fid1314000270%3Fmt%3D8'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_lockwise_android_adjust_url(self):
"""Firefox Lockwise for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/6tteyjo?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dmozilla.lockbox'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_lockwise_no_redirect_adjust_url(self):
"""Firefox Lockwise for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/6tteyjo'
'?campaign=www.mozilla.org&adgroup=test-page')
class TestPocketAdjustUrl(TestCase):
rf = RequestFactory()
def _render(self, locale, redirect, adgroup, creative=None):
req = self.rf.get('/')
req.locale = locale
if creative:
return render("{{{{ pocket_adjust_url('{0}', '{1}', '{2}') }}}}".format(
redirect, adgroup, creative), {'request': req})
return render("{{{{ pocket_adjust_url('{0}', '{1}') }}}}".format(
redirect, adgroup), {'request': req})
def test_pocket_ios_adjust_url(self):
"""Pocket for mobile with an App Store URL redirect"""
assert (
self._render('en-US', 'ios', 'test-page') == 'https://app.adjust.com/m54twk?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fus%2Fapp%2Fpocket-save-read-grow%2Fid309601447'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_pocket_ios_adjust_url_creative(self):
"""Pocket for mobile with an App Store URL redirect and creative param"""
assert (
self._render('de', 'ios', 'test-page', 'experiment-name') == 'https://app.adjust.com/m54twk?redirect='
'https%3A%2F%2Fitunes.apple.com%2Fde%2Fapp%2Fpocket-save-read-grow%2Fid309601447'
'&campaign=www.mozilla.org&adgroup=test-page&creative=experiment-name')
def test_pocket_android_adjust_url(self):
"""Pocket for mobile with a Play Store redirect"""
assert (
self._render('en-US', 'android', 'test-page') == 'https://app.adjust.com/m54twk?redirect='
'https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dcom.ideashower.readitlater.pro'
'&campaign=www.mozilla.org&adgroup=test-page')
def test_pocket_no_redirect_adjust_url(self):
"""Pocket for mobile with no redirect"""
assert (
self._render('en-US', None, 'test-page') == 'https://app.adjust.com/m54twk?'
'campaign=www.mozilla.org&adgroup=test-page')
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
class TestPocketFxAButton(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, button_text, class_name=None, is_button_class=True, include_metrics=True,
optional_parameters=None, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ pocket_fxa_button('{0}', '{1}', '{2}', {3}, {4}, {5}, {6}) }}}}".format(
entrypoint, button_text, class_name, is_button_class, include_metrics,
optional_parameters, optional_attributes), {'request': req})
def test_pocket_fxa_button(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-pocket', button_text='Try Pocket Now',
class_name='pocket-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'s': 'ffpocket', 'foo': 'bar'},
optional_attributes={'data-cta-text': 'Try Pocket Now', 'data-cta-type': 'activate pocket',
'data-cta-position': 'primary'})
expected = (
u'<a href="https://getpocket.com/ff_signup?entrypoint=mozilla.org-firefox-pocket&form_type=button'
u'&utm_source=mozilla.org-firefox-pocket&utm_medium=referral&s=ffpocket&foo=bar" data-action="https://accounts.firefox.com/" '
u'class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product pocket-main-cta-button" '
u'data-cta-text="Try Pocket Now" data-cta-type="activate pocket" data-cta-position="primary">Try Pocket Now</a>')
self.assertEqual(markup, expected)
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
class TestMonitorFxAButton(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, button_text, class_name=None, is_button_class=False, include_metrics=True,
optional_parameters=None, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ monitor_fxa_button('{0}', '{1}', '{2}', {3}, {4}, {5}, {6}) }}}}".format(
entrypoint, button_text, class_name, is_button_class, include_metrics,
optional_parameters, optional_attributes), {'request': req})
def test_monitor_fxa_button(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-accounts', button_text='Sign In to Monitor',
class_name='monitor-main-cta-button', is_button_class=False, include_metrics=True,
optional_parameters={'utm_campaign': 'skyline'},
optional_attributes={'data-cta-text': 'Sign In to Monitor', 'data-cta-type':
'fxa-monitor', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://monitor.firefox.com/oauth/init?entrypoint=mozilla.org-firefox-accounts&form_type=button'
u'&utm_source=mozilla.org-firefox-accounts&utm_medium=referral&utm_campaign=skyline" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button '
u'monitor-main-cta-button" data-cta-text="Sign In to Monitor" data-cta-type="fxa-monitor" '
u'data-cta-position="primary">Sign In to Monitor</a>')
self.assertEqual(markup, expected)
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
@override_settings(FXA_ENDPOINT_MOZILLAONLINE=TEST_FXA_MOZILLAONLINE_ENDPOINT)
class TestFxAButton(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, button_text, action='signup', class_name=None, is_button_class=True, include_metrics=True,
optional_parameters=None, optional_attributes=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ fxa_button('{0}', '{1}', '{2}', '{3}', {4}, {5}, {6}, {7}) }}}}".format(
entrypoint, button_text, action, class_name, is_button_class, include_metrics,
optional_parameters, optional_attributes), {'request': req})
def test_fxa_button_signup(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', button_text='Sign Up', action='signup',
class_name='fxa-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'utm_campaign': 'whatsnew73'},
optional_attributes={'data-cta-text': 'Sign Up', 'data-cta-type':
'fxa-sync', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://accounts.firefox.com/signup?entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product '
u'fxa-main-cta-button" data-cta-text="Sign Up" data-cta-type="fxa-sync" data-cta-position="primary" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/signup?entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/">Sign Up</a>')
self.assertEqual(markup, expected)
def test_fxa_button_signin(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', button_text='Sign In', action='signin',
class_name='fxa-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'utm_campaign': 'whatsnew73'},
optional_attributes={'data-cta-text': 'Sign In', 'data-cta-type':
'fxa-sync', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://accounts.firefox.com/signin?entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product '
u'fxa-main-cta-button" data-cta-text="Sign In" data-cta-type="fxa-sync" data-cta-position="primary" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/signin?entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/">Sign In</a>')
self.assertEqual(markup, expected)
def test_fxa_button_email(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', button_text='Sign Up', action='email',
class_name='fxa-main-cta-button', is_button_class=True, include_metrics=True,
optional_parameters={'utm_campaign': 'whatsnew73'},
optional_attributes={'data-cta-text': 'Sign Up', 'data-cta-type':
'fxa-sync', 'data-cta-position': 'primary'})
expected = (
u'<a href="https://accounts.firefox.com/?action=email&entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-action="https://accounts.firefox.com/" class="js-fxa-cta-link js-fxa-product-button mzp-c-button mzp-t-product '
u'fxa-main-cta-button" data-cta-text="Sign Up" data-cta-type="fxa-sync" data-cta-position="primary" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/?action=email&entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/">Sign Up</a>')
self.assertEqual(markup, expected)
@override_settings(FXA_ENDPOINT=TEST_FXA_ENDPOINT)
@override_settings(FXA_ENDPOINT_MOZILLAONLINE=TEST_FXA_MOZILLAONLINE_ENDPOINT)
class TestFxALinkFragment(TestCase):
rf = RequestFactory()
def _render(self, entrypoint, action='signup', optional_parameters=None):
req = self.rf.get('/')
req.locale = 'en-US'
return render("{{{{ fxa_link_fragment('{0}', '{1}', {2}) }}}}".format(
entrypoint, action, optional_parameters), {'request': req})
def test_fxa_button_signup(self):
"""Should return expected markup"""
markup = self._render(entrypoint='mozilla.org-firefox-whatsnew73', action='signup',
optional_parameters={'utm_campaign': 'whatsnew73'})
expected = (
u'href="https://accounts.firefox.com/signup?entrypoint=mozilla.org-firefox-whatsnew73&form_type=button'
u'&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-link="https://accounts.firefox.com.cn/signup?entrypoint=mozilla.org-firefox-whatsnew73'
u'&form_type=button&utm_source=mozilla.org-firefox-whatsnew73&utm_medium=referral&utm_campaign=whatsnew73" '
u'data-mozillaonline-action="https://accounts.firefox.com.cn/"')
self.assertEqual(markup, expected)
|
hoosteeno/bedrock
|
bedrock/mozorg/tests/test_helper_misc.py
|
Python
|
mpl-2.0
| 51,641
|
# ext/serializer.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
allowing "contextual" deserialization.
Any SQLAlchemy query structure, either based on sqlalchemy.sql.*
or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session
etc. which are referenced by the structure are not persisted in serialized
form, but are instead re-associated with the query structure
when it is deserialized.
Usage is nearly the same as that of the standard Python pickle module::
from sqlalchemy.ext.serializer import loads, dumps
metadata = MetaData(bind=some_engine)
Session = scoped_session(sessionmaker())
# ... define mappers
    query = Session.query(MyClass).filter(
        MyClass.somedata == 'foo').order_by(MyClass.sortkey)
# pickle the query
serialized = dumps(query)
# unpickle. Pass in metadata + scoped_session
query2 = loads(serialized, metadata, Session)
print query2.all()
The same restrictions as with raw pickle apply; mapped classes must themselves
be pickleable, meaning they are importable from a module-level namespace.
The serializer module is only appropriate for query structures. It is not
needed for:
* instances of user-defined classes. These contain no references to engines,
sessions or expression constructs in the typical case and can be serialized
directly.
* Table metadata that is to be loaded entirely from the serialized structure
(i.e. is not already declared in the application). Regular
pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object,
typically one which was reflected from an existing database at some previous
point in time. The serializer module is specifically for the opposite case,
where the Table metadata is already present in memory.
"""
import re
from .. import Column
from .. import Table
from ..engine import Engine
from ..orm import class_mapper
from ..orm.attributes import QueryableAttribute
from ..orm.interfaces import MapperProperty
from ..orm.mapper import Mapper
from ..orm.session import Session
from ..util import b64decode
from ..util import b64encode
from ..util import byte_buffer
from ..util import pickle
from ..util import text_type
__all__ = ["Serializer", "Deserializer", "dumps", "loads"]
def Serializer(*args, **kw):
pickler = pickle.Pickler(*args, **kw)
def persistent_id(obj):
# print "serializing:", repr(obj)
if isinstance(obj, QueryableAttribute):
cls = obj.impl.class_
key = obj.impl.key
id_ = "attribute:" + key + ":" + b64encode(pickle.dumps(cls))
elif isinstance(obj, Mapper) and not obj.non_primary:
id_ = "mapper:" + b64encode(pickle.dumps(obj.class_))
elif isinstance(obj, MapperProperty) and not obj.parent.non_primary:
id_ = (
"mapperprop:"
+ b64encode(pickle.dumps(obj.parent.class_))
+ ":"
+ obj.key
)
elif isinstance(obj, Table):
id_ = "table:" + text_type(obj.key)
elif isinstance(obj, Column) and isinstance(obj.table, Table):
id_ = (
"column:" + text_type(obj.table.key) + ":" + text_type(obj.key)
)
elif isinstance(obj, Session):
id_ = "session:"
elif isinstance(obj, Engine):
id_ = "engine:"
else:
return None
return id_
pickler.persistent_id = persistent_id
return pickler
our_ids = re.compile(
r"(mapperprop|mapper|table|column|session|attribute|engine):(.*)"
)
def Deserializer(file, metadata=None, scoped_session=None, engine=None):
unpickler = pickle.Unpickler(file)
def get_engine():
if engine:
return engine
elif scoped_session and scoped_session().bind:
return scoped_session().bind
elif metadata and metadata.bind:
return metadata.bind
else:
return None
def persistent_load(id_):
m = our_ids.match(text_type(id_))
if not m:
return None
else:
type_, args = m.group(1, 2)
if type_ == "attribute":
key, clsarg = args.split(":")
cls = pickle.loads(b64decode(clsarg))
return getattr(cls, key)
elif type_ == "mapper":
cls = pickle.loads(b64decode(args))
return class_mapper(cls)
elif type_ == "mapperprop":
mapper, keyname = args.split(":")
cls = pickle.loads(b64decode(mapper))
return class_mapper(cls).attrs[keyname]
elif type_ == "table":
return metadata.tables[args]
elif type_ == "column":
table, colname = args.split(":")
return metadata.tables[table].c[colname]
elif type_ == "session":
return scoped_session()
elif type_ == "engine":
return get_engine()
else:
raise Exception("Unknown token: %s" % type_)
unpickler.persistent_load = persistent_load
return unpickler
def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL):
buf = byte_buffer()
pickler = Serializer(buf, protocol)
pickler.dump(obj)
return buf.getvalue()
def loads(data, metadata=None, scoped_session=None, engine=None):
buf = byte_buffer(data)
unpickler = Deserializer(buf, metadata, scoped_session, engine)
return unpickler.load()
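# --- Hedged usage sketch (not part of the original module) ---
# A self-contained round trip through dumps()/loads() using a Core select(),
# which sidesteps the "mapped classes must be importable" restriction noted in
# the docstring above. The in-memory SQLite engine, table and column names are
# illustrative assumptions only.
def _demo_serializer_roundtrip():
    from sqlalchemy import (MetaData, Table, Column, Integer, String,
                            create_engine, select)
    from sqlalchemy.ext.serializer import dumps, loads

    engine = create_engine("sqlite://")
    metadata = MetaData(bind=engine)
    users = Table(
        "users", metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )
    metadata.create_all()
    engine.execute(users.insert(), [{"name": "ed"}, {"name": "wendy"}])

    # Serialize a query structure; Table/Column references are stored by key,
    # not pickled, and are re-associated with `metadata` on load.
    stmt = select([users.c.name]).where(users.c.name == "ed")
    payload = dumps(stmt)
    restored = loads(payload, metadata)
    return engine.execute(restored).fetchall()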
|
skarra/PRS
|
libs/sqlalchemy/ext/serializer.py
|
Python
|
agpl-3.0
| 5,784
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-04-24 15:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("notification", "0004_delete_old_email_notification")]
operations = [
migrations.RenameModel(
old_name="TempEmailNotification", new_name="EmailNotification"
)
]
|
project-callisto/callisto-core
|
callisto_core/notification/migrations/0005_rename_to_emailnotification.py
|
Python
|
agpl-3.0
| 408
|
#!/usr/bin/env python
from connection import Conn as Connection
|
Polychart/builder
|
server/polychartQuery/csv/__init__.py
|
Python
|
agpl-3.0
| 65
|
import pytest
from django.test import RequestFactory
from django.core.urlresolvers import reverse
from api.transaction.serializers import TransactionSerializer
from iati.transaction.factories import TransactionFactory
class TestTransactionSerializer:
"""
Test if transaction model is serialized correctly.
"""
request_dummy = RequestFactory().get('/')
transaction = TransactionFactory.build(id=1)
def serialize_test_transaction(self, transaction=None):
"""
        Helper method that simplifies tests of serialized data
"""
return TransactionSerializer(
transaction or self.transaction,
context={'request': self.request_dummy},
)
@pytest.mark.django_db
def test_transaction_serializer_has_correct_url(self):
"""
        Test if transactions are serialized properly
"""
serializer = self.serialize_test_transaction()
assert 'url' in serializer.data.keys(), \
"""serialized data should include url"""
expected_url = "http://testserver{reverse}".format(
reverse=reverse('transactions:detail',
args=(self.transaction.id,)))
assert serializer.data.get('url', '') == expected_url, \
"""serialized url should point to transaction detail page"""
|
tokatikato/OIPA
|
OIPA/api/transaction/tests/test_transaction_serializer.py
|
Python
|
agpl-3.0
| 1,318
|
#-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import random
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
from werkzeug.debug import DebuggedApplication
if os.name == 'posix':
# Unix only for workers
import fcntl
import resource
import psutil
else:
# Windows shim
signal.SIGHUP = -1
# Optional process names for workers
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools import stripped_sys_argv, dumpstacks, log_ormcache_stats
_logger = logging.getLogger(__name__)
try:
import watchdog
from watchdog.observers import Observer
from watchdog.events import FileCreatedEvent, FileModifiedEvent, FileMovedEvent
except ImportError:
watchdog = None
SLEEP_INTERVAL = 60 # 1 min
def memory_info(process):
""" psutil < 2.0 does not have memory_info, >= 3.0 does not have
get_memory_info """
return (getattr(process, 'memory_info', None) or process.get_memory_info)()
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
def handle_error(self, request, client_address):
t, e, _ = sys.exc_info()
if t == socket.error and e.errno == errno.EPIPE:
# broken pipe, ignore error
return
_logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
use this class, sets the socket and calls the process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
        # we don't bind because we use the listen socket of PreforkServer#socket;
        # instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
        # don't listen as we use PreforkServer#socket
pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
def setup(self):
# flag the current thread as handling a http request
super(RequestHandler, self).setup()
me = threading.currentThread()
me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
given by the environement, this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def __init__(self, host, port, app):
super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
handler=RequestHandler)
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd))? It seems Python duplicates the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
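# --- Hedged sketch (not part of the original module) ---
# Illustrates the LISTEN_FDS / LISTEN_PID convention checked in server_bind()
# above: a parent binds the listen socket once, exports its fd number and its
# pid, and after os.execv() (the pid is unchanged) the re-executed process
# adopts the socket via socket.fromfd() instead of binding again. The address
# and backlog are illustrative assumptions; _reexec() further down is the hook
# that would actually perform the exec.
def _demo_export_listen_socket(host='127.0.0.1', port=8069):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((host, port))
    sock.listen(5)
    # Python 2 sockets are not close-on-exec, so the fd survives the exec.
    os.environ['LISTEN_FDS'] = str(sock.fileno())
    os.environ['LISTEN_PID'] = str(os.getpid())
    return sock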
#----------------------------------------------------------
# FileSystem Watcher for autoreload and cache invalidation
#----------------------------------------------------------
class FSWatcher(object):
def __init__(self):
self.observer = Observer()
for path in openerp.modules.module.ad_paths:
_logger.info('Watching addons folder %s', path)
self.observer.schedule(self, path, recursive=True)
def dispatch(self, event):
if isinstance(event, (FileCreatedEvent, FileModifiedEvent, FileMovedEvent)):
if not event.is_directory:
path = getattr(event, 'dest_path', event.src_path)
if path.endswith('.py'):
try:
source = open(path, 'rb').read() + '\n'
compile(source, path, 'exec')
except SyntaxError:
_logger.error('autoreload: python code change detected, SyntaxError in %s', path)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def start(self):
self.observer.start()
_logger.info('AutoReload watcher running')
def stop(self):
self.observer.stop()
self.observer.join()
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
if e.errno == errno.EBADF:
# Werkzeug > 0.9.6 closes the socket itself (see commit
# https://github.com/mitsuhiko/werkzeug/commit/4d8ca089)
return
            # On OSX, shutting down one side of the socket shuts down both
            # sides, causing an error 57 'Socket is not connected' on shutdown
            # of the other side; see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT, signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
self.quit_signals_received += 1
def cron_thread(self, number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.iteritems():
while registry.ready:
acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e, s):
return self.app(e, s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
t.setDaemon(True)
t.start()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)
def start(self, stop=False):
_logger.debug("Setting signal handlers")
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
signal.signal(signal.SIGUSR1, log_ormcache_stats)
elif os.name == 'nt':
import win32api
win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
test_mode = config['test_enable'] or config['test_file']
if test_mode or (config['xmlrpc'] and not stop):
            # some tests need the http daemon to be available...
self.http_spawn()
if not stop:
# only relevant if we are not in "--stop-after-init" mode
self.cron_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
if self.httpd:
self.httpd.shutdown()
self.close_socket(self.httpd.socket)
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
while thread.isAlive():
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
openerp.modules.registry.RegistryManager.delete_all()
logging.shutdown()
def run(self, preload=None, stop=False):
""" Start the http server and the cron thread then wait for a signal.
The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
a second one if any will force an immediate exit.
"""
self.start(stop=stop)
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler.) The try/except is for the win32 case.
try:
while self.quit_signals_received == 0:
time.sleep(60)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def watch_parent(self, beat=4):
import gevent
ppid = os.getppid()
while True:
if ppid != os.getppid():
pid = os.getpid()
_logger.info("LongPolling (%s) Parent changed", pid)
# suicide !!
os.kill(pid, signal.SIGTERM)
return
gevent.sleep(beat)
def start(self):
import gevent
from gevent.wsgi import WSGIServer
if os.name == 'posix':
signal.signal(signal.SIGQUIT, dumpstacks)
signal.signal(signal.SIGUSR1, log_ormcache_stats)
gevent.spawn(self.watch_parent)
self.httpd = WSGIServer((self.interface, self.port), self.app)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
try:
self.httpd.serve_forever()
except:
_logger.exception("Evented Service (longpolling): uncaught error during main loop")
raise
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self, preload, stop):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
    PreforkServer (aka Multicorn) currently uses accept(2) as the dispatching
    method between workers, but we plan to replace it with a more intelligent
    dispatcher that will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv()
cmd = nargs[0]
cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
nargs[0] = cmd
popen = subprocess.Popen([sys.executable] + nargs)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid == self.long_polling_pid:
self.long_polling_pid = None
if pid in self.workers:
_logger.debug("Worker (%s) unregistered", pid)
try:
self.workers_http.pop(pid, None)
self.workers_cron.pop(pid, None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT, signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
self.dumpstacks()
elif sig == signal.SIGUSR1:
# log ormcache stats on kill -SIGUSR1
log_ormcache_stats()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if worker.watchdog_timeout is not None and \
(now - worker.watchdog_time) >= worker.watchdog_timeout:
_logger.error("Worker (%s) timeout", pid)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
if config['xmlrpc']:
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
if not self.long_polling_pid:
self.long_polling_spawn()
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
        # Wakeup pipe: Python doesn't raise EINTR when a syscall is interrupted
        # by a signal, simulating a pseudo SA_RESTART. We write to a pipe in the
        # signal handler to work around this behaviour.
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
signal.signal(signal.SIGUSR1, log_ormcache_stats)
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8 * self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
# FIXME make longpolling process handle SIGTERM correctly
self.worker_kill(self.long_polling_pid, signal.SIGKILL)
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGINT)
while self.workers and time.time() < limit:
try:
self.process_signals()
except KeyboardInterrupt:
_logger.info("Forced shutdown.")
break
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
self.socket.close()
def run(self, preload, stop):
self.start()
rc = preload_registries(preload)
if stop:
self.stop()
return rc
        # Empty the cursor pool; we don't want cursors to be shared among forked workers.
openerp.sql_db.close_all()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception, e:
_logger.exception(e)
self.stop(False)
return -1
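# --- Hedged sketch (not part of the original module) ---
# Minimal illustration of the accept(2) dispatching described in the
# PreforkServer docstring: the parent binds and listens once, each forked
# child blocks in accept() on the shared socket, and the kernel decides which
# child receives a given connection. Address, worker count and the one-shot
# reply are illustrative assumptions.
def _demo_prefork_accept(host='127.0.0.1', port=8070, workers=2):
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind((host, port))
    listener.listen(8)
    for _ in range(workers):
        if os.fork() == 0:
            # child: handle a single connection on the inherited socket, then exit
            conn, _addr = listener.accept()
            conn.sendall('handled by pid %d\n' % os.getpid())
            conn.close()
            os._exit(0)
    # parent: the listen socket stays open and is shared with the children
    return listener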
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
        # should we rename this to lifetime?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def process_limit(self):
        # If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = memory_info(psutil.Process(os.getpid()))
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We don't commit suicide in this case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
        # Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s, registry count: %s.",
self.pid, self.request_count,
len(openerp.modules.registry.RegistryManager.registries))
self.stop()
except Exception:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def process_request(self, client, addr):
client.setblocking(1)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client, addr)
except IOError, e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
# process_work() below process a single database per call.
# The variable db_index is keeping track of the next database to
# process.
self.db_index = 0
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
time.sleep(interval)
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = openerp.service.db.list_dbs(True)
return db_names
def process_work(self):
rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_rss, start_vms = memory_info(psutil.Process(os.getpid()))
import openerp.addons.base as base
base.ir.ir_cron.ir_cron._acquire_job(db_name)
openerp.modules.registry.RegistryManager.delete(db_name)
            # don't keep cursors in multi-database mode
if len(db_names) > 1:
openerp.sql_db.close_db(db_name)
if rpc_request_flag:
run_time = time.time() - start_time
end_rss, end_vms = memory_info(psutil.Process(os.getpid()))
vms_diff = (end_vms - start_vms) / 1024
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
(db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
_logger.error("There are more dabatases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice with others...
Worker.start(self)
self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
for m in openerp.conf.server_wide_modules:
try:
openerp.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
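# Note (assumption, not in the original file): openerp.conf.server_wide_modules
# is normally filled from the --load command-line option, e.g.
# `openerp-server --load=web,web_kanban`, so the loop above usually loads at
# least the `web` module before any worker starts serving requests.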
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if openerp.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
if updated_modules:
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
os.execv(sys.executable, args)
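# Illustrative example (assumption): a server started as
#   python openerp-server -c /etc/odoo/openerp-server.conf
# that has just updated the ['sale'] module would be re-executed roughly as
#   python openerp-server -c /etc/odoo/openerp-server.conf -u sale
# with process-specific options (e.g. any previous -u/--update) removed by
# stripped_sys_argv().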
def load_test_file_yml(registry, test_file):
with registry.cursor() as cr:
openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
if config['test_commit']:
            _logger.info('test %s has been committed', test_file)
cr.commit()
else:
            _logger.info('test %s has been rolled back', test_file)
cr.rollback()
def load_test_file_py(registry, test_file):
# Locate python module based on its filename and run the tests
test_path, _ = os.path.splitext(os.path.abspath(test_file))
for mod_name, mod_mod in sys.modules.items():
if mod_mod:
mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
if test_path == mod_path:
suite = unittest2.TestSuite()
for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
suite.addTest(t)
_logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
stream = openerp.modules.module.TestStream()
result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
success = result.wasSuccessful()
                if hasattr(registry._assertion_report, 'report_result'):
registry._assertion_report.report_result(success)
if not success:
_logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
""" Preload a registries, possibly run a test file."""
    # TODO: move all config checks to args, don't check tools.config here
config = openerp.tools.config
test_file = config['test_file']
dbnames = dbnames or []
rc = 0
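    # rc counts the databases whose test run reported failures; a database
    # that fails to initialize at all aborts the loop with -1 instead.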
for dbname in dbnames:
try:
update_module = config['init'] or config['update']
registry = RegistryManager.new(dbname, update_module=update_module)
# run test_file if provided
if test_file:
_logger.info('loading test file %s', test_file)
with openerp.api.Environment.manage():
if test_file.endswith('yml'):
load_test_file_yml(registry, test_file)
elif test_file.endswith('py'):
load_test_file_py(registry, test_file)
if registry._assertion_report.failures:
rc += 1
except Exception:
_logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
return -1
return rc
def start(preload=None, stop=False):
""" Start the openerp http server and cron processor.
"""
global server
load_server_wide_modules()
if openerp.evented:
server = GeventServer(openerp.service.wsgi_server.application)
elif config['workers']:
server = PreforkServer(openerp.service.wsgi_server.application)
else:
server = ThreadedServer(openerp.service.wsgi_server.application)
watcher = None
if config['dev_mode']:
if watchdog:
watcher = FSWatcher()
watcher.start()
else:
_logger.warning("'watchdog' module not installed. Code autoreload feature is disabled")
server.app = DebuggedApplication(server.app, evalex=True)
rc = server.run(preload, stop)
# like the legend of the phoenix, all ends with beginnings
if getattr(openerp, 'phoenix', False):
if watcher:
watcher.stop()
_reexec()
return rc if rc else 0
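# Minimal usage sketch for start() (illustrative only; `my_database` is a
# hypothetical name):
#   import sys
#   import openerp
#   openerp.tools.config.parse_config(['-d', 'my_database', '--stop-after-init'])
#   sys.exit(openerp.service.server.start(preload=['my_database'], stop=True))
# This is roughly what the openerp-server entry point does for a one-shot run.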
def restart():
""" Restart the server
"""
if os.name == 'nt':
# run in a thread to let the current thread return response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
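# Restart flow (as implemented in this module): on POSIX the SIGHUP sent above
# is caught by the running server, which sets the `openerp.phoenix` flag and
# shuts down; start() then notices the flag once server.run() returns and
# calls _reexec() to replace the process, as the phoenix comment above hints.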
|
tvtsoft/odoo8
|
openerp/service/server.py
|
Python
|
agpl-3.0
| 35,763
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_purchase_delivery
|
OCA/purchase-workflow
|
purchase_propagate_qty/tests/__init__.py
|
Python
|
agpl-3.0
| 107
|