repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
yousafsyed/casperjs
|
refs/heads/master
|
bin/Lib/lib2to3/tests/data/infinite_recursion.py
|
341
|
# This file is used to verify that 2to3 falls back to a slower, iterative pattern matching
# scheme in the event that the faster recursive system fails due to infinite recursion.
from ctypes import *
# Machine-generated ctypes bindings (Darwin/OpenSSL headers).
# C `char *` is mapped to ctypes' c_char_p throughout this file.
STRING = c_char_p
# Integer constants mirrored from the C headers (byte-order, UI types, waitpid id types).
OSUnknownByteOrder = 0
UIT_PROMPT = 1
P_PGID = 2
P_PID = 1
UIT_ERROR = 5
UIT_INFO = 4
UIT_NONE = 0
P_ALL = 0
UIT_VERIFY = 2
OSBigEndian = 2
UIT_BOOLEAN = 3
OSLittleEndian = 1
# Darwin base typedefs (the __darwin_* names from <sys/_types.h>).
__darwin_nl_item = c_int
__darwin_wctrans_t = c_int
__darwin_wctype_t = c_ulong
__int8_t = c_byte
__uint8_t = c_ubyte
__int16_t = c_short
__uint16_t = c_ushort
__int32_t = c_int
__uint32_t = c_uint
__int64_t = c_longlong
__uint64_t = c_ulonglong
__darwin_intptr_t = c_long
__darwin_natural_t = c_uint
__darwin_ct_rune_t = c_int
# union __mbstate_t: multibyte conversion state (128-byte char buffer overlaid
# with a long long), packed to 4-byte alignment as in the C header.
class __mbstate_t(Union):
    pass
__mbstate_t._pack_ = 4
__mbstate_t._fields_ = [
    ('__mbstate8', c_char * 128),
    ('_mbstateL', c_longlong),
]
# NOTE(review): the sizeof/alignment asserts in this file encode the layout of a
# 32-bit (4-byte pointer) Darwin ABI; on other ABIs some of them will fail.
assert sizeof(__mbstate_t) == 128, sizeof(__mbstate_t)
assert alignment(__mbstate_t) == 4, alignment(__mbstate_t)
__darwin_mbstate_t = __mbstate_t
__darwin_ptrdiff_t = c_int
__darwin_size_t = c_ulong
__darwin_va_list = STRING
__darwin_wchar_t = c_int
__darwin_rune_t = __darwin_wchar_t
__darwin_wint_t = c_int
__darwin_clock_t = c_ulong
__darwin_socklen_t = __uint32_t
__darwin_ssize_t = c_long
__darwin_time_t = c_long
sig_atomic_t = c_int
# struct sigcontext: saved i386 machine state delivered to a signal handler
# (general-purpose and segment registers). 18 x 4-byte fields = 72 bytes.
class sigcontext(Structure):
    pass
sigcontext._fields_ = [
    ('sc_onstack', c_int),
    ('sc_mask', c_int),
    ('sc_eax', c_uint),
    ('sc_ebx', c_uint),
    ('sc_ecx', c_uint),
    ('sc_edx', c_uint),
    ('sc_edi', c_uint),
    ('sc_esi', c_uint),
    ('sc_ebp', c_uint),
    ('sc_esp', c_uint),
    ('sc_ss', c_uint),
    ('sc_eflags', c_uint),
    ('sc_eip', c_uint),
    ('sc_cs', c_uint),
    ('sc_ds', c_uint),
    ('sc_es', c_uint),
    ('sc_fs', c_uint),
    ('sc_gs', c_uint),
]
assert sizeof(sigcontext) == 72, sizeof(sigcontext)
assert alignment(sigcontext) == 4, alignment(sigcontext)
# BSD-style fixed-width integer typedefs.
u_int8_t = c_ubyte
u_int16_t = c_ushort
u_int32_t = c_uint
u_int64_t = c_ulonglong
int32_t = c_int
register_t = int32_t
user_addr_t = u_int64_t
user_size_t = u_int64_t
int64_t = c_longlong
user_ssize_t = int64_t
user_long_t = int64_t
user_ulong_t = u_int64_t
user_time_t = int64_t
syscall_arg_t = u_int64_t
# values for unnamed enumeration
# AES_KEY: 60-word round-key schedule plus the round count.
class aes_key_st(Structure):
    pass
aes_key_st._fields_ = [
    ('rd_key', c_ulong * 60),
    ('rounds', c_int),
]
assert sizeof(aes_key_st) == 244, sizeof(aes_key_st)
assert alignment(aes_key_st) == 4, alignment(aes_key_st)
AES_KEY = aes_key_st
# ASN1_CTX: parser state for OpenSSL's old ASN.1 decoder (cursor pointers,
# current tag/class, remaining length, error flags).
class asn1_ctx_st(Structure):
    pass
asn1_ctx_st._fields_ = [
    ('p', POINTER(c_ubyte)),
    ('eos', c_int),
    ('error', c_int),
    ('inf', c_int),
    ('tag', c_int),
    ('xclass', c_int),
    ('slen', c_long),
    ('max', POINTER(c_ubyte)),
    ('q', POINTER(c_ubyte)),
    ('pp', POINTER(POINTER(c_ubyte))),
    ('line', c_int),
]
assert sizeof(asn1_ctx_st) == 44, sizeof(asn1_ctx_st)
assert alignment(asn1_ctx_st) == 4, alignment(asn1_ctx_st)
ASN1_CTX = asn1_ctx_st
# ASN1_OBJECT: an OID with its short/long names and NID.
class asn1_object_st(Structure):
    pass
asn1_object_st._fields_ = [
    ('sn', STRING),
    ('ln', STRING),
    ('nid', c_int),
    ('length', c_int),
    ('data', POINTER(c_ubyte)),
    ('flags', c_int),
]
assert sizeof(asn1_object_st) == 24, sizeof(asn1_object_st)
assert alignment(asn1_object_st) == 4, alignment(asn1_object_st)
ASN1_OBJECT = asn1_object_st
# ASN1_STRING: generic length+type+data holder; most ASN.1 string types below
# are aliases of this one struct.
class asn1_string_st(Structure):
    pass
asn1_string_st._fields_ = [
    ('length', c_int),
    ('type', c_int),
    ('data', POINTER(c_ubyte)),
    ('flags', c_long),
]
assert sizeof(asn1_string_st) == 16, sizeof(asn1_string_st)
assert alignment(asn1_string_st) == 4, alignment(asn1_string_st)
ASN1_STRING = asn1_string_st
# ASN1_ENCODING: cached DER encoding of a structure.
class ASN1_ENCODING_st(Structure):
    pass
ASN1_ENCODING_st._fields_ = [
    ('enc', POINTER(c_ubyte)),
    ('len', c_long),
    ('modified', c_int),
]
assert sizeof(ASN1_ENCODING_st) == 12, sizeof(ASN1_ENCODING_st)
assert alignment(ASN1_ENCODING_st) == 4, alignment(ASN1_ENCODING_st)
ASN1_ENCODING = ASN1_ENCODING_st
# ASN1_STRING_TABLE: per-NID size/mask constraints for string types.
class asn1_string_table_st(Structure):
    pass
asn1_string_table_st._fields_ = [
    ('nid', c_int),
    ('minsize', c_long),
    ('maxsize', c_long),
    ('mask', c_ulong),
    ('flags', c_ulong),
]
assert sizeof(asn1_string_table_st) == 20, sizeof(asn1_string_table_st)
assert alignment(asn1_string_table_st) == 4, alignment(asn1_string_table_st)
ASN1_STRING_TABLE = asn1_string_table_st
# Opaque template/item/TLC/value types: declared with empty _fields_ because
# their contents are not exposed in the public headers.
class ASN1_TEMPLATE_st(Structure):
    pass
ASN1_TEMPLATE_st._fields_ = [
]
ASN1_TEMPLATE = ASN1_TEMPLATE_st
class ASN1_ITEM_st(Structure):
    pass
ASN1_ITEM = ASN1_ITEM_st
ASN1_ITEM_st._fields_ = [
]
class ASN1_TLC_st(Structure):
    pass
ASN1_TLC = ASN1_TLC_st
ASN1_TLC_st._fields_ = [
]
class ASN1_VALUE_st(Structure):
    pass
ASN1_VALUE_st._fields_ = [
]
ASN1_VALUE = ASN1_VALUE_st
ASN1_ITEM_EXP = ASN1_ITEM
# ASN1_TYPE: tagged union of every ASN.1 primitive; the anonymous union gets
# the generator's mangled name N12asn1_type_st4DOLLAR_11E.
class asn1_type_st(Structure):
    pass
class N12asn1_type_st4DOLLAR_11E(Union):
    pass
ASN1_BOOLEAN = c_int
# All of these string flavors share the asn1_string_st layout.
ASN1_INTEGER = asn1_string_st
ASN1_ENUMERATED = asn1_string_st
ASN1_BIT_STRING = asn1_string_st
ASN1_OCTET_STRING = asn1_string_st
ASN1_PRINTABLESTRING = asn1_string_st
ASN1_T61STRING = asn1_string_st
ASN1_IA5STRING = asn1_string_st
ASN1_GENERALSTRING = asn1_string_st
ASN1_BMPSTRING = asn1_string_st
ASN1_UNIVERSALSTRING = asn1_string_st
ASN1_UTCTIME = asn1_string_st
ASN1_GENERALIZEDTIME = asn1_string_st
ASN1_VISIBLESTRING = asn1_string_st
ASN1_UTF8STRING = asn1_string_st
N12asn1_type_st4DOLLAR_11E._fields_ = [
    ('ptr', STRING),
    ('boolean', ASN1_BOOLEAN),
    ('asn1_string', POINTER(ASN1_STRING)),
    ('object', POINTER(ASN1_OBJECT)),
    ('integer', POINTER(ASN1_INTEGER)),
    ('enumerated', POINTER(ASN1_ENUMERATED)),
    ('bit_string', POINTER(ASN1_BIT_STRING)),
    ('octet_string', POINTER(ASN1_OCTET_STRING)),
    ('printablestring', POINTER(ASN1_PRINTABLESTRING)),
    ('t61string', POINTER(ASN1_T61STRING)),
    ('ia5string', POINTER(ASN1_IA5STRING)),
    ('generalstring', POINTER(ASN1_GENERALSTRING)),
    ('bmpstring', POINTER(ASN1_BMPSTRING)),
    ('universalstring', POINTER(ASN1_UNIVERSALSTRING)),
    ('utctime', POINTER(ASN1_UTCTIME)),
    ('generalizedtime', POINTER(ASN1_GENERALIZEDTIME)),
    ('visiblestring', POINTER(ASN1_VISIBLESTRING)),
    ('utf8string', POINTER(ASN1_UTF8STRING)),
    ('set', POINTER(ASN1_STRING)),
    ('sequence', POINTER(ASN1_STRING)),
]
assert sizeof(N12asn1_type_st4DOLLAR_11E) == 4, sizeof(N12asn1_type_st4DOLLAR_11E)
assert alignment(N12asn1_type_st4DOLLAR_11E) == 4, alignment(N12asn1_type_st4DOLLAR_11E)
asn1_type_st._fields_ = [
    ('type', c_int),
    ('value', N12asn1_type_st4DOLLAR_11E),
]
assert sizeof(asn1_type_st) == 8, sizeof(asn1_type_st)
assert alignment(asn1_type_st) == 4, alignment(asn1_type_st)
ASN1_TYPE = asn1_type_st
# ASN1_METHOD: table of encode/decode/create/destroy function pointers.
# The CFUNCTYPE signatures here carry only the return type; argument lists
# were not emitted by the generator.
class asn1_method_st(Structure):
    pass
asn1_method_st._fields_ = [
    ('i2d', CFUNCTYPE(c_int)),
    ('d2i', CFUNCTYPE(STRING)),
    ('create', CFUNCTYPE(STRING)),
    ('destroy', CFUNCTYPE(None)),
]
assert sizeof(asn1_method_st) == 16, sizeof(asn1_method_st)
assert alignment(asn1_method_st) == 4, alignment(asn1_method_st)
ASN1_METHOD = asn1_method_st
# ASN1_HEADER: old-style header wrapper (octet-string header + payload + method).
class asn1_header_st(Structure):
    pass
asn1_header_st._fields_ = [
    ('header', POINTER(ASN1_OCTET_STRING)),
    ('data', STRING),
    ('meth', POINTER(ASN1_METHOD)),
]
assert sizeof(asn1_header_st) == 12, sizeof(asn1_header_st)
assert alignment(asn1_header_st) == 4, alignment(asn1_header_st)
ASN1_HEADER = asn1_header_st
# BIT_STRING_BITNAME: maps a bit number to its long/short names.
class BIT_STRING_BITNAME_st(Structure):
    pass
BIT_STRING_BITNAME_st._fields_ = [
    ('bitnum', c_int),
    ('lname', STRING),
    ('sname', STRING),
]
assert sizeof(BIT_STRING_BITNAME_st) == 12, sizeof(BIT_STRING_BITNAME_st)
assert alignment(BIT_STRING_BITNAME_st) == 4, alignment(BIT_STRING_BITNAME_st)
BIT_STRING_BITNAME = BIT_STRING_BITNAME_st
# BIO: OpenSSL's I/O abstraction. bio_st is forward-declared so bio_info_cb
# and BIO_METHOD can reference it before its _fields_ are set below.
class bio_st(Structure):
    pass
BIO = bio_st
bio_info_cb = CFUNCTYPE(None, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)
# BIO_METHOD: vtable of read/write/ctrl callbacks for one BIO type.
class bio_method_st(Structure):
    pass
bio_method_st._fields_ = [
    ('type', c_int),
    ('name', STRING),
    ('bwrite', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
    ('bread', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
    ('bputs', CFUNCTYPE(c_int, POINTER(BIO), STRING)),
    ('bgets', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
    ('ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, c_long, c_void_p)),
    ('create', CFUNCTYPE(c_int, POINTER(BIO))),
    ('destroy', CFUNCTYPE(c_int, POINTER(BIO))),
    ('callback_ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, POINTER(bio_info_cb))),
]
assert sizeof(bio_method_st) == 40, sizeof(bio_method_st)
assert alignment(bio_method_st) == 4, alignment(bio_method_st)
BIO_METHOD = bio_method_st
# CRYPTO_EX_DATA: application extra-data attached to OpenSSL objects;
# STACK is the opaque generic stack type.
class crypto_ex_data_st(Structure):
    pass
class stack_st(Structure):
    pass
STACK = stack_st
crypto_ex_data_st._fields_ = [
    ('sk', POINTER(STACK)),
    ('dummy', c_int),
]
assert sizeof(crypto_ex_data_st) == 8, sizeof(crypto_ex_data_st)
assert alignment(crypto_ex_data_st) == 4, alignment(crypto_ex_data_st)
CRYPTO_EX_DATA = crypto_ex_data_st
# Now that BIO_METHOD and CRYPTO_EX_DATA exist, fill in bio_st itself
# (a doubly-linked chain node with callbacks, state flags and counters).
bio_st._fields_ = [
    ('method', POINTER(BIO_METHOD)),
    ('callback', CFUNCTYPE(c_long, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)),
    ('cb_arg', STRING),
    ('init', c_int),
    ('shutdown', c_int),
    ('flags', c_int),
    ('retry_reason', c_int),
    ('num', c_int),
    ('ptr', c_void_p),
    ('next_bio', POINTER(bio_st)),
    ('prev_bio', POINTER(bio_st)),
    ('references', c_int),
    ('num_read', c_ulong),
    ('num_write', c_ulong),
    ('ex_data', CRYPTO_EX_DATA),
]
assert sizeof(bio_st) == 64, sizeof(bio_st)
assert alignment(bio_st) == 4, alignment(bio_st)
# BIO_F_BUFFER_CTX: state of the buffering BIO filter (separate input and
# output buffers with their sizes/lengths/offsets).
class bio_f_buffer_ctx_struct(Structure):
    pass
bio_f_buffer_ctx_struct._fields_ = [
    ('ibuf_size', c_int),
    ('obuf_size', c_int),
    ('ibuf', STRING),
    ('ibuf_len', c_int),
    ('ibuf_off', c_int),
    ('obuf', STRING),
    ('obuf_len', c_int),
    ('obuf_off', c_int),
]
assert sizeof(bio_f_buffer_ctx_struct) == 32, sizeof(bio_f_buffer_ctx_struct)
assert alignment(bio_f_buffer_ctx_struct) == 4, alignment(bio_f_buffer_ctx_struct)
BIO_F_BUFFER_CTX = bio_f_buffer_ctx_struct
# hostent is left opaque (empty _fields_).
class hostent(Structure):
    pass
hostent._fields_ = [
]
# BF_KEY: Blowfish key schedule (P-array of 18 + S-boxes of 4*256 words).
class bf_key_st(Structure):
    pass
bf_key_st._fields_ = [
    ('P', c_uint * 18),
    ('S', c_uint * 1024),
]
assert sizeof(bf_key_st) == 4168, sizeof(bf_key_st)
assert alignment(bf_key_st) == 4, alignment(bf_key_st)
BF_KEY = bf_key_st
# BIGNUM: arbitrary-precision integer (word array `d` of `top` used words).
class bignum_st(Structure):
    pass
bignum_st._fields_ = [
    ('d', POINTER(c_ulong)),
    ('top', c_int),
    ('dmax', c_int),
    ('neg', c_int),
    ('flags', c_int),
]
assert sizeof(bignum_st) == 20, sizeof(bignum_st)
assert alignment(bignum_st) == 4, alignment(bignum_st)
BIGNUM = bignum_st
# BN_CTX is opaque.
class bignum_ctx(Structure):
    pass
bignum_ctx._fields_ = [
]
BN_CTX = bignum_ctx
# BN_BLINDING: RSA blinding state (A, Ai, modulus, owning thread id).
class bn_blinding_st(Structure):
    pass
bn_blinding_st._fields_ = [
    ('init', c_int),
    ('A', POINTER(BIGNUM)),
    ('Ai', POINTER(BIGNUM)),
    ('mod', POINTER(BIGNUM)),
    ('thread_id', c_ulong),
]
assert sizeof(bn_blinding_st) == 20, sizeof(bn_blinding_st)
assert alignment(bn_blinding_st) == 4, alignment(bn_blinding_st)
BN_BLINDING = bn_blinding_st
# BN_MONT_CTX: Montgomery multiplication context (embeds three BIGNUMs).
class bn_mont_ctx_st(Structure):
    pass
bn_mont_ctx_st._fields_ = [
    ('ri', c_int),
    ('RR', BIGNUM),
    ('N', BIGNUM),
    ('Ni', BIGNUM),
    ('n0', c_ulong),
    ('flags', c_int),
]
assert sizeof(bn_mont_ctx_st) == 72, sizeof(bn_mont_ctx_st)
assert alignment(bn_mont_ctx_st) == 4, alignment(bn_mont_ctx_st)
BN_MONT_CTX = bn_mont_ctx_st
# BN_RECP_CTX: reciprocal-division context.
class bn_recp_ctx_st(Structure):
    pass
bn_recp_ctx_st._fields_ = [
    ('N', BIGNUM),
    ('Nr', BIGNUM),
    ('num_bits', c_int),
    ('shift', c_int),
    ('flags', c_int),
]
assert sizeof(bn_recp_ctx_st) == 52, sizeof(bn_recp_ctx_st)
assert alignment(bn_recp_ctx_st) == 4, alignment(bn_recp_ctx_st)
BN_RECP_CTX = bn_recp_ctx_st
# BUF_MEM: growable memory buffer (length used, data pointer, capacity).
class buf_mem_st(Structure):
    pass
buf_mem_st._fields_ = [
    ('length', c_int),
    ('data', STRING),
    ('max', c_int),
]
assert sizeof(buf_mem_st) == 12, sizeof(buf_mem_st)
assert alignment(buf_mem_st) == 4, alignment(buf_mem_st)
BUF_MEM = buf_mem_st
# CAST_KEY: CAST cipher key schedule; short_key flags keys <= 80 bits.
class cast_key_st(Structure):
    pass
cast_key_st._fields_ = [
    ('data', c_ulong * 32),
    ('short_key', c_int),
]
assert sizeof(cast_key_st) == 132, sizeof(cast_key_st)
assert alignment(cast_key_st) == 4, alignment(cast_key_st)
CAST_KEY = cast_key_st
# COMP_METHOD: compression-method vtable (argument lists not emitted by the
# generator, so the CFUNCTYPEs carry only return types).
class comp_method_st(Structure):
    pass
comp_method_st._fields_ = [
    ('type', c_int),
    ('name', STRING),
    ('init', CFUNCTYPE(c_int)),
    ('finish', CFUNCTYPE(None)),
    ('compress', CFUNCTYPE(c_int)),
    ('expand', CFUNCTYPE(c_int)),
    ('ctrl', CFUNCTYPE(c_long)),
    ('callback_ctrl', CFUNCTYPE(c_long)),
]
assert sizeof(comp_method_st) == 32, sizeof(comp_method_st)
assert alignment(comp_method_st) == 4, alignment(comp_method_st)
COMP_METHOD = comp_method_st
# COMP_CTX: per-stream compression state and byte counters.
class comp_ctx_st(Structure):
    pass
comp_ctx_st._fields_ = [
    ('meth', POINTER(COMP_METHOD)),
    ('compress_in', c_ulong),
    ('compress_out', c_ulong),
    ('expand_in', c_ulong),
    ('expand_out', c_ulong),
    ('ex_data', CRYPTO_EX_DATA),
]
assert sizeof(comp_ctx_st) == 28, sizeof(comp_ctx_st)
assert alignment(comp_ctx_st) == 4, alignment(comp_ctx_st)
COMP_CTX = comp_ctx_st
# Dynamic-lock support: the value type is application-defined (opaque here).
class CRYPTO_dynlock_value(Structure):
    pass
CRYPTO_dynlock_value._fields_ = [
]
class CRYPTO_dynlock(Structure):
    pass
CRYPTO_dynlock._fields_ = [
    ('references', c_int),
    ('data', POINTER(CRYPTO_dynlock_value)),
]
assert sizeof(CRYPTO_dynlock) == 8, sizeof(CRYPTO_dynlock)
assert alignment(CRYPTO_dynlock) == 4, alignment(CRYPTO_dynlock)
BIO_dummy = bio_st
# Ex-data lifecycle callback signatures (new/free/dup).
CRYPTO_EX_new = CFUNCTYPE(c_int, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
CRYPTO_EX_free = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
CRYPTO_EX_dup = CFUNCTYPE(c_int, POINTER(CRYPTO_EX_DATA), POINTER(CRYPTO_EX_DATA), c_void_p, c_int, c_long, c_void_p)
# CRYPTO_EX_DATA_FUNCS: one registered ex-data slot's callbacks + user args.
class crypto_ex_data_func_st(Structure):
    pass
crypto_ex_data_func_st._fields_ = [
    ('argl', c_long),
    ('argp', c_void_p),
    ('new_func', POINTER(CRYPTO_EX_new)),
    ('free_func', POINTER(CRYPTO_EX_free)),
    ('dup_func', POINTER(CRYPTO_EX_dup)),
]
assert sizeof(crypto_ex_data_func_st) == 20, sizeof(crypto_ex_data_func_st)
assert alignment(crypto_ex_data_func_st) == 4, alignment(crypto_ex_data_func_st)
CRYPTO_EX_DATA_FUNCS = crypto_ex_data_func_st
# Opaque ex-data implementation handle.
class st_CRYPTO_EX_DATA_IMPL(Structure):
    pass
CRYPTO_EX_DATA_IMPL = st_CRYPTO_EX_DATA_IMPL
st_CRYPTO_EX_DATA_IMPL._fields_ = [
]
CRYPTO_MEM_LEAK_CB = CFUNCTYPE(c_void_p, c_ulong, STRING, c_int, c_int, c_void_p)
# DES block types: 8-byte blocks (const variant shares the same ctypes array type).
DES_cblock = c_ubyte * 8
const_DES_cblock = c_ubyte * 8
# DES_key_schedule: 16 rounds, each an 8-byte block overlaid with two longs
# (the anonymous union carries the generator's mangled name).
class DES_ks(Structure):
    pass
class N6DES_ks3DOLLAR_9E(Union):
    pass
N6DES_ks3DOLLAR_9E._fields_ = [
    ('cblock', DES_cblock),
    ('deslong', c_ulong * 2),
]
assert sizeof(N6DES_ks3DOLLAR_9E) == 8, sizeof(N6DES_ks3DOLLAR_9E)
assert alignment(N6DES_ks3DOLLAR_9E) == 4, alignment(N6DES_ks3DOLLAR_9E)
DES_ks._fields_ = [
    ('ks', N6DES_ks3DOLLAR_9E * 16),
]
assert sizeof(DES_ks) == 128, sizeof(DES_ks)
assert alignment(DES_ks) == 4, alignment(DES_ks)
DES_key_schedule = DES_ks
# Pre-0.9.7 "old des" compatibility layout of the same schedule.
_ossl_old_des_cblock = c_ubyte * 8
class _ossl_old_des_ks_struct(Structure):
    pass
class N23_ossl_old_des_ks_struct4DOLLAR_10E(Union):
    pass
N23_ossl_old_des_ks_struct4DOLLAR_10E._fields_ = [
    ('_', _ossl_old_des_cblock),
    ('pad', c_ulong * 2),
]
assert sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 8, sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E)
assert alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 4, alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E)
_ossl_old_des_ks_struct._fields_ = [
    ('ks', N23_ossl_old_des_ks_struct4DOLLAR_10E),
]
assert sizeof(_ossl_old_des_ks_struct) == 8, sizeof(_ossl_old_des_ks_struct)
assert alignment(_ossl_old_des_ks_struct) == 8, alignment(_ossl_old_des_ks_struct) if False else None  # (unused) placeholder
_ossl_old_des_key_schedule = _ossl_old_des_ks_struct * 16
# DH: Diffie-Hellman key; dh_st is forward-declared so DH_METHOD can point at it.
class dh_st(Structure):
    pass
DH = dh_st
class dh_method(Structure):
    pass
dh_method._fields_ = [
    ('name', STRING),
    ('generate_key', CFUNCTYPE(c_int, POINTER(DH))),
    ('compute_key', CFUNCTYPE(c_int, POINTER(c_ubyte), POINTER(BIGNUM), POINTER(DH))),
    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DH), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
    ('init', CFUNCTYPE(c_int, POINTER(DH))),
    ('finish', CFUNCTYPE(c_int, POINTER(DH))),
    ('flags', c_int),
    ('app_data', STRING),
]
assert sizeof(dh_method) == 32, sizeof(dh_method)
assert alignment(dh_method) == 4, alignment(dh_method)
DH_METHOD = dh_method
# ENGINE is opaque; fields are assigned (empty) later at line ~883.
class engine_st(Structure):
    pass
ENGINE = engine_st
dh_st._fields_ = [
    ('pad', c_int),
    ('version', c_int),
    ('p', POINTER(BIGNUM)),
    ('g', POINTER(BIGNUM)),
    ('length', c_long),
    ('pub_key', POINTER(BIGNUM)),
    ('priv_key', POINTER(BIGNUM)),
    ('flags', c_int),
    ('method_mont_p', STRING),
    ('q', POINTER(BIGNUM)),
    ('j', POINTER(BIGNUM)),
    ('seed', POINTER(c_ubyte)),
    ('seedlen', c_int),
    ('counter', POINTER(BIGNUM)),
    ('references', c_int),
    ('ex_data', CRYPTO_EX_DATA),
    ('meth', POINTER(DH_METHOD)),
    ('engine', POINTER(ENGINE)),
]
assert sizeof(dh_st) == 76, sizeof(dh_st)
assert alignment(dh_st) == 4, alignment(dh_st)
# DSA: forward declaration so DSA_SIG / DSA_METHOD can reference it.
class dsa_st(Structure):
    pass
DSA = dsa_st
# DSA_SIG: the (r, s) signature pair.
class DSA_SIG_st(Structure):
    pass
DSA_SIG_st._fields_ = [
    ('r', POINTER(BIGNUM)),
    ('s', POINTER(BIGNUM)),
]
assert sizeof(DSA_SIG_st) == 8, sizeof(DSA_SIG_st)
assert alignment(DSA_SIG_st) == 4, alignment(DSA_SIG_st)
DSA_SIG = DSA_SIG_st
# DSA_METHOD: sign/verify/mod-exp vtable.
class dsa_method(Structure):
    pass
dsa_method._fields_ = [
    ('name', STRING),
    ('dsa_do_sign', CFUNCTYPE(POINTER(DSA_SIG), POINTER(c_ubyte), c_int, POINTER(DSA))),
    ('dsa_sign_setup', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BN_CTX), POINTER(POINTER(BIGNUM)), POINTER(POINTER(BIGNUM)))),
    ('dsa_do_verify', CFUNCTYPE(c_int, POINTER(c_ubyte), c_int, POINTER(DSA_SIG), POINTER(DSA))),
    ('dsa_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
    ('init', CFUNCTYPE(c_int, POINTER(DSA))),
    ('finish', CFUNCTYPE(c_int, POINTER(DSA))),
    ('flags', c_int),
    ('app_data', STRING),
]
assert sizeof(dsa_method) == 40, sizeof(dsa_method)
assert alignment(dsa_method) == 4, alignment(dsa_method)
DSA_METHOD = dsa_method
# DSA key material (p, q, g parameters plus pub/priv keys and cached values).
dsa_st._fields_ = [
    ('pad', c_int),
    ('version', c_long),
    ('write_params', c_int),
    ('p', POINTER(BIGNUM)),
    ('q', POINTER(BIGNUM)),
    ('g', POINTER(BIGNUM)),
    ('pub_key', POINTER(BIGNUM)),
    ('priv_key', POINTER(BIGNUM)),
    ('kinv', POINTER(BIGNUM)),
    ('r', POINTER(BIGNUM)),
    ('flags', c_int),
    ('method_mont_p', STRING),
    ('references', c_int),
    ('ex_data', CRYPTO_EX_DATA),
    ('meth', POINTER(DSA_METHOD)),
    ('engine', POINTER(ENGINE)),
]
assert sizeof(dsa_st) == 68, sizeof(dsa_st)
assert alignment(dsa_st) == 4, alignment(dsa_st)
# EVP_PKEY: generic public/private key; the anonymous union holds whichever
# of RSA/DSA/DH the `type` field says is present (rsa_st defined later).
class evp_pkey_st(Structure):
    pass
class N11evp_pkey_st4DOLLAR_12E(Union):
    pass
class rsa_st(Structure):
    pass
N11evp_pkey_st4DOLLAR_12E._fields_ = [
    ('ptr', STRING),
    ('rsa', POINTER(rsa_st)),
    ('dsa', POINTER(dsa_st)),
    ('dh', POINTER(dh_st)),
]
assert sizeof(N11evp_pkey_st4DOLLAR_12E) == 4, sizeof(N11evp_pkey_st4DOLLAR_12E)
assert alignment(N11evp_pkey_st4DOLLAR_12E) == 4, alignment(N11evp_pkey_st4DOLLAR_12E)
evp_pkey_st._fields_ = [
    ('type', c_int),
    ('save_type', c_int),
    ('references', c_int),
    ('pkey', N11evp_pkey_st4DOLLAR_12E),
    ('save_parameters', c_int),
    ('attributes', POINTER(STACK)),
]
assert sizeof(evp_pkey_st) == 24, sizeof(evp_pkey_st)
assert alignment(evp_pkey_st) == 4, alignment(evp_pkey_st)
# EVP_MD: message-digest vtable; EVP_MD_CTX declared first so callbacks can
# take a pointer to it.
class env_md_st(Structure):
    pass
class env_md_ctx_st(Structure):
    pass
EVP_MD_CTX = env_md_ctx_st
env_md_st._fields_ = [
    ('type', c_int),
    ('pkey_type', c_int),
    ('md_size', c_int),
    ('flags', c_ulong),
    ('init', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
    ('update', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), c_void_p, c_ulong)),
    ('final', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(c_ubyte))),
    ('copy', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(EVP_MD_CTX))),
    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
    ('sign', CFUNCTYPE(c_int)),
    ('verify', CFUNCTYPE(c_int)),
    ('required_pkey_type', c_int * 5),
    ('block_size', c_int),
    ('ctx_size', c_int),
]
assert sizeof(env_md_st) == 72, sizeof(env_md_st)
assert alignment(env_md_st) == 4, alignment(env_md_st)
EVP_MD = env_md_st
# EVP_MD_CTX: one in-progress digest computation.
env_md_ctx_st._fields_ = [
    ('digest', POINTER(EVP_MD)),
    ('engine', POINTER(ENGINE)),
    ('flags', c_ulong),
    ('md_data', c_void_p),
]
assert sizeof(env_md_ctx_st) == 16, sizeof(env_md_ctx_st)
assert alignment(env_md_ctx_st) == 4, alignment(env_md_ctx_st)
# EVP_CIPHER: cipher vtable; EVP_CIPHER_CTX declared first so the callbacks
# can take a pointer to it.
class evp_cipher_st(Structure):
    pass
class evp_cipher_ctx_st(Structure):
    pass
EVP_CIPHER_CTX = evp_cipher_ctx_st
evp_cipher_st._fields_ = [
    ('nid', c_int),
    ('block_size', c_int),
    ('key_len', c_int),
    ('iv_len', c_int),
    ('flags', c_ulong),
    ('init', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_int)),
    ('do_cipher', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_uint)),
    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX))),
    ('ctx_size', c_int),
    ('set_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
    ('get_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
    ('ctrl', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), c_int, c_int, c_void_p)),
    ('app_data', c_void_p),
]
assert sizeof(evp_cipher_st) == 52, sizeof(evp_cipher_st)
assert alignment(evp_cipher_st) == 4, alignment(evp_cipher_st)
# EVP_CIPHER_INFO: cipher plus its IV.
class evp_cipher_info_st(Structure):
    pass
EVP_CIPHER = evp_cipher_st
evp_cipher_info_st._fields_ = [
    ('cipher', POINTER(EVP_CIPHER)),
    ('iv', c_ubyte * 16),
]
assert sizeof(evp_cipher_info_st) == 20, sizeof(evp_cipher_info_st)
assert alignment(evp_cipher_info_st) == 4, alignment(evp_cipher_info_st)
EVP_CIPHER_INFO = evp_cipher_info_st
# EVP_CIPHER_CTX: one in-progress encrypt/decrypt operation.
evp_cipher_ctx_st._fields_ = [
    ('cipher', POINTER(EVP_CIPHER)),
    ('engine', POINTER(ENGINE)),
    ('encrypt', c_int),
    ('buf_len', c_int),
    ('oiv', c_ubyte * 16),
    ('iv', c_ubyte * 16),
    ('buf', c_ubyte * 32),
    ('num', c_int),
    ('app_data', c_void_p),
    ('key_len', c_int),
    ('flags', c_ulong),
    ('cipher_data', c_void_p),
    ('final_used', c_int),
    ('block_mask', c_int),
    ('final', c_ubyte * 32),
]
assert sizeof(evp_cipher_ctx_st) == 140, sizeof(evp_cipher_ctx_st)
assert alignment(evp_cipher_ctx_st) == 4, alignment(evp_cipher_ctx_st)
# EVP_ENCODE_CTX: base64 encode/decode state.
class evp_Encode_Ctx_st(Structure):
    pass
evp_Encode_Ctx_st._fields_ = [
    ('num', c_int),
    ('length', c_int),
    ('enc_data', c_ubyte * 80),
    ('line_num', c_int),
    ('expect_nl', c_int),
]
assert sizeof(evp_Encode_Ctx_st) == 96, sizeof(evp_Encode_Ctx_st)
assert alignment(evp_Encode_Ctx_st) == 4, alignment(evp_Encode_Ctx_st)
EVP_ENCODE_CTX = evp_Encode_Ctx_st
# PBE (password-based encryption) key-generation callback signature.
EVP_PBE_KEYGEN = CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), STRING, c_int, POINTER(ASN1_TYPE), POINTER(EVP_CIPHER), POINTER(EVP_MD), c_int)
# LHASH_NODE: singly-linked bucket entry (self-referential `next` pointer).
class lhash_node_st(Structure):
    pass
lhash_node_st._fields_ = [
    ('data', c_void_p),
    ('next', POINTER(lhash_node_st)),
    ('hash', c_ulong),
]
assert sizeof(lhash_node_st) == 12, sizeof(lhash_node_st)
assert alignment(lhash_node_st) == 4, alignment(lhash_node_st)
LHASH_NODE = lhash_node_st
# Hash-table callback signatures: compare, hash, and the two doall walkers.
LHASH_COMP_FN_TYPE = CFUNCTYPE(c_int, c_void_p, c_void_p)
LHASH_HASH_FN_TYPE = CFUNCTYPE(c_ulong, c_void_p)
LHASH_DOALL_FN_TYPE = CFUNCTYPE(None, c_void_p)
LHASH_DOALL_ARG_FN_TYPE = CFUNCTYPE(None, c_void_p, c_void_p)
# LHASH: OpenSSL's dynamic hash table; mostly statistics counters.
class lhash_st(Structure):
    pass
lhash_st._fields_ = [
    ('b', POINTER(POINTER(LHASH_NODE))),
    ('comp', LHASH_COMP_FN_TYPE),
    ('hash', LHASH_HASH_FN_TYPE),
    ('num_nodes', c_uint),
    ('num_alloc_nodes', c_uint),
    ('p', c_uint),
    ('pmax', c_uint),
    ('up_load', c_ulong),
    ('down_load', c_ulong),
    ('num_items', c_ulong),
    ('num_expands', c_ulong),
    ('num_expand_reallocs', c_ulong),
    ('num_contracts', c_ulong),
    ('num_contract_reallocs', c_ulong),
    ('num_hash_calls', c_ulong),
    ('num_comp_calls', c_ulong),
    ('num_insert', c_ulong),
    ('num_replace', c_ulong),
    ('num_delete', c_ulong),
    ('num_no_delete', c_ulong),
    ('num_retrieve', c_ulong),
    ('num_retrieve_miss', c_ulong),
    ('num_hash_comps', c_ulong),
    ('error', c_int),
]
assert sizeof(lhash_st) == 96, sizeof(lhash_st)
assert alignment(lhash_st) == 4, alignment(lhash_st)
LHASH = lhash_st
# MD2 digest state.
class MD2state_st(Structure):
    pass
MD2state_st._fields_ = [
    ('num', c_int),
    ('data', c_ubyte * 16),
    ('cksm', c_uint * 16),
    ('state', c_uint * 16),
]
assert sizeof(MD2state_st) == 148, sizeof(MD2state_st)
assert alignment(MD2state_st) == 4, alignment(MD2state_st)
MD2_CTX = MD2state_st
# MD4 digest state (A-D registers, bit counters, 16-word data block).
class MD4state_st(Structure):
    pass
MD4state_st._fields_ = [
    ('A', c_uint),
    ('B', c_uint),
    ('C', c_uint),
    ('D', c_uint),
    ('Nl', c_uint),
    ('Nh', c_uint),
    ('data', c_uint * 16),
    ('num', c_int),
]
assert sizeof(MD4state_st) == 92, sizeof(MD4state_st)
assert alignment(MD4state_st) == 4, alignment(MD4state_st)
MD4_CTX = MD4state_st
# MD5 digest state (same layout as MD4).
class MD5state_st(Structure):
    pass
MD5state_st._fields_ = [
    ('A', c_uint),
    ('B', c_uint),
    ('C', c_uint),
    ('D', c_uint),
    ('Nl', c_uint),
    ('Nh', c_uint),
    ('data', c_uint * 16),
    ('num', c_int),
]
assert sizeof(MD5state_st) == 92, sizeof(MD5state_st)
assert alignment(MD5state_st) == 4, alignment(MD5state_st)
MD5_CTX = MD5state_st
# MDC-2 digest state (two DES half-states h/hh).
class mdc2_ctx_st(Structure):
    pass
mdc2_ctx_st._fields_ = [
    ('num', c_int),
    ('data', c_ubyte * 8),
    ('h', DES_cblock),
    ('hh', DES_cblock),
    ('pad_type', c_int),
]
assert sizeof(mdc2_ctx_st) == 32, sizeof(mdc2_ctx_st)
assert alignment(mdc2_ctx_st) == 4, alignment(mdc2_ctx_st)
MDC2_CTX = mdc2_ctx_st
# OBJ_NAME: entry in the object-name lookup table.
class obj_name_st(Structure):
    pass
obj_name_st._fields_ = [
    ('type', c_int),
    ('alias', c_int),
    ('name', STRING),
    ('data', STRING),
]
assert sizeof(obj_name_st) == 16, sizeof(obj_name_st)
assert alignment(obj_name_st) == 4, alignment(obj_name_st)
OBJ_NAME = obj_name_st
ASN1_TIME = asn1_string_st
ASN1_NULL = c_int
EVP_PKEY = evp_pkey_st
# X.509 family: all left opaque (forward declarations only); this file only
# needs pointers to them.
class x509_st(Structure):
    pass
X509 = x509_st
class X509_algor_st(Structure):
    pass
X509_ALGOR = X509_algor_st
class X509_crl_st(Structure):
    pass
X509_CRL = X509_crl_st
class X509_name_st(Structure):
    pass
X509_NAME = X509_name_st
class x509_store_st(Structure):
    pass
X509_STORE = x509_store_st
class x509_store_ctx_st(Structure):
    pass
X509_STORE_CTX = x509_store_ctx_st
# ENGINE stays opaque: empty field list finalizes the type.
engine_st._fields_ = [
]
# PEM_ENCODE_SEAL_CTX: combined base64 + digest + cipher contexts for sealing.
class PEM_Encode_Seal_st(Structure):
    pass
PEM_Encode_Seal_st._fields_ = [
    ('encode', EVP_ENCODE_CTX),
    ('md', EVP_MD_CTX),
    ('cipher', EVP_CIPHER_CTX),
]
assert sizeof(PEM_Encode_Seal_st) == 252, sizeof(PEM_Encode_Seal_st)
assert alignment(PEM_Encode_Seal_st) == 4, alignment(PEM_Encode_Seal_st)
PEM_ENCODE_SEAL_CTX = PEM_Encode_Seal_st
# PEM_USER: one PEM recipient (name, DN, cipher/key-encipherment choices).
class pem_recip_st(Structure):
    pass
pem_recip_st._fields_ = [
    ('name', STRING),
    ('dn', POINTER(X509_NAME)),
    ('cipher', c_int),
    ('key_enc', c_int),
]
assert sizeof(pem_recip_st) == 16, sizeof(pem_recip_st)
assert alignment(pem_recip_st) == 4, alignment(pem_recip_st)
PEM_USER = pem_recip_st
# PEM_CTX with its two anonymous sub-structs (Proc-Type and DEK-Info headers).
class pem_ctx_st(Structure):
    pass
class N10pem_ctx_st4DOLLAR_16E(Structure):
    pass
N10pem_ctx_st4DOLLAR_16E._fields_ = [
    ('version', c_int),
    ('mode', c_int),
]
assert sizeof(N10pem_ctx_st4DOLLAR_16E) == 8, sizeof(N10pem_ctx_st4DOLLAR_16E)
assert alignment(N10pem_ctx_st4DOLLAR_16E) == 4, alignment(N10pem_ctx_st4DOLLAR_16E)
class N10pem_ctx_st4DOLLAR_17E(Structure):
    pass
N10pem_ctx_st4DOLLAR_17E._fields_ = [
    ('cipher', c_int),
]
assert sizeof(N10pem_ctx_st4DOLLAR_17E) == 4, sizeof(N10pem_ctx_st4DOLLAR_17E)
assert alignment(N10pem_ctx_st4DOLLAR_17E) == 4, alignment(N10pem_ctx_st4DOLLAR_17E)
pem_ctx_st._fields_ = [
    ('type', c_int),
    ('proc_type', N10pem_ctx_st4DOLLAR_16E),
    ('domain', STRING),
    ('DEK_info', N10pem_ctx_st4DOLLAR_17E),
    ('originator', POINTER(PEM_USER)),
    ('num_recipient', c_int),
    ('recipient', POINTER(POINTER(PEM_USER))),
    ('x509_chain', POINTER(STACK)),
    ('md', POINTER(EVP_MD)),
    ('md_enc', c_int),
    ('md_len', c_int),
    ('md_data', STRING),
    ('dec', POINTER(EVP_CIPHER)),
    ('key_len', c_int),
    ('key', POINTER(c_ubyte)),
    ('data_enc', c_int),
    ('data_len', c_int),
    ('data', POINTER(c_ubyte)),
]
assert sizeof(pem_ctx_st) == 76, sizeof(pem_ctx_st)
assert alignment(pem_ctx_st) == 4, alignment(pem_ctx_st)
PEM_CTX = pem_ctx_st
# Password callback used by PEM read/write routines.
pem_password_cb = CFUNCTYPE(c_int, STRING, c_int, c_int, c_void_p)
# PKCS7_ISSUER_AND_SERIAL: identifies a certificate by issuer DN + serial.
class pkcs7_issuer_and_serial_st(Structure):
    pass
pkcs7_issuer_and_serial_st._fields_ = [
    ('issuer', POINTER(X509_NAME)),
    ('serial', POINTER(ASN1_INTEGER)),
]
assert sizeof(pkcs7_issuer_and_serial_st) == 8, sizeof(pkcs7_issuer_and_serial_st)
assert alignment(pkcs7_issuer_and_serial_st) == 4, alignment(pkcs7_issuer_and_serial_st)
PKCS7_ISSUER_AND_SERIAL = pkcs7_issuer_and_serial_st
# PKCS7_SIGNER_INFO: one signer's algorithms, attributes and key.
class pkcs7_signer_info_st(Structure):
    pass
pkcs7_signer_info_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
    ('digest_alg', POINTER(X509_ALGOR)),
    ('auth_attr', POINTER(STACK)),
    ('digest_enc_alg', POINTER(X509_ALGOR)),
    ('enc_digest', POINTER(ASN1_OCTET_STRING)),
    ('unauth_attr', POINTER(STACK)),
    ('pkey', POINTER(EVP_PKEY)),
]
assert sizeof(pkcs7_signer_info_st) == 32, sizeof(pkcs7_signer_info_st)
assert alignment(pkcs7_signer_info_st) == 4, alignment(pkcs7_signer_info_st)
PKCS7_SIGNER_INFO = pkcs7_signer_info_st
# PKCS7_RECIP_INFO: one recipient's key-encryption data.
class pkcs7_recip_info_st(Structure):
    pass
pkcs7_recip_info_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
    ('key_enc_algor', POINTER(X509_ALGOR)),
    ('enc_key', POINTER(ASN1_OCTET_STRING)),
    ('cert', POINTER(X509)),
]
assert sizeof(pkcs7_recip_info_st) == 20, sizeof(pkcs7_recip_info_st)
assert alignment(pkcs7_recip_info_st) == 4, alignment(pkcs7_recip_info_st)
PKCS7_RECIP_INFO = pkcs7_recip_info_st
# PKCS7_SIGNED: signed-data content type; pkcs7_st is forward-declared because
# signed content recursively contains a PKCS7.
class pkcs7_signed_st(Structure):
    pass
class pkcs7_st(Structure):
    pass
pkcs7_signed_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('md_algs', POINTER(STACK)),
    ('cert', POINTER(STACK)),
    ('crl', POINTER(STACK)),
    ('signer_info', POINTER(STACK)),
    ('contents', POINTER(pkcs7_st)),
]
assert sizeof(pkcs7_signed_st) == 24, sizeof(pkcs7_signed_st)
assert alignment(pkcs7_signed_st) == 4, alignment(pkcs7_signed_st)
PKCS7_SIGNED = pkcs7_signed_st
# PKCS7_ENC_CONTENT: encrypted-content info.
class pkcs7_enc_content_st(Structure):
    pass
pkcs7_enc_content_st._fields_ = [
    ('content_type', POINTER(ASN1_OBJECT)),
    ('algorithm', POINTER(X509_ALGOR)),
    ('enc_data', POINTER(ASN1_OCTET_STRING)),
    ('cipher', POINTER(EVP_CIPHER)),
]
assert sizeof(pkcs7_enc_content_st) == 16, sizeof(pkcs7_enc_content_st)
assert alignment(pkcs7_enc_content_st) == 4, alignment(pkcs7_enc_content_st)
PKCS7_ENC_CONTENT = pkcs7_enc_content_st
# PKCS7_ENVELOPE: enveloped-data content type.
class pkcs7_enveloped_st(Structure):
    pass
pkcs7_enveloped_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('recipientinfo', POINTER(STACK)),
    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
]
assert sizeof(pkcs7_enveloped_st) == 12, sizeof(pkcs7_enveloped_st)
assert alignment(pkcs7_enveloped_st) == 4, alignment(pkcs7_enveloped_st)
PKCS7_ENVELOPE = pkcs7_enveloped_st
# PKCS7_SIGN_ENVELOPE: signed-and-enveloped content type.
class pkcs7_signedandenveloped_st(Structure):
    pass
pkcs7_signedandenveloped_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('md_algs', POINTER(STACK)),
    ('cert', POINTER(STACK)),
    ('crl', POINTER(STACK)),
    ('signer_info', POINTER(STACK)),
    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
    ('recipientinfo', POINTER(STACK)),
]
assert sizeof(pkcs7_signedandenveloped_st) == 28, sizeof(pkcs7_signedandenveloped_st)
assert alignment(pkcs7_signedandenveloped_st) == 4, alignment(pkcs7_signedandenveloped_st)
PKCS7_SIGN_ENVELOPE = pkcs7_signedandenveloped_st
# PKCS7_DIGEST: digested-data content type (also recursive via pkcs7_st).
class pkcs7_digest_st(Structure):
    pass
pkcs7_digest_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('md', POINTER(X509_ALGOR)),
    ('contents', POINTER(pkcs7_st)),
    ('digest', POINTER(ASN1_OCTET_STRING)),
]
assert sizeof(pkcs7_digest_st) == 16, sizeof(pkcs7_digest_st)
assert alignment(pkcs7_digest_st) == 4, alignment(pkcs7_digest_st)
PKCS7_DIGEST = pkcs7_digest_st
# PKCS7_ENCRYPT: encrypted-data content type.
class pkcs7_encrypted_st(Structure):
    pass
pkcs7_encrypted_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
]
assert sizeof(pkcs7_encrypted_st) == 8, sizeof(pkcs7_encrypted_st)
assert alignment(pkcs7_encrypted_st) == 4, alignment(pkcs7_encrypted_st)
PKCS7_ENCRYPT = pkcs7_encrypted_st
# Anonymous union inside pkcs7_st selecting among the content types above.
class N8pkcs7_st4DOLLAR_15E(Union):
    pass
N8pkcs7_st4DOLLAR_15E._fields_ = [
    ('ptr', STRING),
    ('data', POINTER(ASN1_OCTET_STRING)),
    ('sign', POINTER(PKCS7_SIGNED)),
    ('enveloped', POINTER(PKCS7_ENVELOPE)),
    ('signed_and_enveloped', POINTER(PKCS7_SIGN_ENVELOPE)),
    ('digest', POINTER(PKCS7_DIGEST)),
    ('encrypted', POINTER(PKCS7_ENCRYPT)),
    ('other', POINTER(ASN1_TYPE)),
]
assert sizeof(N8pkcs7_st4DOLLAR_15E) == 4, sizeof(N8pkcs7_st4DOLLAR_15E)
assert alignment(N8pkcs7_st4DOLLAR_15E) == 4, alignment(N8pkcs7_st4DOLLAR_15E)
pkcs7_st._fields_ = [
    ('asn1', POINTER(c_ubyte)),
    ('length', c_long),
    ('state', c_int),
    ('detached', c_int),
    ('type', POINTER(ASN1_OBJECT)),
    ('d', N8pkcs7_st4DOLLAR_15E),
]
assert sizeof(pkcs7_st) == 24, sizeof(pkcs7_st)
assert alignment(pkcs7_st) == 4, alignment(pkcs7_st)
PKCS7 = pkcs7_st
class rc2_key_st(Structure):
pass
rc2_key_st._fields_ = [
('data', c_uint * 64),
]
assert sizeof(rc2_key_st) == 256, sizeof(rc2_key_st)
assert alignment(rc2_key_st) == 4, alignment(rc2_key_st)
RC2_KEY = rc2_key_st
class rc4_key_st(Structure):
pass
rc4_key_st._fields_ = [
('x', c_ubyte),
('y', c_ubyte),
('data', c_ubyte * 256),
]
assert sizeof(rc4_key_st) == 258, sizeof(rc4_key_st)
assert alignment(rc4_key_st) == 1, alignment(rc4_key_st)
RC4_KEY = rc4_key_st
class rc5_key_st(Structure):
pass
rc5_key_st._fields_ = [
('rounds', c_int),
('data', c_ulong * 34),
]
assert sizeof(rc5_key_st) == 140, sizeof(rc5_key_st)
assert alignment(rc5_key_st) == 4, alignment(rc5_key_st)
RC5_32_KEY = rc5_key_st
# RIPEMD-160 hash context.
class RIPEMD160state_st(Structure):
    pass
RIPEMD160state_st._fields_ = [
    ('A', c_uint),
    ('B', c_uint),
    ('C', c_uint),
    ('D', c_uint),
    ('E', c_uint),
    ('Nl', c_uint),
    ('Nh', c_uint),
    ('data', c_uint * 16),
    ('num', c_int),
]
assert sizeof(RIPEMD160state_st) == 96, sizeof(RIPEMD160state_st)
assert alignment(RIPEMD160state_st) == 4, alignment(RIPEMD160state_st)
RIPEMD160_CTX = RIPEMD160state_st
# RSA method table: function-pointer slots for a pluggable RSA implementation.
# RSA (rsa_st) was forward-declared earlier so the callbacks can take POINTER(RSA).
RSA = rsa_st
class rsa_meth_st(Structure):
    pass
rsa_meth_st._fields_ = [
    ('name', STRING),
    ('rsa_pub_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
    ('rsa_pub_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
    ('rsa_priv_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
    ('rsa_priv_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
    ('rsa_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(RSA))),
    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
    ('init', CFUNCTYPE(c_int, POINTER(RSA))),
    ('finish', CFUNCTYPE(c_int, POINTER(RSA))),
    ('flags', c_int),
    ('app_data', STRING),
    ('rsa_sign', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), POINTER(c_uint), POINTER(RSA))),
    ('rsa_verify', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), c_uint, POINTER(RSA))),
]
assert sizeof(rsa_meth_st) == 52, sizeof(rsa_meth_st)
assert alignment(rsa_meth_st) == 4, alignment(rsa_meth_st)
RSA_METHOD = rsa_meth_st
# Complete the forward-declared rsa_st now that RSA_METHOD exists.
rsa_st._fields_ = [
    ('pad', c_int),
    ('version', c_long),
    ('meth', POINTER(RSA_METHOD)),
    ('engine', POINTER(ENGINE)),
    ('n', POINTER(BIGNUM)),
    ('e', POINTER(BIGNUM)),
    ('d', POINTER(BIGNUM)),
    ('p', POINTER(BIGNUM)),
    ('q', POINTER(BIGNUM)),
    ('dmp1', POINTER(BIGNUM)),
    ('dmq1', POINTER(BIGNUM)),
    ('iqmp', POINTER(BIGNUM)),
    ('ex_data', CRYPTO_EX_DATA),
    ('references', c_int),
    ('flags', c_int),
    ('_method_mod_n', POINTER(BN_MONT_CTX)),
    ('_method_mod_p', POINTER(BN_MONT_CTX)),
    ('_method_mod_q', POINTER(BN_MONT_CTX)),
    ('bignum_data', STRING),
    ('blinding', POINTER(BN_BLINDING)),
]
assert sizeof(rsa_st) == 84, sizeof(rsa_st)
assert alignment(rsa_st) == 4, alignment(rsa_st)
# Generic "void (*)(void)" function-pointer type used by OpenSSL callbacks.
openssl_fptr = CFUNCTYPE(None)
# SHA-1 hash context.
class SHAstate_st(Structure):
    pass
SHAstate_st._fields_ = [
    ('h0', c_uint),
    ('h1', c_uint),
    ('h2', c_uint),
    ('h3', c_uint),
    ('h4', c_uint),
    ('Nl', c_uint),
    ('Nh', c_uint),
    ('data', c_uint * 16),
    ('num', c_int),
]
assert sizeof(SHAstate_st) == 96, sizeof(SHAstate_st)
assert alignment(SHAstate_st) == 4, alignment(SHAstate_st)
SHA_CTX = SHAstate_st
# SSL connection / cipher / method structures. ssl_st is declared empty here
# and filled in later, because ssl_method_st's callbacks need POINTER(SSL).
class ssl_st(Structure):
    pass
ssl_crock_st = POINTER(ssl_st)
class ssl_cipher_st(Structure):
    pass
ssl_cipher_st._fields_ = [
    ('valid', c_int),
    ('name', STRING),
    ('id', c_ulong),
    ('algorithms', c_ulong),
    ('algo_strength', c_ulong),
    ('algorithm2', c_ulong),
    ('strength_bits', c_int),
    ('alg_bits', c_int),
    ('mask', c_ulong),
    ('mask_strength', c_ulong),
]
assert sizeof(ssl_cipher_st) == 40, sizeof(ssl_cipher_st)
assert alignment(ssl_cipher_st) == 4, alignment(ssl_cipher_st)
SSL_CIPHER = ssl_cipher_st
SSL = ssl_st
class ssl_ctx_st(Structure):
    pass
SSL_CTX = ssl_ctx_st
class ssl_method_st(Structure):
    pass
class ssl3_enc_method(Structure):
    pass
# Protocol-method vtable: one callback per SSL operation.
ssl_method_st._fields_ = [
    ('version', c_int),
    ('ssl_new', CFUNCTYPE(c_int, POINTER(SSL))),
    ('ssl_clear', CFUNCTYPE(None, POINTER(SSL))),
    ('ssl_free', CFUNCTYPE(None, POINTER(SSL))),
    ('ssl_accept', CFUNCTYPE(c_int, POINTER(SSL))),
    ('ssl_connect', CFUNCTYPE(c_int, POINTER(SSL))),
    ('ssl_read', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
    ('ssl_peek', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
    ('ssl_write', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
    ('ssl_shutdown', CFUNCTYPE(c_int, POINTER(SSL))),
    ('ssl_renegotiate', CFUNCTYPE(c_int, POINTER(SSL))),
    ('ssl_renegotiate_check', CFUNCTYPE(c_int, POINTER(SSL))),
    ('ssl_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, c_long, c_void_p)),
    ('ssl_ctx_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, c_long, c_void_p)),
    ('get_cipher_by_char', CFUNCTYPE(POINTER(SSL_CIPHER), POINTER(c_ubyte))),
    ('put_cipher_by_char', CFUNCTYPE(c_int, POINTER(SSL_CIPHER), POINTER(c_ubyte))),
    ('ssl_pending', CFUNCTYPE(c_int, POINTER(SSL))),
    ('num_ciphers', CFUNCTYPE(c_int)),
    ('get_cipher', CFUNCTYPE(POINTER(SSL_CIPHER), c_uint)),
    ('get_ssl_method', CFUNCTYPE(POINTER(ssl_method_st), c_int)),
    ('get_timeout', CFUNCTYPE(c_long)),
    ('ssl3_enc', POINTER(ssl3_enc_method)),
    ('ssl_version', CFUNCTYPE(c_int)),
    ('ssl_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, CFUNCTYPE(None))),
    ('ssl_ctx_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, CFUNCTYPE(None))),
]
assert sizeof(ssl_method_st) == 100, sizeof(ssl_method_st)
assert alignment(ssl_method_st) == 4, alignment(ssl_method_st)
# ssl3_enc_method is left opaque (empty _fields_) — only used by pointer above.
ssl3_enc_method._fields_ = [
]
SSL_METHOD = ssl_method_st
# SSL session cache entry; self-referential prev/next pointers require the
# deferred _fields_ assignment pattern used throughout this file.
class ssl_session_st(Structure):
    pass
class sess_cert_st(Structure):
    pass
ssl_session_st._fields_ = [
    ('ssl_version', c_int),
    ('key_arg_length', c_uint),
    ('key_arg', c_ubyte * 8),
    ('master_key_length', c_int),
    ('master_key', c_ubyte * 48),
    ('session_id_length', c_uint),
    ('session_id', c_ubyte * 32),
    ('sid_ctx_length', c_uint),
    ('sid_ctx', c_ubyte * 32),
    ('not_resumable', c_int),
    ('sess_cert', POINTER(sess_cert_st)),
    ('peer', POINTER(X509)),
    ('verify_result', c_long),
    ('references', c_int),
    ('timeout', c_long),
    ('time', c_long),
    ('compress_meth', c_int),
    ('cipher', POINTER(SSL_CIPHER)),
    ('cipher_id', c_ulong),
    ('ciphers', POINTER(STACK)),
    ('ex_data', CRYPTO_EX_DATA),
    ('prev', POINTER(ssl_session_st)),
    ('next', POINTER(ssl_session_st)),
]
assert sizeof(ssl_session_st) == 200, sizeof(ssl_session_st)
assert alignment(ssl_session_st) == 4, alignment(ssl_session_st)
# sess_cert_st kept opaque — referenced only by pointer.
sess_cert_st._fields_ = [
]
SSL_SESSION = ssl_session_st
GEN_SESSION_CB = CFUNCTYPE(c_int, POINTER(SSL), POINTER(c_ubyte), POINTER(c_uint))
# Compression-method descriptor.
class ssl_comp_st(Structure):
    pass
ssl_comp_st._fields_ = [
    ('id', c_int),
    ('name', STRING),
    ('method', POINTER(COMP_METHOD)),
]
assert sizeof(ssl_comp_st) == 12, sizeof(ssl_comp_st)
assert alignment(ssl_comp_st) == 4, alignment(ssl_comp_st)
SSL_COMP = ssl_comp_st
# Anonymous session-statistics struct embedded in ssl_ctx_st as 'stats'.
class N10ssl_ctx_st4DOLLAR_18E(Structure):
    pass
N10ssl_ctx_st4DOLLAR_18E._fields_ = [
    ('sess_connect', c_int),
    ('sess_connect_renegotiate', c_int),
    ('sess_connect_good', c_int),
    ('sess_accept', c_int),
    ('sess_accept_renegotiate', c_int),
    ('sess_accept_good', c_int),
    ('sess_miss', c_int),
    ('sess_timeout', c_int),
    ('sess_cache_full', c_int),
    ('sess_hit', c_int),
    ('sess_cb_hit', c_int),
]
assert sizeof(N10ssl_ctx_st4DOLLAR_18E) == 44, sizeof(N10ssl_ctx_st4DOLLAR_18E)
assert alignment(N10ssl_ctx_st4DOLLAR_18E) == 4, alignment(N10ssl_ctx_st4DOLLAR_18E)
class cert_st(Structure):
    pass
# Complete the forward-declared ssl_ctx_st (SSL_CTX).
ssl_ctx_st._fields_ = [
    ('method', POINTER(SSL_METHOD)),
    ('cipher_list', POINTER(STACK)),
    ('cipher_list_by_id', POINTER(STACK)),
    ('cert_store', POINTER(x509_store_st)),
    ('sessions', POINTER(lhash_st)),
    ('session_cache_size', c_ulong),
    ('session_cache_head', POINTER(ssl_session_st)),
    ('session_cache_tail', POINTER(ssl_session_st)),
    ('session_cache_mode', c_int),
    ('session_timeout', c_long),
    ('new_session_cb', CFUNCTYPE(c_int, POINTER(ssl_st), POINTER(SSL_SESSION))),
    ('remove_session_cb', CFUNCTYPE(None, POINTER(ssl_ctx_st), POINTER(SSL_SESSION))),
    ('get_session_cb', CFUNCTYPE(POINTER(SSL_SESSION), POINTER(ssl_st), POINTER(c_ubyte), c_int, POINTER(c_int))),
    ('stats', N10ssl_ctx_st4DOLLAR_18E),
    ('references', c_int),
    ('app_verify_callback', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), c_void_p)),
    ('app_verify_arg', c_void_p),
    ('default_passwd_callback', POINTER(pem_password_cb)),
    ('default_passwd_callback_userdata', c_void_p),
    ('client_cert_cb', CFUNCTYPE(c_int, POINTER(SSL), POINTER(POINTER(X509)), POINTER(POINTER(EVP_PKEY)))),
    ('ex_data', CRYPTO_EX_DATA),
    ('rsa_md5', POINTER(EVP_MD)),
    ('md5', POINTER(EVP_MD)),
    ('sha1', POINTER(EVP_MD)),
    ('extra_certs', POINTER(STACK)),
    ('comp_methods', POINTER(STACK)),
    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
    ('client_CA', POINTER(STACK)),
    ('options', c_ulong),
    ('mode', c_ulong),
    ('max_cert_list', c_long),
    ('cert', POINTER(cert_st)),
    ('read_ahead', c_int),
    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
    ('msg_callback_arg', c_void_p),
    ('verify_mode', c_int),
    ('verify_depth', c_int),
    ('sid_ctx_length', c_uint),
    ('sid_ctx', c_ubyte * 32),
    ('default_verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
    ('generate_session_id', GEN_SESSION_CB),
    ('purpose', c_int),
    ('trust', c_int),
    ('quiet_shutdown', c_int),
]
assert sizeof(ssl_ctx_st) == 248, sizeof(ssl_ctx_st)
assert alignment(ssl_ctx_st) == 4, alignment(ssl_ctx_st)
# cert_st kept opaque — referenced only by pointer.
cert_st._fields_ = [
]
# Per-protocol state structs, forward-declared so ssl_st can point at them.
class ssl2_state_st(Structure):
    pass
class ssl3_state_st(Structure):
    pass
# Complete the forward-declared ssl_st (SSL) — the per-connection object.
ssl_st._fields_ = [
    ('version', c_int),
    ('type', c_int),
    ('method', POINTER(SSL_METHOD)),
    ('rbio', POINTER(BIO)),
    ('wbio', POINTER(BIO)),
    ('bbio', POINTER(BIO)),
    ('rwstate', c_int),
    ('in_handshake', c_int),
    ('handshake_func', CFUNCTYPE(c_int)),
    ('server', c_int),
    ('new_session', c_int),
    ('quiet_shutdown', c_int),
    ('shutdown', c_int),
    ('state', c_int),
    ('rstate', c_int),
    ('init_buf', POINTER(BUF_MEM)),
    ('init_msg', c_void_p),
    ('init_num', c_int),
    ('init_off', c_int),
    ('packet', POINTER(c_ubyte)),
    ('packet_length', c_uint),
    ('s2', POINTER(ssl2_state_st)),
    ('s3', POINTER(ssl3_state_st)),
    ('read_ahead', c_int),
    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
    ('msg_callback_arg', c_void_p),
    ('hit', c_int),
    ('purpose', c_int),
    ('trust', c_int),
    ('cipher_list', POINTER(STACK)),
    ('cipher_list_by_id', POINTER(STACK)),
    ('enc_read_ctx', POINTER(EVP_CIPHER_CTX)),
    ('read_hash', POINTER(EVP_MD)),
    ('expand', POINTER(COMP_CTX)),
    ('enc_write_ctx', POINTER(EVP_CIPHER_CTX)),
    ('write_hash', POINTER(EVP_MD)),
    ('compress', POINTER(COMP_CTX)),
    ('cert', POINTER(cert_st)),
    ('sid_ctx_length', c_uint),
    ('sid_ctx', c_ubyte * 32),
    ('session', POINTER(SSL_SESSION)),
    ('generate_session_id', GEN_SESSION_CB),
    ('verify_mode', c_int),
    ('verify_depth', c_int),
    ('verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
    ('error', c_int),
    ('error_code', c_int),
    ('ctx', POINTER(SSL_CTX)),
    ('debug', c_int),
    ('verify_result', c_long),
    ('ex_data', CRYPTO_EX_DATA),
    ('client_CA', POINTER(STACK)),
    ('references', c_int),
    ('options', c_ulong),
    ('mode', c_ulong),
    ('max_cert_list', c_long),
    ('first_packet', c_int),
    ('client_version', c_int),
]
assert sizeof(ssl_st) == 268, sizeof(ssl_st)
assert alignment(ssl_st) == 4, alignment(ssl_st)
# Anonymous handshake-scratch struct embedded in ssl2_state_st as 'tmp'.
class N13ssl2_state_st4DOLLAR_19E(Structure):
    pass
N13ssl2_state_st4DOLLAR_19E._fields_ = [
    ('conn_id_length', c_uint),
    ('cert_type', c_uint),
    ('cert_length', c_uint),
    ('csl', c_uint),
    ('clear', c_uint),
    ('enc', c_uint),
    ('ccl', c_ubyte * 32),
    ('cipher_spec_length', c_uint),
    ('session_id_length', c_uint),
    ('clen', c_uint),
    ('rlen', c_uint),
]
assert sizeof(N13ssl2_state_st4DOLLAR_19E) == 72, sizeof(N13ssl2_state_st4DOLLAR_19E)
assert alignment(N13ssl2_state_st4DOLLAR_19E) == 4, alignment(N13ssl2_state_st4DOLLAR_19E)
# SSLv2 connection state.
ssl2_state_st._fields_ = [
    ('three_byte_header', c_int),
    ('clear_text', c_int),
    ('escape', c_int),
    ('ssl2_rollback', c_int),
    ('wnum', c_uint),
    ('wpend_tot', c_int),
    ('wpend_buf', POINTER(c_ubyte)),
    ('wpend_off', c_int),
    ('wpend_len', c_int),
    ('wpend_ret', c_int),
    ('rbuf_left', c_int),
    ('rbuf_offs', c_int),
    ('rbuf', POINTER(c_ubyte)),
    ('wbuf', POINTER(c_ubyte)),
    ('write_ptr', POINTER(c_ubyte)),
    ('padding', c_uint),
    ('rlength', c_uint),
    ('ract_data_length', c_int),
    ('wlength', c_uint),
    ('wact_data_length', c_int),
    ('ract_data', POINTER(c_ubyte)),
    ('wact_data', POINTER(c_ubyte)),
    ('mac_data', POINTER(c_ubyte)),
    ('read_key', POINTER(c_ubyte)),
    ('write_key', POINTER(c_ubyte)),
    ('challenge_length', c_uint),
    ('challenge', c_ubyte * 32),
    ('conn_id_length', c_uint),
    ('conn_id', c_ubyte * 16),
    ('key_material_length', c_uint),
    ('key_material', c_ubyte * 48),
    ('read_sequence', c_ulong),
    ('write_sequence', c_ulong),
    ('tmp', N13ssl2_state_st4DOLLAR_19E),
]
assert sizeof(ssl2_state_st) == 288, sizeof(ssl2_state_st)
assert alignment(ssl2_state_st) == 4, alignment(ssl2_state_st)
SSL2_STATE = ssl2_state_st
# SSLv3 record and I/O buffer structs.
class ssl3_record_st(Structure):
    pass
ssl3_record_st._fields_ = [
    ('type', c_int),
    ('length', c_uint),
    ('off', c_uint),
    ('data', POINTER(c_ubyte)),
    ('input', POINTER(c_ubyte)),
    ('comp', POINTER(c_ubyte)),
]
assert sizeof(ssl3_record_st) == 24, sizeof(ssl3_record_st)
assert alignment(ssl3_record_st) == 4, alignment(ssl3_record_st)
SSL3_RECORD = ssl3_record_st
class ssl3_buffer_st(Structure):
    pass
size_t = __darwin_size_t
ssl3_buffer_st._fields_ = [
    ('buf', POINTER(c_ubyte)),
    ('len', size_t),
    ('offset', c_int),
    ('left', c_int),
]
assert sizeof(ssl3_buffer_st) == 16, sizeof(ssl3_buffer_st)
assert alignment(ssl3_buffer_st) == 4, alignment(ssl3_buffer_st)
SSL3_BUFFER = ssl3_buffer_st
# Anonymous handshake-scratch struct embedded in ssl3_state_st as 'tmp'.
class N13ssl3_state_st4DOLLAR_20E(Structure):
    pass
N13ssl3_state_st4DOLLAR_20E._fields_ = [
    ('cert_verify_md', c_ubyte * 72),
    ('finish_md', c_ubyte * 72),
    ('finish_md_len', c_int),
    ('peer_finish_md', c_ubyte * 72),
    ('peer_finish_md_len', c_int),
    ('message_size', c_ulong),
    ('message_type', c_int),
    ('new_cipher', POINTER(SSL_CIPHER)),
    ('dh', POINTER(DH)),
    ('next_state', c_int),
    ('reuse_message', c_int),
    ('cert_req', c_int),
    ('ctype_num', c_int),
    ('ctype', c_char * 7),
    ('ca_names', POINTER(STACK)),
    ('use_rsa_tmp', c_int),
    ('key_block_length', c_int),
    ('key_block', POINTER(c_ubyte)),
    ('new_sym_enc', POINTER(EVP_CIPHER)),
    ('new_hash', POINTER(EVP_MD)),
    ('new_compression', POINTER(SSL_COMP)),
    ('cert_request', c_int),
]
assert sizeof(N13ssl3_state_st4DOLLAR_20E) == 296, sizeof(N13ssl3_state_st4DOLLAR_20E)
assert alignment(N13ssl3_state_st4DOLLAR_20E) == 4, alignment(N13ssl3_state_st4DOLLAR_20E)
# SSLv3 connection state.
ssl3_state_st._fields_ = [
    ('flags', c_long),
    ('delay_buf_pop_ret', c_int),
    ('read_sequence', c_ubyte * 8),
    ('read_mac_secret', c_ubyte * 36),
    ('write_sequence', c_ubyte * 8),
    ('write_mac_secret', c_ubyte * 36),
    ('server_random', c_ubyte * 32),
    ('client_random', c_ubyte * 32),
    ('need_empty_fragments', c_int),
    ('empty_fragment_done', c_int),
    ('rbuf', SSL3_BUFFER),
    ('wbuf', SSL3_BUFFER),
    ('rrec', SSL3_RECORD),
    ('wrec', SSL3_RECORD),
    ('alert_fragment', c_ubyte * 2),
    ('alert_fragment_len', c_uint),
    ('handshake_fragment', c_ubyte * 4),
    ('handshake_fragment_len', c_uint),
    ('wnum', c_uint),
    ('wpend_tot', c_int),
    ('wpend_type', c_int),
    ('wpend_ret', c_int),
    ('wpend_buf', POINTER(c_ubyte)),
    ('finish_dgst1', EVP_MD_CTX),
    ('finish_dgst2', EVP_MD_CTX),
    ('change_cipher_spec', c_int),
    ('warn_alert', c_int),
    ('fatal_alert', c_int),
    ('alert_dispatch', c_int),
    ('send_alert', c_ubyte * 2),
    ('renegotiate', c_int),
    ('total_renegotiations', c_int),
    ('num_renegotiations', c_int),
    ('in_read_app_data', c_int),
    ('tmp', N13ssl3_state_st4DOLLAR_20E),
]
assert sizeof(ssl3_state_st) == 648, sizeof(ssl3_state_st)
assert alignment(ssl3_state_st) == 4, alignment(ssl3_state_st)
SSL3_STATE = ssl3_state_st
# Complete the forward-declared stack_st (OpenSSL's generic STACK container).
stack_st._fields_ = [
    ('num', c_int),
    ('data', POINTER(STRING)),
    ('sorted', c_int),
    ('num_alloc', c_int),
    ('comp', CFUNCTYPE(c_int, POINTER(STRING), POINTER(STRING))),
]
assert sizeof(stack_st) == 20, sizeof(stack_st)
assert alignment(stack_st) == 4, alignment(stack_st)
# UI (user-interface) types, kept opaque — only referenced by pointer.
class ui_st(Structure):
    pass
ui_st._fields_ = [
]
UI = ui_st
class ui_method_st(Structure):
    pass
ui_method_st._fields_ = [
]
UI_METHOD = ui_method_st
class ui_string_st(Structure):
    pass
ui_string_st._fields_ = [
]
UI_STRING = ui_string_st
# values for enumeration 'UI_string_types'
# C enums map to plain c_int in this generated binding.
UI_string_types = c_int # enum
# X.509 object registry entry (ASCII<->internal conversion callbacks).
class X509_objects_st(Structure):
    pass
X509_objects_st._fields_ = [
    ('nid', c_int),
    ('a2i', CFUNCTYPE(c_int)),
    ('i2a', CFUNCTYPE(c_int)),
]
assert sizeof(X509_objects_st) == 12, sizeof(X509_objects_st)
assert alignment(X509_objects_st) == 4, alignment(X509_objects_st)
X509_OBJECTS = X509_objects_st
# Complete the forward-declared X509_algor_st (AlgorithmIdentifier).
X509_algor_st._fields_ = [
    ('algorithm', POINTER(ASN1_OBJECT)),
    ('parameter', POINTER(ASN1_TYPE)),
]
assert sizeof(X509_algor_st) == 8, sizeof(X509_algor_st)
assert alignment(X509_algor_st) == 4, alignment(X509_algor_st)
# Certificate validity window.
class X509_val_st(Structure):
    pass
X509_val_st._fields_ = [
    ('notBefore', POINTER(ASN1_TIME)),
    ('notAfter', POINTER(ASN1_TIME)),
]
assert sizeof(X509_val_st) == 8, sizeof(X509_val_st)
assert alignment(X509_val_st) == 4, alignment(X509_val_st)
X509_VAL = X509_val_st
class X509_pubkey_st(Structure):
    pass
X509_pubkey_st._fields_ = [
    ('algor', POINTER(X509_ALGOR)),
    ('public_key', POINTER(ASN1_BIT_STRING)),
    ('pkey', POINTER(EVP_PKEY)),
]
assert sizeof(X509_pubkey_st) == 12, sizeof(X509_pubkey_st)
assert alignment(X509_pubkey_st) == 4, alignment(X509_pubkey_st)
X509_PUBKEY = X509_pubkey_st
class X509_sig_st(Structure):
    pass
X509_sig_st._fields_ = [
    ('algor', POINTER(X509_ALGOR)),
    ('digest', POINTER(ASN1_OCTET_STRING)),
]
assert sizeof(X509_sig_st) == 8, sizeof(X509_sig_st)
assert alignment(X509_sig_st) == 4, alignment(X509_sig_st)
X509_SIG = X509_sig_st
class X509_name_entry_st(Structure):
    pass
X509_name_entry_st._fields_ = [
    ('object', POINTER(ASN1_OBJECT)),
    ('value', POINTER(ASN1_STRING)),
    ('set', c_int),
    ('size', c_int),
]
assert sizeof(X509_name_entry_st) == 16, sizeof(X509_name_entry_st)
assert alignment(X509_name_entry_st) == 4, alignment(X509_name_entry_st)
X509_NAME_ENTRY = X509_name_entry_st
# Complete the forward-declared X509_name_st (distinguished name).
X509_name_st._fields_ = [
    ('entries', POINTER(STACK)),
    ('modified', c_int),
    ('bytes', POINTER(BUF_MEM)),
    ('hash', c_ulong),
]
assert sizeof(X509_name_st) == 16, sizeof(X509_name_st)
assert alignment(X509_name_st) == 4, alignment(X509_name_st)
# X.509 v3 extension.
class X509_extension_st(Structure):
    pass
X509_extension_st._fields_ = [
    ('object', POINTER(ASN1_OBJECT)),
    ('critical', ASN1_BOOLEAN),
    ('value', POINTER(ASN1_OCTET_STRING)),
]
assert sizeof(X509_extension_st) == 12, sizeof(X509_extension_st)
assert alignment(X509_extension_st) == 4, alignment(X509_extension_st)
X509_EXTENSION = X509_extension_st
# X.509 attribute: 'single' flag selects which union member of 'value' is live.
class x509_attributes_st(Structure):
    pass
class N18x509_attributes_st4DOLLAR_13E(Union):
    pass
N18x509_attributes_st4DOLLAR_13E._fields_ = [
    ('ptr', STRING),
    ('set', POINTER(STACK)),
    ('single', POINTER(ASN1_TYPE)),
]
assert sizeof(N18x509_attributes_st4DOLLAR_13E) == 4, sizeof(N18x509_attributes_st4DOLLAR_13E)
assert alignment(N18x509_attributes_st4DOLLAR_13E) == 4, alignment(N18x509_attributes_st4DOLLAR_13E)
x509_attributes_st._fields_ = [
    ('object', POINTER(ASN1_OBJECT)),
    ('single', c_int),
    ('value', N18x509_attributes_st4DOLLAR_13E),
]
assert sizeof(x509_attributes_st) == 12, sizeof(x509_attributes_st)
assert alignment(x509_attributes_st) == 4, alignment(x509_attributes_st)
X509_ATTRIBUTE = x509_attributes_st
# Certificate signing request (CSR) info and wrapper.
class X509_req_info_st(Structure):
    pass
X509_req_info_st._fields_ = [
    ('enc', ASN1_ENCODING),
    ('version', POINTER(ASN1_INTEGER)),
    ('subject', POINTER(X509_NAME)),
    ('pubkey', POINTER(X509_PUBKEY)),
    ('attributes', POINTER(STACK)),
]
assert sizeof(X509_req_info_st) == 28, sizeof(X509_req_info_st)
assert alignment(X509_req_info_st) == 4, alignment(X509_req_info_st)
X509_REQ_INFO = X509_req_info_st
class X509_req_st(Structure):
    pass
X509_req_st._fields_ = [
    ('req_info', POINTER(X509_REQ_INFO)),
    ('sig_alg', POINTER(X509_ALGOR)),
    ('signature', POINTER(ASN1_BIT_STRING)),
    ('references', c_int),
]
assert sizeof(X509_req_st) == 16, sizeof(X509_req_st)
assert alignment(X509_req_st) == 4, alignment(X509_req_st)
X509_REQ = X509_req_st
# TBSCertificate ("to be signed" portion of a certificate).
class x509_cinf_st(Structure):
    pass
x509_cinf_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('serialNumber', POINTER(ASN1_INTEGER)),
    ('signature', POINTER(X509_ALGOR)),
    ('issuer', POINTER(X509_NAME)),
    ('validity', POINTER(X509_VAL)),
    ('subject', POINTER(X509_NAME)),
    ('key', POINTER(X509_PUBKEY)),
    ('issuerUID', POINTER(ASN1_BIT_STRING)),
    ('subjectUID', POINTER(ASN1_BIT_STRING)),
    ('extensions', POINTER(STACK)),
]
assert sizeof(x509_cinf_st) == 40, sizeof(x509_cinf_st)
assert alignment(x509_cinf_st) == 4, alignment(x509_cinf_st)
X509_CINF = x509_cinf_st
# Auxiliary trust data kept alongside a certificate.
class x509_cert_aux_st(Structure):
    pass
x509_cert_aux_st._fields_ = [
    ('trust', POINTER(STACK)),
    ('reject', POINTER(STACK)),
    ('alias', POINTER(ASN1_UTF8STRING)),
    ('keyid', POINTER(ASN1_OCTET_STRING)),
    ('other', POINTER(STACK)),
]
assert sizeof(x509_cert_aux_st) == 20, sizeof(x509_cert_aux_st)
assert alignment(x509_cert_aux_st) == 4, alignment(x509_cert_aux_st)
X509_CERT_AUX = x509_cert_aux_st
# Forward declaration so x509_st can hold POINTER(AUTHORITY_KEYID_st).
class AUTHORITY_KEYID_st(Structure):
    pass
# Complete the forward-declared x509_st (X509 certificate object).
x509_st._fields_ = [
    ('cert_info', POINTER(X509_CINF)),
    ('sig_alg', POINTER(X509_ALGOR)),
    ('signature', POINTER(ASN1_BIT_STRING)),
    ('valid', c_int),
    ('references', c_int),
    ('name', STRING),
    ('ex_data', CRYPTO_EX_DATA),
    ('ex_pathlen', c_long),
    ('ex_flags', c_ulong),
    ('ex_kusage', c_ulong),
    ('ex_xkusage', c_ulong),
    ('ex_nscert', c_ulong),
    ('skid', POINTER(ASN1_OCTET_STRING)),
    ('akid', POINTER(AUTHORITY_KEYID_st)),
    ('sha1_hash', c_ubyte * 20),
    ('aux', POINTER(X509_CERT_AUX)),
]
assert sizeof(x509_st) == 84, sizeof(x509_st)
assert alignment(x509_st) == 4, alignment(x509_st)
# AUTHORITY_KEYID_st kept opaque — referenced only by pointer.
AUTHORITY_KEYID_st._fields_ = [
]
# Trust-checking descriptor; check_trust is a self-referential callback.
class x509_trust_st(Structure):
    pass
x509_trust_st._fields_ = [
    ('trust', c_int),
    ('flags', c_int),
    ('check_trust', CFUNCTYPE(c_int, POINTER(x509_trust_st), POINTER(X509), c_int)),
    ('name', STRING),
    ('arg1', c_int),
    ('arg2', c_void_p),
]
assert sizeof(x509_trust_st) == 24, sizeof(x509_trust_st)
assert alignment(x509_trust_st) == 4, alignment(x509_trust_st)
X509_TRUST = x509_trust_st
# CRL entry and CRL info/wrapper structures.
class X509_revoked_st(Structure):
    pass
X509_revoked_st._fields_ = [
    ('serialNumber', POINTER(ASN1_INTEGER)),
    ('revocationDate', POINTER(ASN1_TIME)),
    ('extensions', POINTER(STACK)),
    ('sequence', c_int),
]
assert sizeof(X509_revoked_st) == 16, sizeof(X509_revoked_st)
assert alignment(X509_revoked_st) == 4, alignment(X509_revoked_st)
X509_REVOKED = X509_revoked_st
class X509_crl_info_st(Structure):
    pass
X509_crl_info_st._fields_ = [
    ('version', POINTER(ASN1_INTEGER)),
    ('sig_alg', POINTER(X509_ALGOR)),
    ('issuer', POINTER(X509_NAME)),
    ('lastUpdate', POINTER(ASN1_TIME)),
    ('nextUpdate', POINTER(ASN1_TIME)),
    ('revoked', POINTER(STACK)),
    ('extensions', POINTER(STACK)),
    ('enc', ASN1_ENCODING),
]
assert sizeof(X509_crl_info_st) == 40, sizeof(X509_crl_info_st)
assert alignment(X509_crl_info_st) == 4, alignment(X509_crl_info_st)
X509_CRL_INFO = X509_crl_info_st
# Complete the forward-declared X509_crl_st.
X509_crl_st._fields_ = [
    ('crl', POINTER(X509_CRL_INFO)),
    ('sig_alg', POINTER(X509_ALGOR)),
    ('signature', POINTER(ASN1_BIT_STRING)),
    ('references', c_int),
]
assert sizeof(X509_crl_st) == 16, sizeof(X509_crl_st)
assert alignment(X509_crl_st) == 4, alignment(X509_crl_st)
# PEM-decoded private-key holder (X509_PKEY).
class private_key_st(Structure):
    pass
private_key_st._fields_ = [
    ('version', c_int),
    ('enc_algor', POINTER(X509_ALGOR)),
    ('enc_pkey', POINTER(ASN1_OCTET_STRING)),
    ('dec_pkey', POINTER(EVP_PKEY)),
    ('key_length', c_int),
    ('key_data', STRING),
    ('key_free', c_int),
    ('cipher', EVP_CIPHER_INFO),
    ('references', c_int),
]
assert sizeof(private_key_st) == 52, sizeof(private_key_st)
assert alignment(private_key_st) == 4, alignment(private_key_st)
X509_PKEY = private_key_st
# Aggregate returned by PEM file parsing: cert + CRL + key + cipher info.
class X509_info_st(Structure):
    pass
X509_info_st._fields_ = [
    ('x509', POINTER(X509)),
    ('crl', POINTER(X509_CRL)),
    ('x_pkey', POINTER(X509_PKEY)),
    ('enc_cipher', EVP_CIPHER_INFO),
    ('enc_len', c_int),
    ('enc_data', STRING),
    ('references', c_int),
]
assert sizeof(X509_info_st) == 44, sizeof(X509_info_st)
assert alignment(X509_info_st) == 4, alignment(X509_info_st)
X509_INFO = X509_info_st
# Netscape SPKAC / SPKI / certificate-sequence legacy formats.
class Netscape_spkac_st(Structure):
    pass
Netscape_spkac_st._fields_ = [
    ('pubkey', POINTER(X509_PUBKEY)),
    ('challenge', POINTER(ASN1_IA5STRING)),
]
assert sizeof(Netscape_spkac_st) == 8, sizeof(Netscape_spkac_st)
assert alignment(Netscape_spkac_st) == 4, alignment(Netscape_spkac_st)
NETSCAPE_SPKAC = Netscape_spkac_st
class Netscape_spki_st(Structure):
    pass
Netscape_spki_st._fields_ = [
    ('spkac', POINTER(NETSCAPE_SPKAC)),
    ('sig_algor', POINTER(X509_ALGOR)),
    ('signature', POINTER(ASN1_BIT_STRING)),
]
assert sizeof(Netscape_spki_st) == 12, sizeof(Netscape_spki_st)
assert alignment(Netscape_spki_st) == 4, alignment(Netscape_spki_st)
NETSCAPE_SPKI = Netscape_spki_st
class Netscape_certificate_sequence(Structure):
    pass
Netscape_certificate_sequence._fields_ = [
    ('type', POINTER(ASN1_OBJECT)),
    ('certs', POINTER(STACK)),
]
assert sizeof(Netscape_certificate_sequence) == 8, sizeof(Netscape_certificate_sequence)
assert alignment(Netscape_certificate_sequence) == 4, alignment(Netscape_certificate_sequence)
NETSCAPE_CERT_SEQUENCE = Netscape_certificate_sequence
# Password-based-encryption parameter structures (PKCS#5 PBE / PBES2 / PBKDF2).
class PBEPARAM_st(Structure):
    pass
PBEPARAM_st._fields_ = [
    ('salt', POINTER(ASN1_OCTET_STRING)),
    ('iter', POINTER(ASN1_INTEGER)),
]
assert sizeof(PBEPARAM_st) == 8, sizeof(PBEPARAM_st)
assert alignment(PBEPARAM_st) == 4, alignment(PBEPARAM_st)
PBEPARAM = PBEPARAM_st
class PBE2PARAM_st(Structure):
    pass
PBE2PARAM_st._fields_ = [
    ('keyfunc', POINTER(X509_ALGOR)),
    ('encryption', POINTER(X509_ALGOR)),
]
assert sizeof(PBE2PARAM_st) == 8, sizeof(PBE2PARAM_st)
assert alignment(PBE2PARAM_st) == 4, alignment(PBE2PARAM_st)
PBE2PARAM = PBE2PARAM_st
class PBKDF2PARAM_st(Structure):
    pass
PBKDF2PARAM_st._fields_ = [
    ('salt', POINTER(ASN1_TYPE)),
    ('iter', POINTER(ASN1_INTEGER)),
    ('keylength', POINTER(ASN1_INTEGER)),
    ('prf', POINTER(X509_ALGOR)),
]
assert sizeof(PBKDF2PARAM_st) == 16, sizeof(PBKDF2PARAM_st)
assert alignment(PBKDF2PARAM_st) == 4, alignment(PBKDF2PARAM_st)
PBKDF2PARAM = PBKDF2PARAM_st
# PKCS#8 private-key info.
class pkcs8_priv_key_info_st(Structure):
    pass
pkcs8_priv_key_info_st._fields_ = [
    ('broken', c_int),
    ('version', POINTER(ASN1_INTEGER)),
    ('pkeyalg', POINTER(X509_ALGOR)),
    ('pkey', POINTER(ASN1_TYPE)),
    ('attributes', POINTER(STACK)),
]
assert sizeof(pkcs8_priv_key_info_st) == 20, sizeof(pkcs8_priv_key_info_st)
assert alignment(pkcs8_priv_key_info_st) == 4, alignment(pkcs8_priv_key_info_st)
PKCS8_PRIV_KEY_INFO = pkcs8_priv_key_info_st
# Certificate lookup directory / file contexts.
class x509_hash_dir_st(Structure):
    pass
x509_hash_dir_st._fields_ = [
    ('num_dirs', c_int),
    ('dirs', POINTER(STRING)),
    ('dirs_type', POINTER(c_int)),
    ('num_dirs_alloced', c_int),
]
assert sizeof(x509_hash_dir_st) == 16, sizeof(x509_hash_dir_st)
assert alignment(x509_hash_dir_st) == 4, alignment(x509_hash_dir_st)
X509_HASH_DIR_CTX = x509_hash_dir_st
class x509_file_st(Structure):
    pass
x509_file_st._fields_ = [
    ('num_paths', c_int),
    ('num_alloced', c_int),
    ('paths', POINTER(STRING)),
    ('path_type', POINTER(c_int)),
]
assert sizeof(x509_file_st) == 16, sizeof(x509_file_st)
assert alignment(x509_file_st) == 4, alignment(x509_file_st)
X509_CERT_FILE_CTX = x509_file_st
# Tagged union of the object kinds an X509 store can hold; 'type' selects
# which member of 'data' is live.
class x509_object_st(Structure):
    pass
class N14x509_object_st4DOLLAR_14E(Union):
    pass
N14x509_object_st4DOLLAR_14E._fields_ = [
    ('ptr', STRING),
    ('x509', POINTER(X509)),
    ('crl', POINTER(X509_CRL)),
    ('pkey', POINTER(EVP_PKEY)),
]
assert sizeof(N14x509_object_st4DOLLAR_14E) == 4, sizeof(N14x509_object_st4DOLLAR_14E)
assert alignment(N14x509_object_st4DOLLAR_14E) == 4, alignment(N14x509_object_st4DOLLAR_14E)
x509_object_st._fields_ = [
    ('type', c_int),
    ('data', N14x509_object_st4DOLLAR_14E),
]
assert sizeof(x509_object_st) == 8, sizeof(x509_object_st)
assert alignment(x509_object_st) == 4, alignment(x509_object_st)
X509_OBJECT = x509_object_st
# Lookup-method vtable: callbacks for finding certs/CRLs by various keys.
class x509_lookup_st(Structure):
    pass
X509_LOOKUP = x509_lookup_st
class x509_lookup_method_st(Structure):
    pass
x509_lookup_method_st._fields_ = [
    ('name', STRING),
    ('new_item', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
    ('free', CFUNCTYPE(None, POINTER(X509_LOOKUP))),
    ('init', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
    ('shutdown', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
    ('ctrl', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_long, POINTER(STRING))),
    ('get_by_subject', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(X509_OBJECT))),
    ('get_by_issuer_serial', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(ASN1_INTEGER), POINTER(X509_OBJECT))),
    ('get_by_fingerprint', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(c_ubyte), c_int, POINTER(X509_OBJECT))),
    ('get_by_alias', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_int, POINTER(X509_OBJECT))),
]
assert sizeof(x509_lookup_method_st) == 40, sizeof(x509_lookup_method_st)
assert alignment(x509_lookup_method_st) == 4, alignment(x509_lookup_method_st)
X509_LOOKUP_METHOD = x509_lookup_method_st
# Complete the forward-declared x509_store_st (X509_STORE): verification
# policy plus overridable check callbacks.
x509_store_st._fields_ = [
    ('cache', c_int),
    ('objs', POINTER(STACK)),
    ('get_cert_methods', POINTER(STACK)),
    ('flags', c_ulong),
    ('purpose', c_int),
    ('trust', c_int),
    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
    ('ex_data', CRYPTO_EX_DATA),
    ('references', c_int),
    ('depth', c_int),
]
assert sizeof(x509_store_st) == 76, sizeof(x509_store_st)
assert alignment(x509_store_st) == 4, alignment(x509_store_st)
x509_lookup_st._fields_ = [
    ('init', c_int),
    ('skip', c_int),
    ('method', POINTER(X509_LOOKUP_METHOD)),
    ('method_data', STRING),
    ('store_ctx', POINTER(X509_STORE)),
]
assert sizeof(x509_lookup_st) == 20, sizeof(x509_lookup_st)
assert alignment(x509_lookup_st) == 4, alignment(x509_lookup_st)
time_t = __darwin_time_t
# Complete the forward-declared x509_store_ctx_st (X509_STORE_CTX): per-
# verification state; mirrors the callback slots of x509_store_st above.
x509_store_ctx_st._fields_ = [
    ('ctx', POINTER(X509_STORE)),
    ('current_method', c_int),
    ('cert', POINTER(X509)),
    ('untrusted', POINTER(STACK)),
    ('purpose', c_int),
    ('trust', c_int),
    ('check_time', time_t),
    ('flags', c_ulong),
    ('other_ctx', c_void_p),
    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
    ('depth', c_int),
    ('valid', c_int),
    ('last_untrusted', c_int),
    ('chain', POINTER(STACK)),
    ('error_depth', c_int),
    ('error', c_int),
    ('current_cert', POINTER(X509)),
    ('current_issuer', POINTER(X509)),
    ('current_crl', POINTER(X509_CRL)),
    ('ex_data', CRYPTO_EX_DATA),
]
assert sizeof(x509_store_ctx_st) == 116, sizeof(x509_store_ctx_st)
assert alignment(x509_store_ctx_st) == 4, alignment(x509_store_ctx_st)
# Darwin libc stdio types: the BSD FILE structure (__sFILE) and its helpers.
va_list = __darwin_va_list
__darwin_off_t = __int64_t
fpos_t = __darwin_off_t
class __sbuf(Structure):
    pass
__sbuf._fields_ = [
    ('_base', POINTER(c_ubyte)),
    ('_size', c_int),
]
assert sizeof(__sbuf) == 8, sizeof(__sbuf)
assert alignment(__sbuf) == 4, alignment(__sbuf)
# __sFILEX kept opaque — internal extension block referenced only by pointer.
class __sFILEX(Structure):
    pass
__sFILEX._fields_ = [
]
class __sFILE(Structure):
    pass
# _pack_ = 4: the 8-byte fpos_t fields are 4-byte aligned on 32-bit Darwin,
# matching the asserted sizeof of 88.
__sFILE._pack_ = 4
__sFILE._fields_ = [
    ('_p', POINTER(c_ubyte)),
    ('_r', c_int),
    ('_w', c_int),
    ('_flags', c_short),
    ('_file', c_short),
    ('_bf', __sbuf),
    ('_lbfsize', c_int),
    ('_cookie', c_void_p),
    ('_close', CFUNCTYPE(c_int, c_void_p)),
    ('_read', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
    ('_seek', CFUNCTYPE(fpos_t, c_void_p, c_longlong, c_int)),
    ('_write', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
    ('_ub', __sbuf),
    ('_extra', POINTER(__sFILEX)),
    ('_ur', c_int),
    ('_ubuf', c_ubyte * 3),
    ('_nbuf', c_ubyte * 1),
    ('_lb', __sbuf),
    ('_blksize', c_int),
    ('_offset', fpos_t),
]
assert sizeof(__sFILE) == 88, sizeof(__sFILE)
assert alignment(__sFILE) == 4, alignment(__sFILE)
FILE = __sFILE
ct_rune_t = __darwin_ct_rune_t
rune_t = __darwin_rune_t
class div_t(Structure):
pass
div_t._fields_ = [
('quot', c_int),
('rem', c_int),
]
assert sizeof(div_t) == 8, sizeof(div_t)
assert alignment(div_t) == 4, alignment(div_t)
class ldiv_t(Structure):
pass
ldiv_t._fields_ = [
('quot', c_long),
('rem', c_long),
]
assert sizeof(ldiv_t) == 8, sizeof(ldiv_t)
assert alignment(ldiv_t) == 4, alignment(ldiv_t)
class lldiv_t(Structure):
pass
lldiv_t._pack_ = 4
lldiv_t._fields_ = [
('quot', c_longlong),
('rem', c_longlong),
]
assert sizeof(lldiv_t) == 16, sizeof(lldiv_t)
assert alignment(lldiv_t) == 4, alignment(lldiv_t)
# Darwin basic typedefs plus the opaque pthread structures: each
# _opaque_pthread_* type is a signature long followed by a fixed-size
# opaque byte blob (the real layout is private to libpthread).
__darwin_dev_t = __int32_t
dev_t = __darwin_dev_t
__darwin_mode_t = __uint16_t
mode_t = __darwin_mode_t
# mcontext/mcontext64 are declared with empty field lists; they are only
# ever used via POINTER(...) below, so their contents never matter here.
class mcontext(Structure):
    pass
mcontext._fields_ = [
]
class mcontext64(Structure):
    pass
mcontext64._fields_ = [
]
class __darwin_pthread_handler_rec(Structure):
    pass
# Note the self-referential '__next' pointer - this is why the class is
# declared before its _fields_ are assigned.
__darwin_pthread_handler_rec._fields_ = [
    ('__routine', CFUNCTYPE(None, c_void_p)),
    ('__arg', c_void_p),
    ('__next', POINTER(__darwin_pthread_handler_rec)),
]
assert sizeof(__darwin_pthread_handler_rec) == 12, sizeof(__darwin_pthread_handler_rec)
assert alignment(__darwin_pthread_handler_rec) == 4, alignment(__darwin_pthread_handler_rec)
class _opaque_pthread_attr_t(Structure):
    pass
_opaque_pthread_attr_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 36),
]
assert sizeof(_opaque_pthread_attr_t) == 40, sizeof(_opaque_pthread_attr_t)
assert alignment(_opaque_pthread_attr_t) == 4, alignment(_opaque_pthread_attr_t)
class _opaque_pthread_cond_t(Structure):
    pass
_opaque_pthread_cond_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 24),
]
assert sizeof(_opaque_pthread_cond_t) == 28, sizeof(_opaque_pthread_cond_t)
assert alignment(_opaque_pthread_cond_t) == 4, alignment(_opaque_pthread_cond_t)
class _opaque_pthread_condattr_t(Structure):
    pass
_opaque_pthread_condattr_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 4),
]
assert sizeof(_opaque_pthread_condattr_t) == 8, sizeof(_opaque_pthread_condattr_t)
assert alignment(_opaque_pthread_condattr_t) == 4, alignment(_opaque_pthread_condattr_t)
class _opaque_pthread_mutex_t(Structure):
    pass
_opaque_pthread_mutex_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 40),
]
assert sizeof(_opaque_pthread_mutex_t) == 44, sizeof(_opaque_pthread_mutex_t)
assert alignment(_opaque_pthread_mutex_t) == 4, alignment(_opaque_pthread_mutex_t)
class _opaque_pthread_mutexattr_t(Structure):
    pass
_opaque_pthread_mutexattr_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 8),
]
assert sizeof(_opaque_pthread_mutexattr_t) == 12, sizeof(_opaque_pthread_mutexattr_t)
assert alignment(_opaque_pthread_mutexattr_t) == 4, alignment(_opaque_pthread_mutexattr_t)
class _opaque_pthread_once_t(Structure):
    pass
_opaque_pthread_once_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 4),
]
assert sizeof(_opaque_pthread_once_t) == 8, sizeof(_opaque_pthread_once_t)
assert alignment(_opaque_pthread_once_t) == 4, alignment(_opaque_pthread_once_t)
class _opaque_pthread_rwlock_t(Structure):
    pass
_opaque_pthread_rwlock_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 124),
]
assert sizeof(_opaque_pthread_rwlock_t) == 128, sizeof(_opaque_pthread_rwlock_t)
assert alignment(_opaque_pthread_rwlock_t) == 4, alignment(_opaque_pthread_rwlock_t)
class _opaque_pthread_rwlockattr_t(Structure):
    pass
_opaque_pthread_rwlockattr_t._fields_ = [
    ('__sig', c_long),
    ('__opaque', c_char * 12),
]
assert sizeof(_opaque_pthread_rwlockattr_t) == 16, sizeof(_opaque_pthread_rwlockattr_t)
assert alignment(_opaque_pthread_rwlockattr_t) == 4, alignment(_opaque_pthread_rwlockattr_t)
class _opaque_pthread_t(Structure):
    pass
_opaque_pthread_t._fields_ = [
    ('__sig', c_long),
    ('__cleanup_stack', POINTER(__darwin_pthread_handler_rec)),
    ('__opaque', c_char * 596),
]
assert sizeof(_opaque_pthread_t) == 604, sizeof(_opaque_pthread_t)
assert alignment(_opaque_pthread_t) == 4, alignment(_opaque_pthread_t)
# More Darwin primitive typedefs, then signal-stack / ucontext /
# resource-usage structures.
__darwin_blkcnt_t = __int64_t
__darwin_blksize_t = __int32_t
__darwin_fsblkcnt_t = c_uint
__darwin_fsfilcnt_t = c_uint
__darwin_gid_t = __uint32_t
__darwin_id_t = __uint32_t
__darwin_ino_t = __uint32_t
__darwin_mach_port_name_t = __darwin_natural_t
__darwin_mach_port_t = __darwin_mach_port_name_t
__darwin_mcontext_t = POINTER(mcontext)
__darwin_mcontext64_t = POINTER(mcontext64)
__darwin_pid_t = __int32_t
__darwin_pthread_attr_t = _opaque_pthread_attr_t
__darwin_pthread_cond_t = _opaque_pthread_cond_t
__darwin_pthread_condattr_t = _opaque_pthread_condattr_t
__darwin_pthread_key_t = c_ulong
__darwin_pthread_mutex_t = _opaque_pthread_mutex_t
__darwin_pthread_mutexattr_t = _opaque_pthread_mutexattr_t
__darwin_pthread_once_t = _opaque_pthread_once_t
__darwin_pthread_rwlock_t = _opaque_pthread_rwlock_t
__darwin_pthread_rwlockattr_t = _opaque_pthread_rwlockattr_t
__darwin_pthread_t = POINTER(_opaque_pthread_t)
__darwin_sigset_t = __uint32_t
__darwin_suseconds_t = __int32_t
__darwin_uid_t = __uint32_t
__darwin_useconds_t = __uint32_t
__darwin_uuid_t = c_ubyte * 16
# sigaltstack(2) alternate signal-stack descriptor.
class sigaltstack(Structure):
    pass
sigaltstack._fields_ = [
    ('ss_sp', c_void_p),
    ('ss_size', __darwin_size_t),
    ('ss_flags', c_int),
]
assert sizeof(sigaltstack) == 12, sizeof(sigaltstack)
assert alignment(sigaltstack) == 4, alignment(sigaltstack)
__darwin_stack_t = sigaltstack
# ucontext / ucontext64: note the self-referential 'uc_link' pointer.
class ucontext(Structure):
    pass
ucontext._fields_ = [
    ('uc_onstack', c_int),
    ('uc_sigmask', __darwin_sigset_t),
    ('uc_stack', __darwin_stack_t),
    ('uc_link', POINTER(ucontext)),
    ('uc_mcsize', __darwin_size_t),
    ('uc_mcontext', __darwin_mcontext_t),
]
assert sizeof(ucontext) == 32, sizeof(ucontext)
assert alignment(ucontext) == 4, alignment(ucontext)
__darwin_ucontext_t = ucontext
class ucontext64(Structure):
    pass
ucontext64._fields_ = [
    ('uc_onstack', c_int),
    ('uc_sigmask', __darwin_sigset_t),
    ('uc_stack', __darwin_stack_t),
    ('uc_link', POINTER(ucontext64)),
    ('uc_mcsize', __darwin_size_t),
    ('uc_mcontext64', __darwin_mcontext64_t),
]
assert sizeof(ucontext64) == 32, sizeof(ucontext64)
assert alignment(ucontext64) == 4, alignment(ucontext64)
__darwin_ucontext64_t = ucontext64
class timeval(Structure):
    pass
timeval._fields_ = [
    ('tv_sec', __darwin_time_t),
    ('tv_usec', __darwin_suseconds_t),
]
assert sizeof(timeval) == 8, sizeof(timeval)
assert alignment(timeval) == 4, alignment(timeval)
rlim_t = __int64_t
# getrusage(2) accounting structure.
class rusage(Structure):
    pass
rusage._fields_ = [
    ('ru_utime', timeval),
    ('ru_stime', timeval),
    ('ru_maxrss', c_long),
    ('ru_ixrss', c_long),
    ('ru_idrss', c_long),
    ('ru_isrss', c_long),
    ('ru_minflt', c_long),
    ('ru_majflt', c_long),
    ('ru_nswap', c_long),
    ('ru_inblock', c_long),
    ('ru_oublock', c_long),
    ('ru_msgsnd', c_long),
    ('ru_msgrcv', c_long),
    ('ru_nsignals', c_long),
    ('ru_nvcsw', c_long),
    ('ru_nivcsw', c_long),
]
assert sizeof(rusage) == 72, sizeof(rusage)
assert alignment(rusage) == 4, alignment(rusage)
class rlimit(Structure):
    pass
rlimit._pack_ = 4
rlimit._fields_ = [
    ('rlim_cur', rlim_t),
    ('rlim_max', rlim_t),
]
assert sizeof(rlimit) == 16, sizeof(rlimit)
assert alignment(rlimit) == 4, alignment(rlimit)
# POSIX-style aliases for the __darwin_* types, then the signal-handling
# structures (sigval/sigevent/siginfo/sigaction and friends).
mcontext_t = __darwin_mcontext_t
mcontext64_t = __darwin_mcontext64_t
pthread_attr_t = __darwin_pthread_attr_t
sigset_t = __darwin_sigset_t
ucontext_t = __darwin_ucontext_t
ucontext64_t = __darwin_ucontext64_t
uid_t = __darwin_uid_t
class sigval(Union):
    pass
sigval._fields_ = [
    ('sival_int', c_int),
    ('sival_ptr', c_void_p),
]
assert sizeof(sigval) == 4, sizeof(sigval)
assert alignment(sigval) == 4, alignment(sigval)
class sigevent(Structure):
    pass
sigevent._fields_ = [
    ('sigev_notify', c_int),
    ('sigev_signo', c_int),
    ('sigev_value', sigval),
    ('sigev_notify_function', CFUNCTYPE(None, sigval)),
    ('sigev_notify_attributes', POINTER(pthread_attr_t)),
]
assert sizeof(sigevent) == 20, sizeof(sigevent)
assert alignment(sigevent) == 4, alignment(sigevent)
class __siginfo(Structure):
    pass
pid_t = __darwin_pid_t
__siginfo._fields_ = [
    ('si_signo', c_int),
    ('si_errno', c_int),
    ('si_code', c_int),
    ('si_pid', pid_t),
    ('si_uid', uid_t),
    ('si_status', c_int),
    ('si_addr', c_void_p),
    ('si_value', sigval),
    ('si_band', c_long),
    ('pad', c_ulong * 7),
]
assert sizeof(__siginfo) == 64, sizeof(__siginfo)
assert alignment(__siginfo) == 4, alignment(__siginfo)
siginfo_t = __siginfo
# Union of the classic one-arg handler and the three-arg SA_SIGINFO
# handler, as used inside struct sigaction.
class __sigaction_u(Union):
    pass
__sigaction_u._fields_ = [
    ('__sa_handler', CFUNCTYPE(None, c_int)),
    ('__sa_sigaction', CFUNCTYPE(None, c_int, POINTER(__siginfo), c_void_p)),
]
assert sizeof(__sigaction_u) == 4, sizeof(__sigaction_u)
assert alignment(__sigaction_u) == 4, alignment(__sigaction_u)
# __sigaction is the kernel-side variant (includes sa_tramp); sigaction
# below is the user-visible one.
class __sigaction(Structure):
    pass
__sigaction._fields_ = [
    ('__sigaction_u', __sigaction_u),
    ('sa_tramp', CFUNCTYPE(None, c_void_p, c_int, c_int, POINTER(siginfo_t), c_void_p)),
    ('sa_mask', sigset_t),
    ('sa_flags', c_int),
]
assert sizeof(__sigaction) == 16, sizeof(__sigaction)
assert alignment(__sigaction) == 4, alignment(__sigaction)
class sigaction(Structure):
    pass
sigaction._fields_ = [
    ('__sigaction_u', __sigaction_u),
    ('sa_mask', sigset_t),
    ('sa_flags', c_int),
]
assert sizeof(sigaction) == 12, sizeof(sigaction)
assert alignment(sigaction) == 4, alignment(sigaction)
sig_t = CFUNCTYPE(None, c_int)
stack_t = __darwin_stack_t
class sigvec(Structure):
    pass
sigvec._fields_ = [
    ('sv_handler', CFUNCTYPE(None, c_int)),
    ('sv_mask', c_int),
    ('sv_flags', c_int),
]
assert sizeof(sigvec) == 12, sizeof(sigvec)
assert alignment(sigvec) == 4, alignment(sigvec)
class sigstack(Structure):
    pass
sigstack._fields_ = [
    ('ss_sp', STRING),
    ('ss_onstack', c_int),
]
assert sizeof(sigstack) == 8, sizeof(sigstack)
assert alignment(sigstack) == 4, alignment(sigstack)
# Classic BSD short typedefs, fd_set, and the wait(2) status union.
# The N4wait3DOLLAR_3E-style names are ctypeslib's mangling of anonymous
# nested C structs (the '$' in the generated name becomes 'DOLLAR').
u_char = c_ubyte
u_short = c_ushort
u_int = c_uint
u_long = c_ulong
ushort = c_ushort
uint = c_uint
u_quad_t = u_int64_t
quad_t = int64_t
qaddr_t = POINTER(quad_t)
caddr_t = STRING
daddr_t = int32_t
fixpt_t = u_int32_t
blkcnt_t = __darwin_blkcnt_t
blksize_t = __darwin_blksize_t
gid_t = __darwin_gid_t
in_addr_t = __uint32_t
in_port_t = __uint16_t
ino_t = __darwin_ino_t
key_t = __int32_t
nlink_t = __uint16_t
off_t = __darwin_off_t
segsz_t = int32_t
swblk_t = int32_t
clock_t = __darwin_clock_t
ssize_t = __darwin_ssize_t
useconds_t = __darwin_useconds_t
suseconds_t = __darwin_suseconds_t
fd_mask = __int32_t
# select(2) descriptor set: 32 * 32 bits = 1024 descriptors.
class fd_set(Structure):
    pass
fd_set._fields_ = [
    ('fds_bits', __int32_t * 32),
]
assert sizeof(fd_set) == 128, sizeof(fd_set)
assert alignment(fd_set) == 4, alignment(fd_set)
pthread_cond_t = __darwin_pthread_cond_t
pthread_condattr_t = __darwin_pthread_condattr_t
pthread_mutex_t = __darwin_pthread_mutex_t
pthread_mutexattr_t = __darwin_pthread_mutexattr_t
pthread_once_t = __darwin_pthread_once_t
pthread_rwlock_t = __darwin_pthread_rwlock_t
pthread_rwlockattr_t = __darwin_pthread_rwlockattr_t
pthread_t = __darwin_pthread_t
pthread_key_t = __darwin_pthread_key_t
fsblkcnt_t = __darwin_fsblkcnt_t
fsfilcnt_t = __darwin_fsfilcnt_t
# values for enumeration 'idtype_t'
idtype_t = c_int # enum
id_t = __darwin_id_t
# wait(2) status union: bit-fields for termination (w_T) and stop (w_S)
# views over the same 32-bit w_status.
class wait(Union):
    pass
class N4wait3DOLLAR_3E(Structure):
    pass
N4wait3DOLLAR_3E._fields_ = [
    ('w_Termsig', c_uint, 7),
    ('w_Coredump', c_uint, 1),
    ('w_Retcode', c_uint, 8),
    ('w_Filler', c_uint, 16),
]
assert sizeof(N4wait3DOLLAR_3E) == 4, sizeof(N4wait3DOLLAR_3E)
assert alignment(N4wait3DOLLAR_3E) == 4, alignment(N4wait3DOLLAR_3E)
class N4wait3DOLLAR_4E(Structure):
    pass
N4wait3DOLLAR_4E._fields_ = [
    ('w_Stopval', c_uint, 8),
    ('w_Stopsig', c_uint, 8),
    ('w_Filler', c_uint, 16),
]
assert sizeof(N4wait3DOLLAR_4E) == 4, sizeof(N4wait3DOLLAR_4E)
assert alignment(N4wait3DOLLAR_4E) == 4, alignment(N4wait3DOLLAR_4E)
wait._fields_ = [
    ('w_status', c_int),
    ('w_T', N4wait3DOLLAR_3E),
    ('w_S', N4wait3DOLLAR_4E),
]
assert sizeof(wait) == 4, sizeof(wait)
assert alignment(wait) == 4, alignment(wait)
# time.h structures and the C99 stdint.h typedef families
# (least/fast/ptr/max variants).
class timespec(Structure):
    pass
timespec._fields_ = [
    ('tv_sec', time_t),
    ('tv_nsec', c_long),
]
assert sizeof(timespec) == 8, sizeof(timespec)
assert alignment(timespec) == 4, alignment(timespec)
# struct tm - broken-down calendar time (includes the BSD tm_gmtoff /
# tm_zone extensions).
class tm(Structure):
    pass
tm._fields_ = [
    ('tm_sec', c_int),
    ('tm_min', c_int),
    ('tm_hour', c_int),
    ('tm_mday', c_int),
    ('tm_mon', c_int),
    ('tm_year', c_int),
    ('tm_wday', c_int),
    ('tm_yday', c_int),
    ('tm_isdst', c_int),
    ('tm_gmtoff', c_long),
    ('tm_zone', STRING),
]
assert sizeof(tm) == 44, sizeof(tm)
assert alignment(tm) == 4, alignment(tm)
__gnuc_va_list = STRING
ptrdiff_t = c_int
int8_t = c_byte
int16_t = c_short
uint8_t = c_ubyte
uint16_t = c_ushort
uint32_t = c_uint
uint64_t = c_ulonglong
int_least8_t = int8_t
int_least16_t = int16_t
int_least32_t = int32_t
int_least64_t = int64_t
uint_least8_t = uint8_t
uint_least16_t = uint16_t
uint_least32_t = uint32_t
uint_least64_t = uint64_t
int_fast8_t = int8_t
int_fast16_t = int16_t
int_fast32_t = int32_t
int_fast64_t = int64_t
uint_fast8_t = uint8_t
uint_fast16_t = uint16_t
uint_fast32_t = uint32_t
uint_fast64_t = uint64_t
intptr_t = c_long
uintptr_t = c_ulong
intmax_t = c_longlong
uintmax_t = c_ulonglong
# Generated public-API list - every name the bindings export.  Order is
# arbitrary (generator output); do not hand-edit, regenerate instead.
__all__ = ['ENGINE', 'pkcs7_enc_content_st', '__int16_t',
           'X509_REVOKED', 'SSL_CTX', 'UIT_BOOLEAN',
           '__darwin_time_t', 'ucontext64_t', 'int_fast32_t',
           'pem_ctx_st', 'uint8_t', 'fpos_t', 'X509', 'COMP_CTX',
           'tm', 'N10pem_ctx_st4DOLLAR_17E', 'swblk_t',
           'ASN1_TEMPLATE', '__darwin_pthread_t', 'fixpt_t',
           'BIO_METHOD', 'ASN1_PRINTABLESTRING', 'EVP_ENCODE_CTX',
           'dh_method', 'bio_f_buffer_ctx_struct', 'in_port_t',
           'X509_SIG', '__darwin_ssize_t', '__darwin_sigset_t',
           'wait', 'uint_fast16_t', 'N12asn1_type_st4DOLLAR_11E',
           'uint_least8_t', 'pthread_rwlock_t', 'ASN1_IA5STRING',
           'fsfilcnt_t', 'ucontext', '__uint64_t', 'timespec',
           'x509_cinf_st', 'COMP_METHOD', 'MD5_CTX', 'buf_mem_st',
           'ASN1_ENCODING_st', 'PBEPARAM', 'X509_NAME_ENTRY',
           '__darwin_va_list', 'ucontext_t', 'lhash_st',
           'N4wait3DOLLAR_4E', '__darwin_uuid_t',
           '_ossl_old_des_ks_struct', 'id_t', 'ASN1_BIT_STRING',
           'va_list', '__darwin_wchar_t', 'pthread_key_t',
           'pkcs7_signer_info_st', 'ASN1_METHOD', 'DSA_SIG', 'DSA',
           'UIT_NONE', 'pthread_t', '__darwin_useconds_t',
           'uint_fast8_t', 'UI_STRING', 'DES_cblock',
           '__darwin_mcontext64_t', 'rlim_t', 'PEM_Encode_Seal_st',
           'SHAstate_st', 'u_quad_t', 'openssl_fptr',
           '_opaque_pthread_rwlockattr_t',
           'N18x509_attributes_st4DOLLAR_13E',
           '__darwin_pthread_rwlock_t', 'daddr_t', 'ui_string_st',
           'x509_file_st', 'X509_req_info_st', 'int_least64_t',
           'evp_Encode_Ctx_st', 'X509_OBJECTS', 'CRYPTO_EX_DATA',
           '__int8_t', 'AUTHORITY_KEYID_st', '_opaque_pthread_attr_t',
           'sigstack', 'EVP_CIPHER_CTX', 'X509_extension_st', 'pid_t',
           'RSA_METHOD', 'PEM_USER', 'pem_recip_st', 'env_md_ctx_st',
           'rc5_key_st', 'ui_st', 'X509_PUBKEY', 'u_int8_t',
           'ASN1_ITEM_st', 'pkcs7_recip_info_st', 'ssl2_state_st',
           'off_t', 'N10ssl_ctx_st4DOLLAR_18E', 'crypto_ex_data_st',
           'ui_method_st', '__darwin_pthread_rwlockattr_t',
           'CRYPTO_EX_dup', '__darwin_ino_t', '__sFILE',
           'OSUnknownByteOrder', 'BN_MONT_CTX', 'ASN1_NULL', 'time_t',
           'CRYPTO_EX_new', 'asn1_type_st', 'CRYPTO_EX_DATA_FUNCS',
           'user_time_t', 'BIGNUM', 'pthread_rwlockattr_t',
           'ASN1_VALUE_st', 'DH_METHOD', '__darwin_off_t',
           '_opaque_pthread_t', 'bn_blinding_st', 'RSA', 'ssize_t',
           'mcontext64_t', 'user_long_t', 'fsblkcnt_t', 'cert_st',
           '__darwin_pthread_condattr_t', 'X509_PKEY',
           '__darwin_id_t', '__darwin_nl_item', 'SSL2_STATE', 'FILE',
           'pthread_mutexattr_t', 'size_t',
           '_ossl_old_des_key_schedule', 'pkcs7_issuer_and_serial_st',
           'sigval', 'CRYPTO_MEM_LEAK_CB', 'X509_NAME', 'blkcnt_t',
           'uint_least16_t', '__darwin_dev_t', 'evp_cipher_info_st',
           'BN_BLINDING', 'ssl3_state_st', 'uint_least64_t',
           'user_addr_t', 'DES_key_schedule', 'RIPEMD160_CTX',
           'u_char', 'X509_algor_st', 'uid_t', 'sess_cert_st',
           'u_int64_t', 'u_int16_t', 'sigset_t', '__darwin_ptrdiff_t',
           'ASN1_CTX', 'STACK', '__int32_t', 'UI_METHOD',
           'NETSCAPE_SPKI', 'UIT_PROMPT', 'st_CRYPTO_EX_DATA_IMPL',
           'cast_key_st', 'X509_HASH_DIR_CTX', 'sigevent',
           'user_ssize_t', 'clock_t', 'aes_key_st',
           '__darwin_socklen_t', '__darwin_intptr_t', 'int_fast64_t',
           'asn1_string_table_st', 'uint_fast32_t',
           'ASN1_VISIBLESTRING', 'DSA_SIG_st', 'obj_name_st',
           'X509_LOOKUP_METHOD', 'u_int32_t', 'EVP_CIPHER_INFO',
           '__gnuc_va_list', 'AES_KEY', 'PKCS7_ISSUER_AND_SERIAL',
           'BN_CTX', '__darwin_blkcnt_t', 'key_t', 'SHA_CTX',
           'pkcs7_signed_st', 'SSL', 'N10pem_ctx_st4DOLLAR_16E',
           'pthread_attr_t', 'EVP_MD', 'uint', 'ASN1_BOOLEAN',
           'ino_t', '__darwin_clock_t', 'ASN1_OCTET_STRING',
           'asn1_ctx_st', 'BIO_F_BUFFER_CTX', 'bn_mont_ctx_st',
           'X509_REQ_INFO', 'PEM_CTX', 'sigvec',
           '__darwin_pthread_mutexattr_t', 'x509_attributes_st',
           'stack_t', '__darwin_mode_t', '__mbstate_t',
           'asn1_object_st', 'ASN1_ENCODING', '__uint8_t',
           'LHASH_NODE', 'PKCS7_SIGNER_INFO', 'asn1_method_st',
           'stack_st', 'bio_info_cb', 'div_t', 'UIT_VERIFY',
           'PBEPARAM_st', 'N4wait3DOLLAR_3E', 'quad_t', '__siginfo',
           '__darwin_mbstate_t', 'rsa_st', 'ASN1_UNIVERSALSTRING',
           'uint64_t', 'ssl_comp_st', 'X509_OBJECT', 'pthread_cond_t',
           'DH', '__darwin_wctype_t', 'PKCS7_ENVELOPE', 'ASN1_TLC_st',
           'sig_atomic_t', 'BIO', 'nlink_t', 'BUF_MEM', 'SSL3_RECORD',
           'bio_method_st', 'timeval', 'UI_string_types', 'BIO_dummy',
           'ssl_ctx_st', 'NETSCAPE_CERT_SEQUENCE',
           'BIT_STRING_BITNAME_st', '__darwin_pthread_attr_t',
           'int8_t', '__darwin_wint_t', 'OBJ_NAME',
           'PKCS8_PRIV_KEY_INFO', 'PBE2PARAM_st',
           'LHASH_DOALL_FN_TYPE', 'x509_st', 'X509_VAL', 'dev_t',
           'ASN1_TEMPLATE_st', 'MD5state_st', '__uint16_t',
           'LHASH_DOALL_ARG_FN_TYPE', 'mdc2_ctx_st', 'SSL3_STATE',
           'ssl3_buffer_st', 'ASN1_ITEM_EXP',
           '_opaque_pthread_condattr_t', 'mode_t', 'ASN1_VALUE',
           'qaddr_t', '__darwin_gid_t', 'EVP_PKEY', 'CRYPTO_EX_free',
           '_ossl_old_des_cblock', 'X509_INFO', 'asn1_string_st',
           'intptr_t', 'UIT_INFO', 'int_fast8_t', 'sigaltstack',
           'env_md_st', 'LHASH', '__darwin_ucontext_t',
           'PKCS7_SIGN_ENVELOPE', '__darwin_mcontext_t', 'ct_rune_t',
           'MD2_CTX', 'pthread_once_t', 'SSL3_BUFFER', 'fd_mask',
           'ASN1_TYPE', 'PKCS7_SIGNED', 'ssl3_record_st', 'BF_KEY',
           'MD4state_st', 'MD4_CTX', 'int16_t', 'SSL_CIPHER',
           'rune_t', 'X509_TRUST', 'siginfo_t', 'X509_STORE',
           '__sbuf', 'X509_STORE_CTX', '__darwin_blksize_t', 'ldiv_t',
           'ASN1_TIME', 'SSL_METHOD', 'X509_LOOKUP',
           'Netscape_spki_st', 'P_PID', 'sigaction', 'sig_t',
           'hostent', 'x509_cert_aux_st', '_opaque_pthread_cond_t',
           'segsz_t', 'ushort', '__darwin_ct_rune_t', 'fd_set',
           'BN_RECP_CTX', 'x509_lookup_st', 'uint16_t', 'pkcs7_st',
           'asn1_header_st', '__darwin_pthread_key_t',
           'x509_trust_st', '__darwin_pthread_handler_rec', 'int32_t',
           'X509_CRL_INFO', 'N11evp_pkey_st4DOLLAR_12E', 'MDC2_CTX',
           'N23_ossl_old_des_ks_struct4DOLLAR_10E', 'ASN1_HEADER',
           'X509_crl_info_st', 'LHASH_HASH_FN_TYPE',
           '_opaque_pthread_mutexattr_t', 'ssl_st',
           'N8pkcs7_st4DOLLAR_15E', 'evp_pkey_st',
           'pkcs7_signedandenveloped_st', '__darwin_mach_port_t',
           'EVP_PBE_KEYGEN', '_opaque_pthread_mutex_t',
           'ASN1_UTCTIME', 'mcontext', 'crypto_ex_data_func_st',
           'u_long', 'PBKDF2PARAM_st', 'rc4_key_st', 'DSA_METHOD',
           'EVP_CIPHER', 'BIT_STRING_BITNAME', 'PKCS7_RECIP_INFO',
           'ssl3_enc_method', 'X509_CERT_AUX', 'uintmax_t',
           'int_fast16_t', 'RC5_32_KEY', 'ucontext64', 'ASN1_INTEGER',
           'u_short', 'N14x509_object_st4DOLLAR_14E', 'mcontext64',
           'X509_sig_st', 'ASN1_GENERALSTRING', 'PKCS7', '__sFILEX',
           'X509_name_entry_st', 'ssl_session_st', 'caddr_t',
           'bignum_st', 'X509_CINF', '__darwin_pthread_cond_t',
           'ASN1_TLC', 'PKCS7_ENCRYPT', 'NETSCAPE_SPKAC',
           'Netscape_spkac_st', 'idtype_t', 'UIT_ERROR',
           'uint_fast64_t', 'in_addr_t', 'pthread_mutex_t',
           '__int64_t', 'ASN1_BMPSTRING', 'uint32_t',
           'PEM_ENCODE_SEAL_CTX', 'suseconds_t', 'ASN1_OBJECT',
           'X509_val_st', 'private_key_st', 'CRYPTO_dynlock',
           'X509_objects_st', 'CRYPTO_EX_DATA_IMPL',
           'pthread_condattr_t', 'PKCS7_DIGEST', 'uint_least32_t',
           'ASN1_STRING', '__uint32_t', 'P_PGID', 'rsa_meth_st',
           'X509_crl_st', 'RC2_KEY', '__darwin_fsfilcnt_t',
           'X509_revoked_st', 'PBE2PARAM', 'blksize_t',
           'Netscape_certificate_sequence', 'ssl_cipher_st',
           'bignum_ctx', 'register_t', 'ASN1_UTF8STRING',
           'pkcs7_encrypted_st', 'RC4_KEY', '__darwin_ucontext64_t',
           'N13ssl2_state_st4DOLLAR_19E', 'bn_recp_ctx_st',
           'CAST_KEY', 'X509_ATTRIBUTE', '__darwin_suseconds_t',
           '__sigaction', 'user_ulong_t', 'syscall_arg_t',
           'evp_cipher_ctx_st', 'X509_ALGOR', 'mcontext_t',
           'const_DES_cblock', '__darwin_fsblkcnt_t', 'dsa_st',
           'int_least8_t', 'MD2state_st', 'X509_EXTENSION',
           'GEN_SESSION_CB', 'int_least16_t', '__darwin_wctrans_t',
           'PBKDF2PARAM', 'x509_lookup_method_st', 'pem_password_cb',
           'X509_info_st', 'x509_store_st', '__darwin_natural_t',
           'X509_pubkey_st', 'pkcs7_digest_st', '__darwin_size_t',
           'ASN1_STRING_TABLE', 'OSLittleEndian', 'RIPEMD160state_st',
           'pkcs7_enveloped_st', 'UI', 'ptrdiff_t', 'X509_REQ',
           'CRYPTO_dynlock_value', 'X509_req_st', 'x509_store_ctx_st',
           'N13ssl3_state_st4DOLLAR_20E', 'lhash_node_st',
           '__darwin_pthread_mutex_t', 'LHASH_COMP_FN_TYPE',
           '__darwin_rune_t', 'rlimit', '__darwin_pthread_once_t',
           'OSBigEndian', 'uintptr_t', '__darwin_uid_t', 'u_int',
           'ASN1_T61STRING', 'gid_t', 'ssl_method_st', 'ASN1_ITEM',
           'ASN1_ENUMERATED', '_opaque_pthread_rwlock_t',
           'pkcs8_priv_key_info_st', 'intmax_t', 'sigcontext',
           'X509_CRL', 'rc2_key_st', 'engine_st', 'x509_object_st',
           '_opaque_pthread_once_t', 'DES_ks', 'SSL_COMP',
           'dsa_method', 'int64_t', 'bio_st', 'bf_key_st',
           'ASN1_GENERALIZEDTIME', 'PKCS7_ENC_CONTENT',
           '__darwin_pid_t', 'lldiv_t', 'comp_method_st',
           'EVP_MD_CTX', 'evp_cipher_st', 'X509_name_st',
           'x509_hash_dir_st', '__darwin_mach_port_name_t',
           'useconds_t', 'user_size_t', 'SSL_SESSION', 'rusage',
           'ssl_crock_st', 'int_least32_t', '__sigaction_u', 'dh_st',
           'P_ALL', '__darwin_stack_t', 'N6DES_ks3DOLLAR_9E',
           'comp_ctx_st', 'X509_CERT_FILE_CTX']
|
d-lee/airflow
|
refs/heads/master
|
airflow/contrib/hooks/__init__.py
|
19
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contrib hooks are not imported by default. They should be accessed
# directly: from airflow.contrib.hooks.hook_module import Hook
import sys
# ------------------------------------------------------------------------
#
# #TODO #FIXME Airflow 2.0
#
# Old import machinery below.
#
# This is deprecated but should be kept until Airflow 2.0
# for compatibility.
#
# ------------------------------------------------------------------------
# Maps hook module name -> list of class names that module exports; fed
# to AirflowImporter below so the classes can be lazily imported from
# this package for backwards compatibility.
_hooks = {
    'ftp_hook': ['FTPHook'],
    'ftps_hook': ['FTPSHook'],
    'vertica_hook': ['VerticaHook'],
    'ssh_hook': ['SSHHook'],
    'bigquery_hook': ['BigQueryHook'],
    'qubole_hook': ['QuboleHook'],
    'gcs_hook': ['GoogleCloudStorageHook'],
    'datastore_hook': ['DatastoreHook'],
    'gcp_dataproc_hook': ['DataProcHook'],
    'gcp_dataflow_hook': ['DataFlowHook'],
    'cloudant_hook': ['CloudantHook'],
    'fs_hook': ['FSHook']
}
import os as _os
# NOTE(review): environment values are strings, so setting the variable
# to ANY non-empty value (including "False" or "0") disables the legacy
# shim; only an unset/empty variable keeps it active.  Confirm that is
# the intended contract before relying on it.
if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
    from airflow.utils.helpers import AirflowImporter
    airflow_importer = AirflowImporter(sys.modules[__name__], _hooks)
|
etzhou/edx-platform
|
refs/heads/master
|
lms/djangoapps/class_dashboard/dashboard_data.py
|
99
|
"""
Computes the data to display on the Instructor Dashboard
"""
from util.json_request import JsonResponse
import json
from courseware import models
from django.db.models import Count
from django.utils.translation import ugettext as _
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
from instructor_analytics.csvs import create_csv_response
from opaque_keys.edx.locations import Location
# Used to limit the length of list displayed to the screen.
# NOTE(review): not referenced within this module chunk; presumably
# consumed by view/rendering code elsewhere - confirm before removing.
MAX_SCREEN_LIST_LENGTH = 250
def get_problem_grade_distribution(course_id):
    """
    Return the grade distribution per problem for the course.

    `course_id` the course ID for the course interested in

    Output is 2 dicts:
      'prob_grade_distrib' where the key is the problem 'module_id' and the value is a dict with:
        'max_grade' - max grade for this problem
        'grade_distrib' - array of tuples (`grade`,`count`).
      'total_student_count' where the key is problem 'module_id' and the value is number of students
        attempting the problem
    """
    # Aggregate query on studentmodule table for grade data for all problems in course.
    # One row per distinct (problem, grade, max_grade) with a count of students.
    db_query = models.StudentModule.objects.filter(
        course_id__exact=course_id,
        grade__isnull=False,
        module_type__exact="problem",
    ).values('module_state_key', 'grade', 'max_grade').annotate(count_grade=Count('grade'))
    prob_grade_distrib = {}
    total_student_count = {}
    # Loop through resultset building data for each problem
    for row in db_query:
        curr_problem = course_id.make_usage_key_from_deprecated_string(row['module_state_key'])
        # Build set of grade distributions for each problem that has student responses
        if curr_problem in prob_grade_distrib:
            entry = prob_grade_distrib[curr_problem]
            entry['grade_distrib'].append((row['grade'], row['count_grade']))
            # Keep the largest max_grade seen across rows for this problem.
            # (The original also checked `!=` before `<`, which is redundant:
            # strict inequality already implies the values differ.)
            if entry['max_grade'] < row['max_grade']:
                entry['max_grade'] = row['max_grade']
        else:
            prob_grade_distrib[curr_problem] = {
                'max_grade': row['max_grade'],
                'grade_distrib': [(row['grade'], row['count_grade'])]
            }
        # Build set of total students attempting each problem
        total_student_count[curr_problem] = total_student_count.get(curr_problem, 0) + row['count_grade']
    return prob_grade_distrib, total_student_count
def get_sequential_open_distrib(course_id):
    """
    Return the number of students that opened each subsection/sequential of the course.

    `course_id` the course ID for the course interested in

    Outputs a dict mapping the 'module_id' to the number of students that have opened
    that subsection/sequential.
    """
    # One row per sequential, annotated with how many studentmodule rows
    # (i.e. students) touched it.
    open_counts = models.StudentModule.objects.filter(
        course_id__exact=course_id,
        module_type__exact="sequential",
    ).values('module_state_key').annotate(count_sequential=Count('module_state_key'))
    # Re-key each row by its usage key.
    return {
        course_id.make_usage_key_from_deprecated_string(entry['module_state_key']): entry['count_sequential']
        for entry in open_counts
    }
def get_problem_set_grade_distrib(course_id, problem_set):
    """
    Return the grade distribution for the problems specified in `problem_set`.

    `course_id` the course ID for the course interested in
    `problem_set` an array of UsageKeys representing problem module_id's.

    Requests from the database a count of each grade for each problem in the `problem_set`.

    Returns a dict, where the key is the problem 'module_id' and the value is a dict with two parts:
      'max_grade' - the maximum grade possible for the course
      'grade_distrib' - array of tuples (`grade`,`count`) ordered by `grade`
    """
    # Aggregate query: count of students per (problem, grade), restricted to
    # the requested problems, ordered so grade_distrib comes out sorted.
    db_query = models.StudentModule.objects.filter(
        course_id__exact=course_id,
        grade__isnull=False,
        module_type__exact="problem",
        module_state_key__in=problem_set,
    ).values(
        'module_state_key',
        'grade',
        'max_grade',
    ).annotate(count_grade=Count('grade')).order_by('module_state_key', 'grade')
    distrib_by_problem = {}
    for entry in db_query:
        loc = course_id.make_usage_key_from_deprecated_string(entry['module_state_key'])
        # First row for a problem seeds an empty bucket.
        bucket = distrib_by_problem.setdefault(loc, {'max_grade': 0, 'grade_distrib': []})
        bucket['grade_distrib'].append((entry['grade'], entry['count_grade']))
        bucket['max_grade'] = max(bucket['max_grade'], entry['max_grade'])
    return distrib_by_problem
def get_d3_problem_grade_distrib(course_id):
    """
    Returns problem grade distribution information for each section, data already in format for d3 function.

    `course_id` the course ID for the course interested in

    Returns an array of dicts in the order of the sections. Each dict has:
      'display_name' - display name for the section
      'data' - data for the d3_stacked_bar_graph function of the grade distribution for that problem
    """
    prob_grade_distrib, total_student_count = get_problem_grade_distribution(course_id)
    d3_data = []
    # Retrieve course object down to problems
    # (depth=4 matches the traversal below: section > subsection > unit > problem)
    course = modulestore().get_course(course_id, depth=4)
    # Iterate through sections, subsections, units, problems
    for section in course.get_children():
        curr_section = {}
        curr_section['display_name'] = own_metadata(section).get('display_name', '')
        data = []
        # c_* counters are 1-based position labels within the parent,
        # used only to build the "P<subsection>.<unit>.<problem>" label.
        c_subsection = 0
        for subsection in section.get_children():
            c_subsection += 1
            c_unit = 0
            for unit in subsection.get_children():
                c_unit += 1
                c_problem = 0
                for child in unit.get_children():
                    # Student data is at the problem level
                    if child.location.category == 'problem':
                        c_problem += 1
                        stack_data = []
                        # Construct label to display for this problem
                        label = "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem)
                        # Only problems in prob_grade_distrib have had a student submission.
                        # (Problems with no submissions get an empty stackData.)
                        if child.location in prob_grade_distrib:
                            # Get max_grade, grade_distribution for this problem
                            problem_info = prob_grade_distrib[child.location]
                            # Get problem_name for tooltip
                            problem_name = own_metadata(child).get('display_name', '')
                            # Compute percent of this grade over max_grade
                            max_grade = float(problem_info['max_grade'])
                            for (grade, count_grade) in problem_info['grade_distrib']:
                                percent = 0.0
                                # Guard against max_grade == 0 (division by zero).
                                if max_grade > 0:
                                    percent = round((grade * 100.0) / max_grade, 1)
                                # Compute percent of students with this grade
                                student_count_percent = 0
                                if total_student_count.get(child.location, 0) > 0:
                                    student_count_percent = count_grade * 100 / total_student_count[child.location]
                                # Tooltip parameters for problem in grade distribution view
                                tooltip = {
                                    'type': 'problem',
                                    'label': label,
                                    'problem_name': problem_name,
                                    'count_grade': count_grade,
                                    'percent': percent,
                                    'grade': grade,
                                    'max_grade': max_grade,
                                    'student_count_percent': student_count_percent,
                                }
                                # Construct data to be sent to d3
                                stack_data.append({
                                    'color': percent,
                                    'value': count_grade,
                                    'tooltip': tooltip,
                                    'module_url': child.location.to_deprecated_string(),
                                })
                        problem = {
                            'xValue': label,
                            'stackData': stack_data,
                        }
                        data.append(problem)
        curr_section['data'] = data
        d3_data.append(curr_section)
    return d3_data
def get_d3_sequential_open_distrib(course_id):
    """
    Returns how many students opened a sequential/subsection for each section, data already in format for d3 function.

    `course_id` the course ID for the course interested in

    Returns an array in the order of the sections and each dict has:
       'display_name' - display name for the section
       'data' - data for the d3_stacked_bar_graph function of how many students opened each sequential/subsection
    """
    sequential_open_distrib = get_sequential_open_distrib(course_id)
    d3_data = []
    # Retrieve course object down to subsection
    course = modulestore().get_course(course_id, depth=2)
    # Iterate through sections, subsections
    for section in course.get_children():
        curr_section = {}
        curr_section['display_name'] = own_metadata(section).get('display_name', '')
        data = []
        c_subsection = 0
        # Construct data for each subsection to be sent to d3
        for subsection in section.get_children():
            c_subsection += 1
            subsection_name = own_metadata(subsection).get('display_name', '')
            num_students = 0
            if subsection.location in sequential_open_distrib:
                num_students = sequential_open_distrib[subsection.location]
            stack_data = []
            # Tooltip parameters for subsection in open_distribution view
            tooltip = {
                'type': 'subsection',
                'num_students': num_students,
                'subsection_num': c_subsection,
                'subsection_name': subsection_name
            }
            stack_data.append({
                'color': 0,
                'value': num_students,
                'tooltip': tooltip,
                'module_url': subsection.location.to_deprecated_string(),
            })
            # FIX: renamed from `subsection` — the original rebound the loop
            # variable here, shadowing the subsection module being iterated.
            subsection_data = {
                'xValue': "SS {0}".format(c_subsection),
                'stackData': stack_data,
            }
            data.append(subsection_data)
        curr_section['data'] = data
        d3_data.append(curr_section)
    return d3_data
def get_d3_section_grade_distrib(course_id, section):
    """
    Returns the grade distribution for the problems in the `section` section in a format for the d3 code.
    `course_id` a string that is the course's ID.
    `section` an int that is a zero-based index into the course's list of sections.
    Navigates to the section specified to find all the problems associated with that section and then finds the grade
    distribution for those problems. Finally returns an object formated the way the d3_stacked_bar_graph.js expects its
    data object to be in.
    If this is requested multiple times quickly for the same course, it is better to call
    get_d3_problem_grade_distrib and pick out the sections of interest.
    Returns an array of dicts with the following keys (taken from d3_stacked_bar_graph.js's documentation)
    'xValue' - Corresponding value for the x-axis
    'stackData' - Array of objects with key, value pairs that represent a bar:
    'color' - Defines what "color" the bar will map to
    'value' - Maps to the height of the bar, along the y-axis
    'tooltip' - (Optional) Text to display on mouse hover
    """
    # Retrieve course object down to problems
    course = modulestore().get_course(course_id, depth=4)
    problem_set = []
    problem_info = {}
    c_subsection = 0
    # Walk subsection -> unit -> child of the requested section, recording each
    # problem's location plus a "P<subsection>.<unit>.<problem>" x-axis label.
    for subsection in course.get_children()[section].get_children():
        c_subsection += 1
        c_unit = 0
        for unit in subsection.get_children():
            c_unit += 1
            c_problem = 0
            for child in unit.get_children():
                if child.location.category == 'problem':
                    c_problem += 1
                    problem_set.append(child.location)
                    problem_info[child.location] = {
                        'id': child.location.to_deprecated_string(),
                        'x_value': "P{0}.{1}.{2}".format(c_subsection, c_unit, c_problem),
                        'display_name': own_metadata(child).get('display_name', ''),
                    }
    # Retrieve grade distribution for these problems
    grade_distrib = get_problem_set_grade_distrib(course_id, problem_set)
    d3_data = []
    # Construct data for each problem to be sent to d3
    for problem in problem_set:
        stack_data = []
        if problem in grade_distrib:  # Some problems have no data because students have not tried them yet.
            max_grade = float(grade_distrib[problem]['max_grade'])
            for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:
                # Percent of full credit this grade bucket represents (1 decimal).
                percent = 0.0
                if max_grade > 0:
                    percent = round((grade * 100.0) / max_grade, 1)
                # Construct tooltip for problem in grade distibution view
                tooltip = {
                    'type': 'problem',
                    'problem_info_x': problem_info[problem]['x_value'],
                    'count_grade': count_grade,
                    'percent': percent,
                    'problem_info_n': problem_info[problem]['display_name'],
                    'grade': grade,
                    'max_grade': max_grade,
                }
                stack_data.append({
                    'color': percent,
                    'value': count_grade,
                    'tooltip': tooltip,
                })
        d3_data.append({
            'xValue': problem_info[problem]['x_value'],
            'stackData': stack_data,
        })
    return d3_data
def get_section_display_name(course_id):
    """
    Returns an array of the display names for each section in the course.

    `course_id` the course ID for the course interested in

    The ith string in the array is the display name of the ith section in the course.
    """
    course = modulestore().get_course(course_id, depth=4)
    # Build the list directly instead of pre-allocating and filling by index.
    return [
        own_metadata(section).get('display_name', '')
        for section in course.get_children()
    ]
def get_array_section_has_problem(course_id):
    """
    Returns an array of true/false whether each section has problems.

    `course_id` the course ID for the course interested in

    The ith value in the array is true if the ith section in the course contains problems and false otherwise.
    """
    course = modulestore().get_course(course_id, depth=4)
    # A section "has a problem" if any unit of any of its subsections contains
    # a child in the 'problem' category. any() short-circuits exactly like the
    # original break-out-of-three-nested-loops code did.
    return [
        any(
            child.location.category == 'problem'
            for subsection in section.get_children()
            for unit in subsection.get_children()
            for child in unit.get_children()
        )
        for section in course.get_children()
    ]
def get_students_opened_subsection(request, csv=False):
    """
    Get a list of students that opened a particular subsection.
    If 'csv' is False, returns a dict of student's name: username.
    If 'csv' is True, returns a header array, and an array of arrays in the format:
    student names, usernames for CSV download.
    """
    module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))
    # NOTE(review): the `csv` keyword argument is immediately overwritten by
    # the query-string value below, so the parameter is effectively unused.
    csv = request.GET.get('csv')
    # Query for "opened a subsection" students
    students = models.StudentModule.objects.select_related('student').filter(
        module_state_key__exact=module_state_key,
        module_type__exact='sequential',
    ).values('student__username', 'student__profile__name').order_by('student__profile__name')
    results = []
    if not csv:
        # Restrict screen list length
        # Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH
        # without doing another select.
        for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
            results.append({
                'name': student['student__profile__name'],
                'username': student['student__username'],
            })
        max_exceeded = False
        if len(results) > MAX_SCREEN_LIST_LENGTH:
            # Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
            del results[-1]
            max_exceeded = True
        response_payload = {
            'results': results,
            'max_exceeded': max_exceeded,
        }
        return JsonResponse(response_payload)
    else:
        tooltip = request.GET.get('tooltip')
        # Subsection name is everything after 3rd space in tooltip
        filename = sanitize_filename(' '.join(tooltip.split(' ')[3:]))
        header = [_("Name").encode('utf-8'), _("Username").encode('utf-8')]
        for student in students:
            results.append([student['student__profile__name'], student['student__username']])
        response = create_csv_response(filename, header, results)
        return response
def get_students_problem_grades(request, csv=False):
    """
    Get a list of students and grades for a particular problem.
    If 'csv' is False, returns a dict of student's name: username: grade: percent.
    If 'csv' is True, returns a header array, and an array of arrays in the format:
    student names, usernames, grades, percents for CSV download.
    """
    module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))
    # NOTE(review): the `csv` keyword argument is immediately overwritten by
    # the query-string value below, so the parameter is effectively unused.
    csv = request.GET.get('csv')
    # Query for "problem grades" students
    students = models.StudentModule.objects.select_related('student').filter(
        module_state_key=module_state_key,
        module_type__exact='problem',
        grade__isnull=False,
    ).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')
    results = []
    if not csv:
        # Restrict screen list length
        # Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH
        # without doing another select.
        for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:
            student_dict = {
                'name': student['student__profile__name'],
                'username': student['student__username'],
                'grade': student['grade'],
            }
            # Percent of full credit, rounded to the nearest whole number.
            student_dict['percent'] = 0
            if student['max_grade'] > 0:
                student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])
            results.append(student_dict)
        max_exceeded = False
        if len(results) > MAX_SCREEN_LIST_LENGTH:
            # Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH
            del results[-1]
            max_exceeded = True
        response_payload = {
            'results': results,
            'max_exceeded': max_exceeded,
        }
        return JsonResponse(response_payload)
    else:
        tooltip = request.GET.get('tooltip')
        # The CSV filename is the tooltip text up to its trailing " - ..." part.
        filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])
        header = [_("Name").encode('utf-8'), _("Username").encode('utf-8'), _("Grade").encode('utf-8'), _("Percent").encode('utf-8')]
        for student in students:
            percent = 0
            if student['max_grade'] > 0:
                percent = round(student['grade'] * 100 / student['max_grade'])
            results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])
        response = create_csv_response(filename, header, results)
        return response
def post_metrics_data_csv(request):
    """
    Generate a list of opened subsections or problems for the entire course for CSV download.
    Returns a header array, and an array of arrays in the format:
    section, subsection, count of students for subsections
    or section, problem, name, count of students, percent of students, score for problems.
    """
    data = json.loads(request.POST['data'])
    sections = json.loads(data['sections'])
    tooltips = json.loads(data['tooltips'])
    course_id = data['course_id']
    data_type = data['data_type']
    results = []
    # NOTE(review): if `data_type` is neither 'subsection' nor 'problem',
    # `header` and `filename` are never bound and the create_csv_response call
    # at the end raises NameError — confirm callers only send these two values.
    if data_type == 'subsection':
        header = [_("Section").encode('utf-8'), _("Subsection").encode('utf-8'), _("Opened by this number of students").encode('utf-8')]
        filename = sanitize_filename(_('subsections') + '_' + course_id)
    elif data_type == 'problem':
        header = [_("Section").encode('utf-8'), _("Problem").encode('utf-8'), _("Name").encode('utf-8'), _("Count of Students").encode('utf-8'), _("Percent of Students").encode('utf-8'), _("Score").encode('utf-8')]
        filename = sanitize_filename(_('problems') + '_' + course_id)
    for index, section in enumerate(sections):
        # Each section gets its own row; detail rows follow, offset one column.
        results.append([section])
        # tooltips array is array of dicts for subsections and
        # array of array of dicts for problems.
        if data_type == 'subsection':
            for tooltip_dict in tooltips[index]:
                num_students = tooltip_dict['num_students']
                subsection = tooltip_dict['subsection_name']
                # Append to results offsetting 1 column to the right.
                results.append(['', subsection, num_students])
        elif data_type == 'problem':
            for tooltip in tooltips[index]:
                for tooltip_dict in tooltip:
                    label = tooltip_dict['label']
                    problem_name = tooltip_dict['problem_name']
                    count_grade = tooltip_dict['count_grade']
                    student_count_percent = tooltip_dict['student_count_percent']
                    percent = tooltip_dict['percent']
                    # Append to results offsetting 1 column to the right.
                    results.append(['', label, problem_name, count_grade, student_count_percent, percent])
    response = create_csv_response(filename, header, results)
    return response
def sanitize_filename(filename):
    """
    Utility function

    Normalizes `filename` for use as a download name: spaces become
    underscores, the name is UTF-8 encoded and truncated to 25 bytes, and a
    '.csv' extension is appended.
    """
    filename = filename.replace(" ", "_")
    # NOTE(review): Python 2 semantics — under Python 3 `encode` yields bytes
    # and the `+ '.csv'` below would raise TypeError. Confirm this module
    # still targets Python 2 before porting.
    filename = filename.encode('utf-8')
    filename = filename[0:25] + '.csv'
    return filename
|
yimingpeng/rl-library
|
refs/heads/master
|
system/common/libs/mwclient/simplejson/tests/test_dump.py
|
4
|
from cStringIO import StringIO
import simplejson as S
def test_dump():
    # Serializing an empty dict into a StringIO buffer must yield '{}'.
    buf = StringIO()
    S.dump({}, buf)
    assert buf.getvalue() == '{}'
def test_dumps():
    # dumps on an empty dict must produce the two-character JSON object.
    expected = '{}'
    assert S.dumps({}) == expected
|
gautam1858/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/distributions/laplace.py
|
15
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Laplace distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Laplace",
"LaplaceWithSoftplusScale",
]
@tf_export(v1=["distributions.Laplace"])
class Laplace(distribution.Distribution):
  """The Laplace distribution with location `loc` and `scale` parameters.
  #### Mathematical details
  The probability density function (pdf) of this distribution is,
  ```none
  pdf(x; mu, sigma) = exp(-|x - mu| / sigma) / Z
  Z = 2 sigma
  ```
  where `loc = mu`, `scale = sigma`, and `Z` is the normalization constant.
  Note that the Laplace distribution can be thought of as two exponential
  distributions spliced together "back-to-back."
  The Laplace distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,
  ```none
  X ~ Laplace(loc=0, scale=1)
  Y = loc + scale * X
  ```
  """
  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Laplace"):
    """Construct Laplace distribution with parameters `loc` and `scale`.
    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).
    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      # Positivity of `scale` is only asserted when validate_args is set.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype([self._loc, self._scale])
      super(Laplace, self).__init__(
          dtype=self._loc.dtype,
          reparameterization_type=distribution.FULLY_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[self._loc, self._scale],
          name=name)
  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters share the sample shape.
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))
  @property
  def loc(self):
    """Distribution parameter for the location."""
    return self._loc
  @property
  def scale(self):
    """Distribution parameter for scale."""
    return self._scale
  def _batch_shape_tensor(self):
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc), array_ops.shape(self.scale))
  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(), self.scale.get_shape())
  def _event_shape_tensor(self):
    # Scalar event: the Laplace distribution is univariate.
    return constant_op.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    return tensor_shape.scalar()
  def _sample_n(self, n, seed=None):
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
    # than `[-1, 1)`. In the case of `(0, 1)` we'd use
    # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
    # positive, "normal" number. However, the concept of subnormality exists
    # only at zero; here we need the smallest usable number larger than -1,
    # i.e., `-1 + eps/2`.
    uniform_samples = random_ops.random_uniform(
        shape=shape,
        minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                            self.dtype.as_numpy_dtype(0.)),
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    # Inverse-CDF sampling: loc - scale * sign(u) * log(1 - |u|).
    return (self.loc - self.scale * math_ops.sign(uniform_samples) *
            math_ops.log1p(-math_ops.abs(uniform_samples)))
  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
  def _log_cdf(self, x):
    return special_math.log_cdf_laplace(self._z(x))
  def _log_survival_function(self, x):
    # P[X > x] for the Laplace is the CDF reflected around the location.
    return special_math.log_cdf_laplace(-self._z(x))
  def _cdf(self, x):
    z = self._z(x)
    return (0.5 + 0.5 * math_ops.sign(z) *
            (1. - math_ops.exp(-math_ops.abs(z))))
  def _log_unnormalized_prob(self, x):
    return -math_ops.abs(self._z(x))
  def _log_normalization(self):
    # log Z = log(2 * scale).
    return math.log(2.) + math_ops.log(self.scale)
  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale + array_ops.zeros_like(self.loc)
    return math.log(2.) + 1. + math_ops.log(scale)
  def _mean(self):
    return self.loc + array_ops.zeros_like(self.scale)
  def _stddev(self):
    return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc)
  def _median(self):
    return self._mean()
  def _mode(self):
    return self._mean()
  def _z(self, x):
    # Standardized value: (x - loc) / scale.
    return (x - self.loc) / self.scale
class LaplaceWithSoftplusScale(Laplace):
  """Laplace with softplus applied to `scale`."""
  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Laplace(loc, tf.nn.softplus(scale)) "
      "instead.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="LaplaceWithSoftplusScale"):
    """Construct a Laplace whose `scale` is `softplus(scale)` (always > 0)."""
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      super(LaplaceWithSoftplusScale, self).__init__(
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    # Record the caller's raw parameters (pre-softplus) for reconstruction.
    self._parameters = parameters
|
yxtj/Neuron
|
refs/heads/master
|
dataInit/db.py
|
2
|
import string,math
import db_base as base
######################
#initial
def read_raw_time_data(file_name,sepper=','):
    # Read a raw spike-time CSV where column i holds the firing times of
    # neuron i+1. Returns a flat list of (neuron_id, time) pairs (1-based ids).
    fin=open(file_name,'r');
    # Infer the number of columns from the first line, then rewind.
    length=fin.readline().count(',')+1;
    fin.seek(0);
    print 'number of item each line:',length
    res=[];
    for line in fin:
        items=line.split(sepper);
        for i in range(length):
            # Skip empty cells: columns are ragged because neurons fire
            # different numbers of times.
            if items[i]!='' and items[i]!='\n':
                res.append((i+1,float(items[i])))
    fin.close()
    return res;
def trans_raw_time_to_lists(data):
    """Group (neuron_id, time) pairs into per-neuron sorted time lists.

    Returns one sorted list of times per neuron id present in `data`,
    ordered by ascending neuron id.
    """
    # FIX: the original bound the builtin name `dict`; use a real name.
    groups = {}
    for nid, time in data:
        groups.setdefault(nid, []).append(time)
    for times in groups.values():
        times.sort()
    return [groups[k] for k in sorted(groups.keys())]
def read_raw_cue_data(name,sepper=','):
    # Read the behaviour/cue CSV of (time, state) rows. A state==3 row closes
    # an interval; each result row is [last_state, begin, end, duration], and
    # a trailing 'rest' gap to the next interval is appended afterwards
    # (0 for the final row).
    fin=open(name,'r');
    length=fin.readline().count(',')+1;
    fin.seek(0);
    print length,'each line'
    res=[];
    last_time=0.0;
    last_state=0;
    for line in fin:
        items=line[:-1].split(sepper)
        time=float(items[0]);
        state=int(items[1]);
        if state==3:
            # State 3 marks the end of a cue interval.
            res.append([last_state,last_time,time,time-last_time]);
        last_time=time;
        last_state=state;
    fin.close();
    # Append the resting gap between consecutive intervals.
    for i in range(1,len(res)):
        res[i-1].append(res[i][1]-res[i-1][2])
    res[-1].append(0)
    return res;
######################
#neuron: IO
#read list format neuron
def read_lists_from_db_time(table_name,block_size=5000,conn=None):
    #read data from db
    # Returns one time-ordered list of spike times per neuron; 1-based neuron
    # ids in the table map to 0-based list indices. Rows are fetched in
    # `block_size` chunks to bound memory use.
    if conn==None:
        # NOTE(review): `get_db_con` is not defined in this module — it
        # presumably belongs to db_base; verify before calling with conn=None.
        con=get_db_con();
    else:
        con=conn
    cursor=con.cursor();
    cursor.execute("select count(*) from "+table_name+"_neuron")
    n=cursor.fetchone()[0];
    res=[[] for i in range(n)]
    cursor.execute("select * from "+table_name+" order by time")
    count=0;
    block=cursor.fetchmany(block_size)
    while block:
        for (nid,time) in block:
            res[nid-1].append(time)
        count+=block_size;
        if count%50000==0:
            print count,'pieces processed'
        block=cursor.fetchmany(block_size)
    if conn==None:
        # Only close connections this function opened itself.
        con.close()
    print count,'pieces processed'
    return res;
#read list format neuron
def read_lists_from_list_file(file_name):
    """Read a space-separated file of floats: one list per text line."""
    res = []
    # `with` guarantees the file is closed even if a float() conversion fails.
    with open(file_name, 'r') as f:
        for line in f:
            # float() tolerates the trailing newline on the last field.
            res.append([float(v) for v in line.split(' ')])
    return res
def write_to_one_file(res, filename):
    """Write 2D data to `filename`: one space-separated row per line.

    An empty row still produces an (empty) output line, matching the
    original behavior.
    """
    with open(filename, 'w') as fout:
        for line in res:
            fout.write(' '.join(str(v) for v in line))
            fout.write('\n')
def write_to_n_files(res, filename_prefix, ext):
    """Write 2D data to len(res) files: row i goes to <prefix><i+1><ext>.

    A leading '.' is prepended to `ext` if missing. An empty row still
    produces a file containing a single newline, matching the original.
    """
    if ext[0] != '.':
        ext = '.' + ext
    for count, line in enumerate(res, 1):
        with open(filename_prefix + str(count) + ext, 'w') as fout:
            fout.write(' '.join(str(v) for v in line))
            fout.write('\n')
def export_to_file(data,all_name,sep_name_prefix=None,sep_name_suffix=None):
print 'writing to one file'
write_to_one_file(data,all_name)
if sep_name_prefix!=None and sep_name_suffic!=None:
print 'writing to n files'
write_to_n_files(data,sep_name_prefix,sep_name_suffix)
#####################
#update neuron
def cal_neuron_dif(data,mode_unit_size=1e-3):
    # For each neuron's sorted spike-time list, compute statistics of the
    # inter-spike intervals: zero count, min/max, mode (bucketed by
    # `mode_unit_size`), mean/std and the 3rd/4th central moments.
    res=[]
    nid=0;
    for line in data:
        nid+=1
        n=len(line)-1
        dif_min=line[1]-line[0]
        dif_max=line[1]-line[0]
        s1=0; s2=0; s3=0; s4=0;
        occur={}
        zero_count=0
        for i in range(n):
            t=line[i+1]-line[i]
            # Histogram of intervals, bucketed to mode_unit_size.
            v=int(t/mode_unit_size)
            if v in occur:
                occur[v]+=1
            else:
                occur[v]=1
            if t==0:
                zero_count+=1
            elif t<dif_min:
                dif_min=t
            elif t>dif_max:
                dif_max=t
            # Accumulate raw moments.
            s1+=t; s2+=t**2; s3+=t**3; s4+=t**4;
        s1/=float(n); s2/=float(n); s3/=float(n); s4/=float(n);
        #moment related
        mean=s1
        std=math.sqrt(s2-s1**2)
        # 3rd/4th central moments derived from raw moments.
        cm3=s3-3*s2*s1+2*s1**3
        cm4=s4-4*s3*s1+6*s2*s1**2-3*s1**4;
        #mode(most often)
        (mode,freq)=max(occur.iteritems(),key=lambda x:x[1])
        # Report the bucket midpoint as the mode value.
        mode=(mode+0.5)*mode_unit_size
        # print mode,freq
        t={'zero_count':zero_count,'min':dif_min,'max':dif_max,'mode':mode,'mode_count':freq,
            'mean':mean,'std':std,'cm3':cm3,'cm4':cm4}
        res.append(t)
    return res
#####################
#dif
'''get the unsorted difference list from line1 to line2, (both line1 and line2 are non-decreasing)
(0 difference is NOT returned in list, but its occurrence number returned separated)
One difference value: for i find smallest line2[j] (>line1[i]), the difference is line2[j]-line1[i]
noJump=True: value (line2[j]-line1[i]) is considered only when line1[i] is also the largest value smaller than line2[j] in line1.
i.e. no other k satisfies: line1[i]<line1[k]<line2[j] . and for all k where line1[i]!=line1[k] satisfies: line1[k]<line1[i] or line1[k]>line2[j]
'''
def cal_dif_list(line1,line2,noJump=False,epsilon=1e-6):
    # Two-pointer sweep over the sorted inputs (see the docstring above this
    # function): for each line1[i] find the smallest line2[j] > line1[i] and
    # record line2[j]-line1[i]; differences within `epsilon` of zero are only
    # counted (equal_count), not listed.
    j=0;
    length1=len(line1)
    length2=len(line2)
    res=[]
    equal_count=0;
    # Number of decimal digits to round results to, derived from epsilon.
    n_dig=int(math.ceil(-math.log10(epsilon)))
    for i in range(length1):
        v=line1[i]
        # Advance j past all line2 entries <= v (within epsilon), counting ties.
        while j<length2 and line2[j]<=v+epsilon:
            if abs(line2[j]-v)<=epsilon:
                equal_count+=1
            j+=1
        if j==length2:
            break
        # noJump: only keep the pair when v is the closest line1 predecessor
        # of line2[j], i.e. no later line1 value lies between them.
        if noJump and i+1<length1 and line1[i+1]<line2[j] and v<line1[i+1]:
            continue
        res.append(round(line2[j]-v,n_dig))
    return (equal_count,res)
'''calculate the statistical information of the given UNSORTED dif list
(dif is SORTED in this function)
'''
def cal_dif_list_info(zero_count,dif,quantile_points,mode_unit_size):
    '''calculate the statistical information of the given UNSORTED dif list
    (dif is SORTED in this function)

    Returns a dict with count, zero_count, min/max, bucketed mode and its
    count, mean/std, 3rd/4th central moments, and the requested quantiles.
    '''
    dif.sort()
    n=len(dif)
    s1=0; s2=0; s3=0; s4=0;
    s_min=dif[0]; s_max=dif[-1];
    # Run-length encode the sorted values into (bucket, count) pairs,
    # bucketed by mode_unit_size.
    occur=[]
    last=-1
    c=0
    for t in dif:
        s1+=t; s2+=t**2; s3+=t**3; s4+=t**4;
        if int(t/mode_unit_size)==last:
            c+=1
        else:
            occur.append((last,c))
            c=1
            last=int(t/mode_unit_size)
    # BUG FIX: flush the final run. The original dropped the last bucket, so
    # the mode was wrong whenever the most frequent bucket sorted last.
    occur.append((last,c))
    s1/=float(n); s2/=float(n); s3/=float(n); s4/=float(n);
    #moment
    mean=s1
    std=math.sqrt(s2-s1**2)
    cm3=s3-3*s2*s1+2*s1**3
    cm4=s4-4*s3*s1+6*s2*s1**2-3*s1**4
    #quantile
    quantile=[dif[int(round(n*v))] for v in quantile_points]
    #mode
    (mode,freq)=max(occur,key=lambda x:x[1])
    # Report the bucket midpoint as the mode value.
    mode=(mode+0.5)*mode_unit_size
    return {'count':n,'zero_count':zero_count,'min':s_min,'max':s_max,
        'mode':mode,'mode_count':freq,
        'mean':mean,'std':std,'cm3':cm3,'cm4':cm4,'quantile':quantile}
def cal_col_dif(data,quantile_points,noJump=False,mode_unit_size=1e-3,epsilon=1e-6,col_file_prefix=None):
    # Pairwise correlation: for every ordered neuron pair (i, j) compute the
    # difference-list statistics; optionally dump each row's raw difference
    # lists to '<col_file_prefix><i+1>.txt'.
    length=len(data)
    # Note: both comprehensions reuse the loop name `i`; harmless here, but
    # the inner one shadows the outer.
    res=[[() for i in range(length)] for i in range(length)];
    for i in range(length):
        print 'Processing correlation from',(i+1)
        l=[]
        for j in range(length):
            (zero_count,dif)=cal_dif_list(data[i],data[j],noJump,epsilon)
            if col_file_prefix!=None:
                l.append(dif)
            res[i][j]=cal_dif_list_info(zero_count,dif,quantile_points,mode_unit_size)
        if col_file_prefix!=None:
            write_to_one_file(l,col_file_prefix+str(i+1)+'.txt')
    return res
#####################
#final
def init_db(basic_table_name,quantile_points,mode_unit_size=1e-3,epsilon=1e-6,col_file_prefix=None,con=None):
    # Full pipeline: create tables, import the hard-coded raw spike/behaviour
    # CSVs, fill per-neuron statistics, then store both the jump and no-jump
    # pairwise difference matrices.
    print 'creating tables:'
    base.create_tables(basic_table_name,quantile_points,con)
    print 'importing data:'
    #data=base.import_to_db('R108-122911-spike.csv',read_raw_time_data,basic_table_name)
    # NOTE(review): input file names are hard-coded to one recording session.
    data=read_raw_time_data('R108-122911-spike.csv')
    base.insert_template(data,basic_table_name,con)
    #base.import_to_db('R108-122911-beh.csv',read_raw_cue_data,basic_table_name+'_beh(type,begin,end,duration,rest)')
    base.insert_template(read_raw_cue_data('R108-122911-beh.csv'),
        basic_table_name+'_beh(type,begin,end,duration,rest)',con)
    print 'initializing neuron info:'
    base.init_neuron(basic_table_name,con)
    data=trans_raw_time_to_lists(data)
    neu_dif_data=cal_neuron_dif(data,mode_unit_size)
    base.update_neuron_dif(neu_dif_data,basic_table_name,con)
    #del neu_dif_data
    print 'writing difference matrix(jump):'
    diff_j=cal_col_dif(data,quantile_points,False,mode_unit_size,epsilon,
        col_file_prefix+'j_' if col_file_prefix else None)
    base.insert_dif(diff_j,basic_table_name,False,con)
    #del diff_j
    print 'writing difference matrix(no-jump):'
    diff_nj=cal_col_dif(data,quantile_points,True,mode_unit_size,epsilon,
        col_file_prefix+'nj_' if col_file_prefix else None)
    base.insert_dif(diff_nj,basic_table_name,True,con)
    #del diff_nj
if __name__=='__main__':
    basic_table_name='r108_122911'
    # 18 quantile points: 0.05, 0.10, ..., 0.90.
    quantile_points=[0.05*i for i in range(1,19)]
    #init_db(basic_table_name,quantile_points)
    #print 'reading data lists from db'
    #data=read_db_time_data(basic_table_name)
    #print 'reading data lists from file'
    #data=read_file_time_data('./all.txt')
    #data=trans_raw_time_to_lists(read_raw_time_data('R108-122911-spike.csv'))
    #export_to_file(data,'all.txt','','.txt')
    #base.update_neuron_dif(data,basic_table_name)
    # NOTE(review): `cal_dif` is not defined anywhere in this module and
    # `data` is only assigned in the commented-out lines above, so running
    # this script as-is raises NameError — presumably cal_col_dif (with one of
    # the data-loading lines re-enabled) was intended.
    cal_dif(data,basic_table_name)
|
MSM8226-Samsung/android_kernel_samsung_kmini3g_old
|
refs/heads/cm-12.1
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
# Parse the optional [comm] and [interval] command-line arguments.
if len(sys.argv) > 3:
	sys.exit(usage)
if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		# A single numeric argument is the refresh interval...
		interval = int(sys.argv[1])
	except ValueError:
		# ...otherwise it is the command name to filter on.
		for_comm = sys.argv[1]
		interval = default_interval
# Per-syscall-id hit counters (autodict comes from perf's Core helpers).
syscalls = autodict()
def trace_begin():
	# Called by perf when tracing starts: spawn the periodic display thread.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Count one hit per syscall id, optionally restricted to `for_comm`.
	# (The parameter names, including `id`, are fixed by perf's callback
	# signature even though `id` shadows the builtin.)
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit for this id: the autodict slot is not yet a number.
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Runs forever on a background thread: every `interval` seconds, clear
	# the terminal, print the syscall counts sorted by frequency, then reset
	# the counters for the next window.
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s %10s\n" % ("event", "count"),
		print "%-40s %10s\n" % ("----------------------------------------", \
					 "----------"),
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
					  reverse = True):
			try:
				print "%-40s %10d\n" % (syscall_name(id), val),
			except TypeError:
				# Skip entries whose count is not (yet) numeric.
				pass
		syscalls.clear()
		time.sleep(interval)
|
Ozerich/ajenti
|
refs/heads/master
|
ajenti/plugins/core/root.py
|
2
|
import re
from ajenti.ui import UI
from ajenti.com import *
from ajenti import version
from ajenti.app.api import ICategoryProvider, IContentProvider
from ajenti.ui.template import BasicTemplate
from ajenti.app.helpers import EventProcessor, SessionPlugin, event
from ajenti.app.urlhandler import URLHandler, url, get_environment_vars
from api import IProgressBoxProvider
class RootDispatcher(URLHandler, SessionPlugin, EventProcessor, Plugin):
categories = Interface(ICategoryProvider)
# Plugin folders. This dict is until we make MUI support
folders = {
'top': '',
'system': 'System',
'hardware': 'Hardware',
'apps': 'Applications',
'servers': 'Servers',
'tools': 'Tools',
'other': 'Other',
'bottom': ''
}
# Folder order
folder_ids = ['top', 'system', 'apps', 'hardware', 'tools', 'servers', 'other', 'bottom']
    def on_session_start(self):
        # New session defaults: Dashboard category, About dialog hidden.
        self._cat_selected = 'Dashboard'
        self._about_visible = False
    def main_ui(self):
        # Build the main page: the selected category's UI, the optional About
        # dialog, and at most one plugin progress box.
        templ = self.app.get_template('main.xml')
        templ.appendChildInto('main-content', self.selected_category.get_ui())
        if self._about_visible:
            templ.appendChildInto('main-content', self.get_ui_about())
        for p in self.app.grab_plugins(IProgressBoxProvider):
            if p.has_progress():
                templ.appendChildInto(
                    'progressbox-placeholder',
                    UI.TopProgressBox(
                        text=p.get_progress(),
                        icon=p.icon,
                        title=p.title,
                        can_abort=p.can_abort
                    )
                )
                # Only the first provider reporting progress is shown.
                break
        return templ
    def do_init(self):
        # Find the currently selected category plugin and (re)initialize it.
        cat = None
        for c in self.categories:
            if c.get_name() == self._cat_selected: # initialize current plugin
                cat = c
        self.selected_category = cat
        # NOTE(review): if nothing matches `_cat_selected`, `cat` stays None
        # and this call raises AttributeError — confirm a matching category
        # always exists.
        cat.on_init()
    def get_ui_about(self):
        # Assemble the About dialog: logo, version, tagline and project links.
        ui = UI.Centerer(
            UI.VContainer(
                UI.Image(file='/dl/core/ui/logo_big.png'),
                UI.Spacer(height=6),
                UI.Label(text='Ajenti '+version, size=4),
                UI.Label(text='Your personal server affairs agent'),
                UI.Spacer(height=10),
                UI.HContainer(
                    UI.OutLinkLabel(url='http://www.assembla.com/spaces/ajenti/wiki?id=aLa8XiGfWr36nLeJe5cbLA', text='Wiki'),
                    UI.OutLinkLabel(url='http://www.assembla.com/spaces/ajenti/wiki?id=ajenti&wiki_id=Developers', text='Credits'),
                    UI.OutLinkLabel(text='License', url='http://www.gnu.org/licenses/lgpl.html'),
                    UI.OutLinkLabel(text='Bugs', url='http://www.assembla.com/spaces/ajenti/support/tickets'),
                    spacing=6
                ),
                UI.Spacer(height=10),
                UI.Button(text='Close', id='closeabout')
            )
        )
        return UI.DialogBox(ui, hideok=True, hidecancel=True)
    @url('^/$')
    def process(self, req, start_response):
        # Root page handler: renders the sidebar of category folders plus the
        # main content area for the selected category.
        self.do_init()
        templ = self.app.get_template('index.xml')
        cat = None
        v = UI.VContainer(spacing=0)
        # Sort plugins by name
        cats = self.categories
        cats = sorted(cats, key=lambda p: p.text)
        for fld in self.folder_ids:
            cat_vc = UI.VContainer(spacing=0)
            if self.folders[fld] == '':
                cat_folder = cat_vc # Omit wrapper for special folders
            else:
                cat_folder = UI.CategoryFolder(
                                cat_vc,
                                text=self.folders[fld],
                                icon='/dl/core/ui/catfolders/'+ fld + '.png'
                                    if self.folders[fld] != '' else '',
                                id=fld
                             )
            # cat_vc will be VContainer or CategoryFolder
            exp = False
            empty = True
            for c in cats:
                if c.folder == fld: # Put corresponding plugins in this folder
                    empty = False
                    if c == self.selected_category:
                        # Mark the selected category and expand its folder.
                        cat_vc.append(UI.Category(icon=c.icon, name=c.text, id=c.get_name(), selected='true'))
                        exp = True
                    else:
                        cat_vc.append(UI.Category(icon=c.icon, name=c.text, id=c.get_name()))
            if not empty: v.append(cat_folder)
            cat_folder['expanded'] = exp
        templ.appendChildInto('leftplaceholder', v)
        templ.appendChildInto('rightplaceholder', self.main_ui().elements())
        templ.appendChildInto('version', UI.Label(text='Ajenti '+version, size=2))
        templ.appendChildInto('links',
            UI.HContainer(
                UI.LinkLabel(text='About', id='about'),
                UI.OutLinkLabel(text='License', url='http://www.gnu.org/licenses/lgpl.html')
            ))
        return templ.render()
@url('^/session_reset$')
def process_reset(self, req, start_response):
self.app.session.clear()
start_response('301 Moved Permanently', [('Location', '/')])
return ''
@event('category/click')
def handle_category(self, event, params, **kw):
if not isinstance(params, list):
return
if len(params) != 1:
return
self._cat_selected = params[0]
self.do_init()
@event('linklabel/click')
def handle_linklabel(self, event, params, vars=None):
if params[0] == 'about':
self._about_visible = True
@event('button/click')
def handle_abort(self, event, params, **kw):
if params[0] == 'aborttask':
for p in self.app.grab_plugins(IProgressBoxProvider):
if p.has_progress():
p.abort()
if params[0] == 'closeabout':
self._about_visible = False
@url('^/handle/.+')
def handle_generic(self, req, start_response):
# Iterate through the IEventDispatchers and find someone who will take care of the event
# TODO: use regexp for shorter event names, ex. 'btn_clickme/click'
path = req['PATH_INFO'].split('/')
event = '/'.join(path[2:4])
params = path[4:]
self.do_init()
# Current module
cat = filter(lambda x: x.get_name() == self._cat_selected, self.categories)[0]
# Search self and current category for event handler
for handler in (cat, self):
if handler.match_event(event):
vars = get_environment_vars(req)
result = handler.event(event, params, vars = vars)
if isinstance(result, str):
# For AJAX calls that do not require information
# just return ''
return result
if isinstance(result, BasicTemplate):
# Useful for inplace AJAX calls (that returns partial page)
return result.render()
# We have no result or handler - return default page
main = self.main_ui()
return main.render()
|
acebrianm/MinimaList
|
refs/heads/master
|
Crypto/SelfTest/Hash/test_MD5.py
|
116
|
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/MD5.py: Self-test for the MD5 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.MD5"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (expected_result, input[, description]) tuples.
# Each entry is (expected_md5_hexdigest, ascii_input[, description]);
# when the description is omitted the input string itself is shown.
test_data = [
    # Test vectors from RFC 1321
    ('d41d8cd98f00b204e9800998ecf8427e', '', "'' (empty string)"),
    ('0cc175b9c0f1b6a831c399e269772661', 'a'),
    ('900150983cd24fb0d6963f7d28e17f72', 'abc'),
    ('f96b697d7cb7938d525a2f31aaf161d0', 'message digest'),
    ('c3fcd3d76192e4007dfb496cca67e13b', 'abcdefghijklmnopqrstuvwxyz',
        'a-z'),
    ('d174ab98d277d9f5a5611c2c9f419d9f',
        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
        'A-Z, a-z, 0-9'),
    ('57edf4a22be3c955ac49da2e2107b67a',
        '1234567890123456789012345678901234567890123456'
        + '7890123456789012345678901234567890',
        "'1234567890' * 8"),
]
def get_tests(config=None):
    """Return the self-test cases for Crypto.Hash.MD5.

    config is accepted for interface compatibility with the other
    SelfTest modules and is unused here.  The default was changed from
    the mutable literal ``{}`` to ``None`` (shared mutable defaults are
    a classic Python pitfall); this is safe because the argument is
    never read.
    """
    from Crypto.Hash import MD5
    from common import make_hash_tests
    return make_hash_tests(MD5, "MD5", test_data,
        digest_size=16,
        oid="\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05")
if __name__ == '__main__':
    import unittest
    # Named function instead of a lambda bound to a name (PEP 8 E731);
    # unittest.main looks the suite factory up by the string 'suite'.
    def suite():
        """Build the MD5 self-test suite."""
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
mohanrex/pykalappai
|
refs/heads/master
|
EkEngine/Engine.py
|
1
|
import pyHook
import pythoncom
from threading import Thread
from EkEngine import WinPipe
from EkEngine.ScimTableParser import ScimTableParser
class Engine(Thread):
    """Keyboard-hook thread that transliterates input using a SCIM table.

    Recently typed characters accumulate in ``char_pressed``; when a
    suffix of the buffer matches a SCIM table key, the output already
    sent for the shorter match is erased with backspaces and the mapped
    unicode string is injected via WinPipe instead.
    """

    def __init__(self, filepath=""):
        """filepath -- path of the SCIM table loaded in initialize()."""
        Thread.__init__(self)
        self.file_name = filepath
        self.conv_state = True          # transliteration on/off switch
        # Pressed/released state of the modifier keys we track.
        self.key_state = {
            'Lcontrol': False,
            'Rcontrol': False,
            'Lshift': False,
            'Rshift': False,
            'Lmenu': False,
            'Rmenu': False
        }
        self.control_keys = ['Lcontrol', 'Rcontrol', 'Lshift', 'Rshift', 'Lmenu', 'Rmenu']
        self.event_queue = Queue()      # registered hotkey handlers
        self.char_pressed = ""          # rolling buffer of recent input
        self.chars_to_send = ""
        self.prev_char_to_delete = ""
        self.prev_unicode_char_length = 0
        self.scim_mapping = {}          # key sequence -> unicode output
        self.scim_mapping_reversed = {} # unicode output -> key sequence
        self.valid_chars = []           # chars appearing in any mapping key
        self.hm = pyHook.HookManager()

    def run(self):
        self.initialize()
        self.hook()

    def initialize(self):
        """Parse the SCIM table and build the lookup structures."""
        table_parser = ScimTableParser()
        self.scim_mapping = table_parser.parse(self.file_name)
        self.setvalid_chars()
        self.reverse_scim_map()

    def un_hook(self):
        """Remove the keyboard hook, ignoring errors from a dead hook."""
        try:
            self.hm.__del__()
        except Exception:
            # FIX: was a bare 'except:', which would also swallow
            # SystemExit/KeyboardInterrupt.
            pass

    def hook(self):
        """Install the keyboard hook and pump Windows messages forever."""
        self.hm.KeyDown = self.on_keyboard_event
        self.hm.KeyUp = self.on_keyup_event
        self.hm.HookKeyboard()
        pythoncom.PumpMessages()

    def setvalid_chars(self):
        """Collect every character that appears in any mapping key."""
        keys_list = list(self.scim_mapping.keys())
        self.valid_chars = list(set("".join(keys_list)))
        return True

    def reverse_scim_map(self):
        """Build the unicode-output -> key-sequence reverse mapping."""
        self.scim_mapping_reversed = {}
        for k, v in self.scim_mapping.items():
            self.scim_mapping_reversed[v] = k

    def on_keyup_event(self, event):
        """Track modifier releases; returning True passes the key on."""
        if event.Key in self.control_keys:
            self.key_state[event.Key] = False
        return True

    def check_event(self, char):
        """Fire registered hotkeys whose key combination is satisfied.

        Each queue entry is (keys, callback, arg): all modifier keys in
        ``keys`` must be held and the non-modifier key must equal char.
        """
        for key, event in enumerate(self.event_queue.get_list()):
            fire = False
            for index, keys in enumerate(event[0]):
                fire = False
                if keys not in self.control_keys:
                    if keys == char:
                        fire = True
                elif not self.key_state[keys]:
                    break
            if fire:
                event[1](event[2])

    def on_keyboard_event(self, event):
        """KeyDown hook: transliterate input.

        Returns False to swallow the original keystroke (a replacement
        was injected) and True to let Windows deliver it unchanged.
        """
        if event.Key in self.control_keys:
            self.key_state[event.Key] = True
        self.check_event(event.Key)
        if self.conv_state:
            char = chr(event.Ascii)
            if char in self.valid_chars:
                self.char_pressed += char
            elif char == " ":
                self.char_pressed += char
            elif event.Ascii == 8:
                # BUG FIX: was 'char == 8' — comparing a 1-char string to
                # an int is always False, so backspace never reached the
                # buffer-trimming code below.
                pass
            else:
                return True
            try:
                if event.Ascii == 8:
                    # Backspace: drop the last buffered character too.
                    self.char_pressed = self.char_pressed[:-1]
                if len(self.char_pressed) > 20:
                    self.char_pressed = self.char_pressed[-20:]
                # Try suffixes of the buffer, longest (5 chars) first.
                for i in range(-5, 0):
                    chars = self.char_pressed[i:]
                    if chars in self.scim_mapping:
                        self.chars_to_send = self.scim_mapping[chars]
                        if chars[:-1] in self.scim_mapping:
                            # The shorter match was already emitted and
                            # must be erased before sending the new text.
                            self.prev_char_to_delete = self.scim_mapping[chars[:-1]]
                        else:
                            self.prev_char_to_delete = ""
                        self.prev_unicode_char_length = len(self.prev_char_to_delete)
                        break
                    elif i == -1:
                        return True
                if self.prev_unicode_char_length > 0 and len(self.chars_to_send) > 0:
                    for i in range(0, self.prev_unicode_char_length):
                        WinPipe.send_backspace()
                if self.chars_to_send:
                    for i in self.chars_to_send:
                        WinPipe.send_key_press(i)
                self.chars_to_send = ""
                return False
            except KeyError as e:
                print(e)
                return True
        else:
            return True
class Queue(object):
    """FIFO of registered hotkey events, backed by a plain list.

    Note: despite the historical description, no maximum length is
    enforced by this class itself; callers are responsible for bounding
    how much they store.
    """

    def __init__(self, queue=None):
        # Copy the seed iterable so the caller's list is never aliased.
        self.queue = [] if queue is None else list(queue)

    def remove_event(self):
        """Pop and return the oldest element; IndexError when empty."""
        if not self.queue:
            raise IndexError('dequeue from empty Queue')
        return self.queue.pop(0)

    def register_event(self, element):
        """Append element to the back of the queue."""
        self.queue.append(element)

    def get_list(self):
        """Return the underlying list (not a copy)."""
        return self.queue

    def remove_all(self):
        """Discard every queued element."""
        self.queue = []
|
rubyinhell/brython
|
refs/heads/master
|
www/src/Lib/decimal.py
|
623
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
# Names exported by 'from decimal import *'.
__all__ = [
    # Two major classes
    'Decimal', 'Context',
    # Contexts
    'DefaultContext', 'BasicContext', 'ExtendedContext',
    # Exceptions
    'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
    'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
    'FloatOperation',
    # Constants for use in setting up contexts
    'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
    'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
    # Functions for manipulating contexts
    'setcontext', 'getcontext', 'localcontext',
    # Limits for the C version for compatibility
    'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
    # C version: compile time choice that enables the thread local context
    'HAVE_THREADS'
]
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
import copy as _copy
import math as _math
import numbers as _numbers
import sys
try:
    from collections import namedtuple as _namedtuple
    DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
    # namedtuple unavailable: as_tuple() falls back to a plain tuple.
    DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
    # 64-bit platform limits
    MAX_PREC = 999999999999999999
    MAX_EMAX = 999999999999999999
    MIN_EMIN = -999999999999999999
else:
    # 32-bit platform limits
    MAX_PREC = 425000000
    MAX_EMAX = 425000000
    MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
    """Base exception class.
    Used exceptions derive from this.
    If an exception derives from another exception besides this (such as
    Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
    called if the others are present. This isn't actually used for
    anything, though.
    handle -- Called when context._raise_error is called and the
    trap_enabler is not set. First argument is self, second is the
    context. More arguments can be given, those being after
    the explanation in _raise_error (For example,
    context._raise_error(NewError, '(-x)!', self._sign) would
    call NewError().handle(context, self._sign).)
    To define a new exception, it should be sufficient to have it derive
    from DecimalException.
    """
    def handle(self, context, *args):
        # Default trap handler: no replacement result.  Subclasses
        # override this to compute the value returned when the signal
        # is raised but not trapped.
        pass
class Clamped(DecimalException):
    """Exponent of a 0 changed to fit bounds.
    This occurs and signals clamped if the exponent of a result has been
    altered in order to fit the constraints of a specific concrete
    representation. This may occur when the exponent of a zero result would
    be outside the bounds of a representation, or when a large normal
    number would have an encoded exponent that cannot be represented. In
    this latter case, the exponent is reduced to fit and the corresponding
    number of zero digits are appended to the coefficient ("fold-down").
    """
    # brython fixme: no handle() override here -- when Clamped is
    # signalled but not trapped the operation result is unchanged.
    pass
class InvalidOperation(DecimalException):
    """An invalid operation was performed.
    Various bad things cause this:
    Something creates a signaling NaN
    -INF + INF
    0 * (+-)INF
    (+-)INF / (+-)INF
    x % 0
    (+-)INF % x
    x._rescale( non-integer )
    sqrt(-x) , x > 0
    0 ** 0
    x ** (non-integer)
    x ** (+-)INF
    An operand is invalid
    The result of the operation after these is a quiet positive NaN,
    except when the cause is a signaling NaN, in which case the result is
    also a quiet NaN, but with the original sign, and an optional
    diagnostic information.
    """
    def handle(self, context, *args):
        # No offending operand was supplied: plain quiet NaN result.
        if not args:
            return _NaN
        # A signaling NaN operand: turn it into a quiet NaN that keeps
        # the original sign and diagnostic payload, clamped to context.
        snan = args[0]
        quiet = _dec_from_triple(snan._sign, snan._int, 'n', True)
        return quiet._fix_nan(context)
class ConversionSyntax(InvalidOperation):
    """Trying to convert badly formed string.
    This occurs and signals invalid-operation if an string is being
    converted to a number and it does not conform to the numeric string
    syntax. The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result is a quiet NaN regardless of the input.
        return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
    """Division by 0.
    This occurs and signals division-by-zero if division of a finite number
    by zero was attempted (during a divide-integer or divide operation, or a
    power operation with negative right-hand operand), and the dividend was
    not zero.
    The result of the operation is [sign,inf], where sign is the exclusive
    or of the signs of the operands for divide, or is 1 for an odd power of
    -0, for power.
    """
    def handle(self, context, sign, *args):
        # Untrapped result: infinity carrying the computed sign.
        return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
    """Cannot perform the division adequately.
    This occurs and signals invalid-operation if the integer result of a
    divide-integer or remainder operation had too many digits (would be
    longer than precision). The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result is a quiet NaN.
        return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
    """Undefined result of division.
    This occurs and signals invalid-operation if division by zero was
    attempted (during a divide-integer, divide, or remainder operation), and
    the dividend is also zero. The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result is a quiet NaN.
        return _NaN
class Inexact(DecimalException):
    """Had to round, losing information.
    This occurs and signals inexact whenever the result of an operation is
    not exact (that is, it needed to be rounded and any discarded digits
    were non-zero), or if an overflow or underflow condition occurs. The
    result in all cases is unchanged.
    The inexact signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) was inexact.
    """
    # brython fix me: no handle() override -- the result is unchanged
    # when Inexact is signalled but not trapped.
    pass
class InvalidContext(InvalidOperation):
    """Invalid context. Unknown rounding, for example.
    This occurs and signals invalid-operation if an invalid context was
    detected during an operation. This can occur if contexts are not checked
    on creation and either the precision exceeds the capability of the
    underlying concrete representation or an unknown or unsupported rounding
    was specified. These aspects of the context need only be checked when
    the values are required to be used. The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result is a quiet NaN.
        return _NaN
class Rounded(DecimalException):
    """Number got rounded (not necessarily changed during rounding).
    This occurs and signals rounded whenever the result of an operation is
    rounded (that is, some zero or non-zero digits were discarded from the
    coefficient), or if an overflow or underflow condition occurs. The
    result in all cases is unchanged.
    The rounded signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) caused a loss of precision.
    """
    # brython fix me: no handle() override -- the result is unchanged
    # when Rounded is signalled but not trapped.
    pass
class Subnormal(DecimalException):
    """Exponent < Emin before rounding.
    This occurs and signals subnormal whenever the result of a conversion or
    operation is subnormal (that is, its adjusted exponent is less than
    Emin, before any rounding). The result in all cases is unchanged.
    The subnormal signal may be tested (or trapped) to determine if a given
    or operation (or sequence of operations) yielded a subnormal result.
    """
    # brython fix me: no handle() override -- the result is unchanged
    # when Subnormal is signalled but not trapped.
    pass
class Overflow(Inexact, Rounded):
    """Numerical overflow.
    This occurs and signals overflow if the adjusted exponent of a result
    (from a conversion or from an operation that is not an attempt to divide
    by zero), after rounding, would be greater than the largest value that
    can be handled by the implementation (the value Emax).
    The result depends on the rounding mode:
    For round-half-up and round-half-even (and for round-half-down and
    round-up, if implemented), the result of the operation is [sign,inf],
    where sign is the sign of the intermediate result. For round-down, the
    result is the largest finite number that can be represented in the
    current precision, with the sign of the intermediate result. For
    round-ceiling, the result is the same as for round-down if the sign of
    the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
    the result is the same as for round-down if the sign of the intermediate
    result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
    will also be raised.
    """
    def handle(self, context, sign, *args):
        rounding = context.rounding
        # Half-way modes and round-up always overflow to infinity.
        if rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
                        ROUND_HALF_DOWN, ROUND_UP):
            return _SignedInfinity[sign]
        # Directed modes: infinity when rounding away from zero on this
        # side, otherwise the largest finite number at current precision.
        if sign == 0:
            return (_SignedInfinity[sign] if rounding == ROUND_CEILING else
                    _dec_from_triple(sign, '9' * context.prec,
                                     context.Emax - context.prec + 1))
        if sign == 1:
            return (_SignedInfinity[sign] if rounding == ROUND_FLOOR else
                    _dec_from_triple(sign, '9' * context.prec,
                                     context.Emax - context.prec + 1))
class Underflow(Inexact, Rounded, Subnormal):
    """Numerical underflow with result rounded to 0.
    This occurs and signals underflow if a result is inexact and the
    adjusted exponent of the result would be smaller (more negative) than
    the smallest value that can be handled by the implementation (the value
    Emin). That is, the result is both inexact and subnormal.
    The result after an underflow will be a subnormal number rounded, if
    necessary, so that its exponent is not less than Etiny. This may result
    in 0 with the sign of the intermediate result and an exponent of Etiny.
    In all cases, Inexact, Rounded, and Subnormal will also be raised.
    """
    # brython fix me: no handle() override -- the rounded subnormal
    # result is produced by the operation itself, not by this handler.
    pass
class FloatOperation(DecimalException, TypeError):
    """Enable stricter semantics for mixing floats and Decimals.
    If the signal is not trapped (default), mixing floats and Decimals is
    permitted in the Decimal() constructor, context.create_decimal() and
    all comparison operators. Both conversion and comparisons are exact.
    Any occurrence of a mixed operation is silently recorded by setting
    FloatOperation in the context flags. Explicit conversions with
    Decimal.from_float() or context.create_decimal_from_float() do not
    set the flag.
    Otherwise (the signal is trapped), only equality comparisons and explicit
    conversions are silent. All other mixed operations raise FloatOperation.
    """
    # brython fix me: no handle() override -- the signal only sets a
    # flag (or raises, when trapped); there is no replacement result.
    pass
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
            Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals: internal condition classes
# are reported to users under their public parent signal.
_condition_map = {ConversionSyntax:InvalidOperation,
                  DivisionImpossible:InvalidOperation,
                  DivisionUndefined:InvalidOperation,
                  InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
                   ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
    import threading
except ImportError:
    # Python was compiled without threads; create a mock object instead
    class MockThreading(object):
        # Stands in for the threading module: local() returns the module
        # itself, so "thread-local" attributes become module globals.
        def local(self, sys=sys):
            return sys.modules[__name__]
    threading = MockThreading()
    del MockThreading
try:
    threading.local
except AttributeError:
    # threading.local is unavailable: store the context directly on the
    # current thread object instead.
    # To fix reloading, force it to create a new context
    # Old contexts have different exceptions in their dicts, making problems.
    if hasattr(threading.current_thread(), '__decimal_context__'):
        del threading.current_thread().__decimal_context__
    def setcontext(context):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Never install one of the shared prototype contexts itself.
            context = context.copy()
            context.clear_flags()
        threading.current_thread().__decimal_context__ = context
    def getcontext():
        """Returns this thread's context.
        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return threading.current_thread().__decimal_context__
        except AttributeError:
            context = Context()
            threading.current_thread().__decimal_context__ = context
            return context
else:
    # Normal case: one threading.local() slot holds the current context.
    local = threading.local()
    if hasattr(local, '__decimal_context__'):
        # Reload safety: discard any context left over from a prior import.
        del local.__decimal_context__
    def getcontext(_local=local):
        """Returns this thread's context.
        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return _local.__decimal_context__
        except AttributeError:
            context = Context()
            _local.__decimal_context__ = context
            return context
    def setcontext(context, _local=local):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Never install one of the shared prototype contexts itself.
            context = context.copy()
            context.clear_flags()
        _local.__decimal_context__ = context
    del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
    """Return a context manager for a copy of the supplied context.

    A copy of the current context is used when no context is given.
    Entering the returned manager installs the copy as the active
    context for the duration of a with statement:

        def sin(x):
            with localcontext() as ctx:
                ctx.prec += 2
                # Rest of sin calculation algorithm
                # uses a precision 2 greater than normal
            return +s  # Convert result to normal precision

        def sin(x):
            with localcontext(ExtendedContext):
                # Rest of sin calculation algorithm
                # uses the Extended Context from the
                # General Decimal Arithmetic Specification
            return +s  # Convert result to normal context

    >>> setcontext(DefaultContext)
    >>> print(getcontext().prec)
    28
    >>> with localcontext():
    ...     ctx = getcontext()
    ...     ctx.prec += 2
    ...     print(ctx.prec)
    ...
    30
    >>> with localcontext(ExtendedContext):
    ...     print(getcontext().prec)
    ...
    9
    >>> print(getcontext().prec)
    28
    """
    # Fall back to the active context when the caller supplied none.
    return _ContextManager(getcontext() if ctx is None else ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
value=value.strip().lower()
if value.startswith("-"):
self._sign = 1
value=value[1:]
else:
self._sign = 0
if value in ('', 'nan'):
self._is_special = True
self._int = ''
#if m.group('signal'): #figure out what a signaling NaN is later
# self._exp = 'N'
#else:
# self._exp = 'n'
self._exp='n'
return self
if value in ('inf', 'infinity'):
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
import _jsre as re
_m=re.match("^\d*\.?\d*(e\+?\d*)?$", value)
if not _m:
self._is_special = True
self._int = ''
self._exp='n'
return self
if '.' in value:
intpart, fracpart=value.split('.')
if 'e' in fracpart:
fracpart, exp=fracpart.split('e')
exp=int(exp)
else:
exp=0
#self._int = str(int(intpart+fracpart))
self._int = intpart+fracpart
self._exp = exp - len(fracpart)
self._is_special = False
return self
else:
#is this a pure int?
self._is_special = False
if 'e' in value:
self._int, _exp=value.split('e')
self._exp=int(_exp)
#print(self._int, self._exp)
else:
self._int = value
self._exp = 0
return self
#m = _parser(value.strip())
#if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
#if m.group('sign') == "-":
# self._sign = 1
#else:
# self._sign = 0
#intpart = m.group('int')
#if intpart is not None:
# # finite number
# fracpart = m.group('frac') or ''
# exp = int(m.group('exp') or '0')
# self._int = str(int(intpart+fracpart))
# self._exp = exp - len(fracpart)
# self._is_special = False
#else:
# diag = m.group('diag')
# if diag is not None:
# # NaN
# self._int = str(int(diag or '0')).lstrip('0')
# if m.group('signal'):
# self._exp = 'N'
# else:
# self._exp = 'n'
# else:
# # infinity
# self._int = '0'
# self._exp = 'F'
# self._is_special = True
#return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
    # @classmethod, but @decorator is not valid Python 2.3 syntax, so
    # don't use it (see notes on Py2.3 compatibility at top of file)
    def from_float(cls, f):
        """Converts a float to a decimal number, exactly.
        Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
        Since 0.1 is not exactly representable in binary floating point, the
        value is stored as the nearest representable value which is
        0x1.999999999999ap-4. The exact equivalent of the value in decimal
        is 0.1000000000000000055511151231257827021181583404541015625.
        >>> Decimal.from_float(0.1)
        Decimal('0.1000000000000000055511151231257827021181583404541015625')
        >>> Decimal.from_float(float('nan'))
        Decimal('NaN')
        >>> Decimal.from_float(float('inf'))
        Decimal('Infinity')
        >>> Decimal.from_float(-float('inf'))
        Decimal('-Infinity')
        >>> Decimal.from_float(-0.0)
        Decimal('-0')
        """
        if isinstance(f, int):                # handle integer inputs
            return cls(f)
        if not isinstance(f, float):
            raise TypeError("argument must be int or float.")
        if _math.isinf(f) or _math.isnan(f):
            # repr() gives 'inf'/'-inf'/'nan', which the constructor accepts.
            return cls(repr(f))
        # copysign distinguishes -0.0 from 0.0, unlike a plain comparison.
        if _math.copysign(1.0, f) == 1.0:
            sign = 0
        else:
            sign = 1
        # Exact conversion: f == n/d with d == 2**k, hence
        # f == (n * 5**k) * 10**-k with no rounding at all.
        n, d = abs(f).as_integer_ratio()
        k = d.bit_length() - 1
        result = _dec_from_triple(sign, str(n*5**k), -k)
        if cls is Decimal:
            return result
        else:
            # Respect subclasses: rebuild the result through cls.
            return cls(result)
    from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
    def _cmp(self, other):
        """Compare the two non-NaN decimal instances self and other.
        Returns -1 if self < other, 0 if self == other and 1
        if self > other. This routine is for internal use only."""
        if self._is_special or other._is_special:
            self_inf = self._isinfinity()
            other_inf = other._isinfinity()
            # NaNs are excluded by the callers, so only infinities reach
            # this branch; the -1/0/1 infinity codes order the operands
            # directly.
            if self_inf == other_inf:
                return 0
            elif self_inf < other_inf:
                return -1
            else:
                return 1
        # check for zeros; Decimal('0') == Decimal('-0')
        if not self:
            if not other:
                return 0
            else:
                return -((-1)**other._sign)
        if not other:
            return (-1)**self._sign
        # If different signs, neg one is less
        if other._sign < self._sign:
            return -1
        if self._sign < other._sign:
            return 1
        self_adjusted = self.adjusted()
        other_adjusted = other.adjusted()
        if self_adjusted == other_adjusted:
            # Same magnitude class: pad both coefficients to a common
            # exponent so the digit strings compare lexicographically.
            self_padded = self._int + '0'*(self._exp - other._exp)
            other_padded = other._int + '0'*(other._exp - self._exp)
            if self_padded == other_padded:
                return 0
            elif self_padded < other_padded:
                return -(-1)**self._sign
            else:
                return (-1)**self._sign
        elif self_adjusted > other_adjusted:
            return (-1)**self._sign
        else: # self_adjusted < other_adjusted
            return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
    def __hash__(self):
        """x.__hash__() <==> hash(x)"""
        # In order to make sure that the hash of a Decimal instance
        # agrees with the hash of a numerically equal integer, float
        # or Fraction, we follow the rules for numeric hashes outlined
        # in the documentation. (See library docs, 'Built-in Types').
        if self._is_special:
            if self.is_snan():
                raise TypeError('Cannot hash a signaling NaN value.')
            elif self.is_nan():
                return _PyHASH_NAN
            else:
                # Infinities hash to the platform hash of float('inf').
                if self._sign:
                    return -_PyHASH_INF
                else:
                    return _PyHASH_INF
        if self._exp >= 0:
            exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
        else:
            # _PyHASH_10INV: modular inverse of 10, so a negative exponent
            # becomes a modular division by the matching power of 10.
            exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
        hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
        ans = hash_ if self >= 0 else -hash_
        # -1 is reserved by CPython to signal an error from __hash__.
        return -2 if ans == -1 else ans
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
    def __str__(self, eng=False, context=None):
        """Return string representation of the number in scientific notation.
        Captures all of the information in the underlying representation.
        """
        sign = ['', '-'][self._sign]
        if self._is_special:
            if self._exp == 'F':
                return sign + 'Infinity'
            elif self._exp == 'n':
                # self._int carries the (possibly empty) NaN payload.
                return sign + 'NaN' + self._int
            else: # self._exp == 'N'
                return sign + 'sNaN' + self._int
        # number of digits of self._int to left of decimal point
        leftdigits = self._exp + len(self._int)
        # dotplace is number of digits of self._int to the left of the
        # decimal point in the mantissa of the output string (that is,
        # after adjusting the exponent)
        if self._exp <= 0 and leftdigits > -6:
            # no exponent required
            dotplace = leftdigits
        elif not eng:
            # usual scientific notation: 1 digit on left of the point
            dotplace = 1
        elif self._int == '0':
            # engineering notation, zero
            dotplace = (leftdigits + 1) % 3 - 1
        else:
            # engineering notation, nonzero
            dotplace = (leftdigits - 1) % 3 + 1
        # Assemble the digit string around the decimal point.
        if dotplace <= 0:
            intpart = '0'
            fracpart = '.' + '0'*(-dotplace) + self._int
        elif dotplace >= len(self._int):
            intpart = self._int+'0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace]
            fracpart = '.' + self._int[dotplace:]
        if leftdigits == dotplace:
            exp = ''
        else:
            if context is None:
                context = getcontext()
            # context.capitals selects 'e' vs 'E' for the exponent marker.
            exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
        return sign + intpart + fracpart + exp
    def to_eng_string(self, context=None):
        """Convert to engineering-type string.
        Engineering notation has an exponent which is a multiple of 3, so there
        are up to 3 digits left of the decimal place.
        Same rules for when in exponential and when as a value as in __str__.
        """
        # __str__ implements both notations; eng=True selects engineering.
        return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more then precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
    def __add__(self, other, context=None):
        """Returns self + other.
        -INF + INF (or the reverse) cause InvalidOperation errors.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        if context is None:
            context = getcontext()
        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans
            if self._isinfinity():
                # If both INF, same sign => same as both, opposite => error.
                if self._sign != other._sign and other._isinfinity():
                    return context._raise_error(InvalidOperation, '-INF + INF')
                return Decimal(self)
            if other._isinfinity():
                return Decimal(other) # Can't both be infinity here
        exp = min(self._exp, other._exp)
        negativezero = 0
        if context.rounding == ROUND_FLOOR and self._sign != other._sign:
            # If the answer is 0, the sign should be negative, in this case.
            negativezero = 1
        if not self and not other:
            sign = min(self._sign, other._sign)
            if negativezero:
                sign = 1
            ans = _dec_from_triple(sign, '0', exp)
            ans = ans._fix(context)
            return ans
        # One operand zero: the other just needs rescaling.  The exponent
        # clamp keeps the rescale cheap while preserving correct rounding.
        if not self:
            exp = max(exp, other._exp - context.prec-1)
            ans = other._rescale(exp, context.rounding)
            ans = ans._fix(context)
            return ans
        if not other:
            exp = max(exp, self._exp - context.prec-1)
            ans = self._rescale(exp, context.rounding)
            ans = ans._fix(context)
            return ans
        # Bring both coefficients to a common exponent (bounded by the
        # precision) before integer addition/subtraction.
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        op1, op2 = _normalize(op1, op2, context.prec)
        result = _WorkRep()
        if op1.sign != op2.sign:
            # Equal and opposite
            if op1.int == op2.int:
                ans = _dec_from_triple(negativezero, '0', exp)
                ans = ans._fix(context)
                return ans
            if op1.int < op2.int:
                op1, op2 = op2, op1
                # OK, now abs(op1) > abs(op2)
            if op1.sign == 1:
                result.sign = 1
                op1.sign, op2.sign = op2.sign, op1.sign
            else:
                result.sign = 0
                # So we know the sign, and op1 > 0.
        elif op1.sign == 1:
            result.sign = 1
            op1.sign, op2.sign = (0, 0)
        else:
            result.sign = 0
        # Now, op1 > abs(op2) > 0
        if op2.sign == 0:
            result.int = op1.int + op2.int
        else:
            result.int = op1.int - op2.int
        result.exp = op1.exp
        ans = Decimal(result)
        ans = ans._fix(context)
        return ans
    __radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
    def __mul__(self, other, context=None):
        """Return self * other.
        (+-) INF * 0 (or its reverse) raise InvalidOperation.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        if context is None:
            context = getcontext()
        resultsign = self._sign ^ other._sign
        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans
            if self._isinfinity():
                if not other:
                    return context._raise_error(InvalidOperation, '(+-)INF * 0')
                return _SignedInfinity[resultsign]
            if other._isinfinity():
                if not self:
                    return context._raise_error(InvalidOperation, '0 * (+-)INF')
                return _SignedInfinity[resultsign]
        resultexp = self._exp + other._exp
        # Special case for multiplying by zero
        if not self or not other:
            ans = _dec_from_triple(resultsign, '0', resultexp)
            # Fixing in case the exponent is out of bounds
            ans = ans._fix(context)
            return ans
        # Special case for multiplying by power of 10
        # (coefficient '1' means the other coefficient passes through).
        if self._int == '1':
            ans = _dec_from_triple(resultsign, other._int, resultexp)
            ans = ans._fix(context)
            return ans
        if other._int == '1':
            ans = _dec_from_triple(resultsign, self._int, resultexp)
            ans = ans._fix(context)
            return ans
        # General case: exact integer product of the two coefficients.
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
        ans = ans._fix(context)
        return ans
    __rmul__ = __mul__
    def __truediv__(self, other, context=None):
        """Return self / other."""
        other = _convert_other(other)
        if other is NotImplemented:
            return NotImplemented
        if context is None:
            context = getcontext()
        sign = self._sign ^ other._sign
        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans
            if self._isinfinity() and other._isinfinity():
                return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
            if self._isinfinity():
                return _SignedInfinity[sign]
            if other._isinfinity():
                context._raise_error(Clamped, 'Division by infinity')
                return _dec_from_triple(sign, '0', context.Etiny())
        # Special cases for zeroes
        if not other:
            if not self:
                return context._raise_error(DivisionUndefined, '0 / 0')
            return context._raise_error(DivisionByZero, 'x / 0', sign)
        if not self:
            exp = self._exp - other._exp
            coeff = 0
        else:
            # OK, so neither = 0, INF or NaN
            # Compute the quotient to prec+1 digits; the extra digit lets
            # _fix perform the final, correctly-rounded step.
            shift = len(other._int) - len(self._int) + context.prec + 1
            exp = self._exp - other._exp - shift
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            if shift >= 0:
                coeff, remainder = divmod(op1.int * 10**shift, op2.int)
            else:
                coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
            if remainder:
                # result is not exact; adjust to ensure correct rounding
                # (a last digit of 0 or 5 would round as if exact, so
                # bump it to record that digits were discarded)
                if coeff % 5 == 0:
                    coeff += 1
            else:
                # result is exact; get as close to ideal exponent as possible
                ideal_exp = self._exp - other._exp
                while exp < ideal_exp and coeff % 10 == 0:
                    coeff //= 10
                    exp += 1
        ans = _dec_from_triple(sign, str(coeff), exp)
        return ans._fix(context)
    def _divide(self, other, context):
        """Return (self // other, self % other), to context.prec precision.
        Assumes that neither self nor other is a NaN, that self is not
        infinite and that other is nonzero.
        """
        sign = self._sign ^ other._sign
        if other._isinfinity():
            ideal_exp = self._exp
        else:
            ideal_exp = min(self._exp, other._exp)
        expdiff = self.adjusted() - other.adjusted()
        # expdiff <= -2 implies abs(self/other) < 0.1, so the integer
        # quotient is 0 and the remainder is self (rescaled).
        if not self or other._isinfinity() or expdiff <= -2:
            return (_dec_from_triple(sign, '0', 0),
                    self._rescale(ideal_exp, context.rounding))
        if expdiff <= context.prec:
            # Align both coefficients to the smaller exponent, then use
            # exact integer divmod.
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            if op1.exp >= op2.exp:
                op1.int *= 10**(op1.exp - op2.exp)
            else:
                op2.int *= 10**(op2.exp - op1.exp)
            q, r = divmod(op1.int, op2.int)
            if q < 10**context.prec:
                return (_dec_from_triple(sign, str(q), 0),
                        _dec_from_triple(self._sign, str(r), ideal_exp))
        # Here the quotient is too large to be representable
        ans = context._raise_error(DivisionImpossible,
                                   'quotient too large in //, % or divmod')
        return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
    def __divmod__(self, other, context=None):
        """
        Return (self // other, self % other)
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        if context is None:
            context = getcontext()
        ans = self._check_nans(other, context)
        if ans:
            return (ans, ans)
        # Sign of the quotient.
        sign = self._sign ^ other._sign
        if self._isinfinity():
            if other._isinfinity():
                ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
                return ans, ans
            else:
                return (_SignedInfinity[sign],
                        context._raise_error(InvalidOperation, 'INF % x'))
        if not other:
            if not self:
                ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
                return ans, ans
            else:
                return (context._raise_error(DivisionByZero, 'x // 0', sign),
                        context._raise_error(InvalidOperation, 'x % 0'))
        # Finite, nonzero divisor: do the real work, then round the
        # remainder (the quotient from _divide is already exact).
        quotient, remainder = self._divide(other, context)
        remainder = remainder._fix(context)
        return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
    def remainder_near(self, other, context=None):
        """
        Remainder nearest to 0- abs(remainder-near) <= other/2
        """
        if context is None:
            context = getcontext()
        other = _convert_other(other, raiseit=True)
        ans = self._check_nans(other, context)
        if ans:
            return ans
        # self == +/-infinity -> InvalidOperation
        if self._isinfinity():
            return context._raise_error(InvalidOperation,
                                        'remainder_near(infinity, x)')
        # other == 0 -> either InvalidOperation or DivisionUndefined
        if not other:
            if self:
                return context._raise_error(InvalidOperation,
                                            'remainder_near(x, 0)')
            else:
                return context._raise_error(DivisionUndefined,
                                            'remainder_near(0, 0)')
        # other = +/-infinity -> remainder = self
        if other._isinfinity():
            ans = Decimal(self)
            return ans._fix(context)
        # self = 0 -> remainder = self, with ideal exponent
        ideal_exponent = min(self._exp, other._exp)
        if not self:
            ans = _dec_from_triple(self._sign, '0', ideal_exponent)
            return ans._fix(context)
        # catch most cases of large or small quotient
        expdiff = self.adjusted() - other.adjusted()
        if expdiff >= context.prec + 1:
            # expdiff >= prec+1 => abs(self/other) > 10**prec
            return context._raise_error(DivisionImpossible)
        if expdiff <= -2:
            # expdiff <= -2 => abs(self/other) < 0.1
            ans = self._rescale(ideal_exponent, context.rounding)
            return ans._fix(context)
        # adjust both arguments to have the same exponent, then divide
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        if op1.exp >= op2.exp:
            op1.int *= 10**(op1.exp - op2.exp)
        else:
            op2.int *= 10**(op2.exp - op1.exp)
        q, r = divmod(op1.int, op2.int)
        # remainder is r*10**ideal_exponent; other is +/-op2.int *
        # 10**ideal_exponent. Apply correction to ensure that
        # abs(remainder) <= abs(other)/2
        # (q&1 breaks the exact-half tie toward an even quotient)
        if 2*r + (q&1) > op2.int:
            r -= op2.int
            q += 1
        if q >= 10**context.prec:
            return context._raise_error(DivisionImpossible)
        # result has same sign as self unless r is negative
        sign = self._sign
        if r < 0:
            sign = 1-sign
            r = -r
        ans = _dec_from_triple(sign, str(r), ideal_exponent)
        return ans._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
    def real(self):
        # A Decimal is its own real component.
        return self
    real = property(real)
    def imag(self):
        # The imaginary component of a real number is zero.
        return Decimal(0)
    imag = property(imag)
    def conjugate(self):
        # Real numbers are their own complex conjugate.
        return self
    def __complex__(self):
        # Conversion goes through float, so precision may be lost.
        return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
    def _fix(self, context):
        """Round if it is necessary to keep self within prec precision.
        Rounds and fixes the exponent. Does not raise on a sNaN.
        Arguments:
        self - Decimal instance
        context - context used.
        """
        if self._is_special:
            if self._isnan():
                # decapitate payload if necessary
                return self._fix_nan(context)
            else:
                # self is +/-Infinity; return unaltered
                return Decimal(self)
        # if self is zero then exponent should be between Etiny and
        # Emax if clamp==0, and between Etiny and Etop if clamp==1.
        Etiny = context.Etiny()
        Etop = context.Etop()
        if not self:
            exp_max = [context.Emax, Etop][context.clamp]
            new_exp = min(max(self._exp, Etiny), exp_max)
            if new_exp != self._exp:
                context._raise_error(Clamped)
                return _dec_from_triple(self._sign, '0', new_exp)
            else:
                return Decimal(self)
        # exp_min is the smallest allowable exponent of the result,
        # equal to max(self.adjusted()-context.prec+1, Etiny)
        exp_min = len(self._int) + self._exp - context.prec
        if exp_min > Etop:
            # overflow: exp_min > Etop iff self.adjusted() > Emax
            ans = context._raise_error(Overflow, 'above Emax', self._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            return ans
        self_is_subnormal = exp_min < Etiny
        if self_is_subnormal:
            exp_min = Etiny
        # round if self has too many digits
        if self._exp < exp_min:
            digits = len(self._int) + self._exp - exp_min
            if digits < 0:
                # all digits would be discarded: substitute a one-digit
                # stand-in with the same sign and rounding behavior
                self = _dec_from_triple(self._sign, '1', exp_min-1)
                digits = 0
            rounding_method = self._pick_rounding_function[context.rounding]
            changed = rounding_method(self, digits)
            coeff = self._int[:digits] or '0'
            if changed > 0:
                coeff = str(int(coeff)+1)
                if len(coeff) > context.prec:
                    # rounding up carried into a new digit (999->1000):
                    # drop the trailing zero and bump the exponent
                    coeff = coeff[:-1]
                    exp_min += 1
            # check whether the rounding pushed the exponent out of range
            if exp_min > Etop:
                ans = context._raise_error(Overflow, 'above Emax', self._sign)
            else:
                ans = _dec_from_triple(self._sign, coeff, exp_min)
            # raise the appropriate signals, taking care to respect
            # the precedence described in the specification
            if changed and self_is_subnormal:
                context._raise_error(Underflow)
            if self_is_subnormal:
                context._raise_error(Subnormal)
            if changed:
                context._raise_error(Inexact)
            context._raise_error(Rounded)
            if not ans:
                # raise Clamped on underflow to 0
                context._raise_error(Clamped)
            return ans
        if self_is_subnormal:
            context._raise_error(Subnormal)
        # fold down if clamp == 1 and self has too few digits
        if context.clamp == 1 and self._exp > Etop:
            context._raise_error(Clamped)
            self_padded = self._int + '0'*(self._exp - Etop)
            return _dec_from_triple(self._sign, self_padded, Etop)
        # here self was representable to begin with; return unchanged
        return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
    def __round__(self, n=None):
        """Round self to the nearest integer, or to a given precision.
        If only one argument is supplied, round a finite Decimal
        instance self to the nearest integer. If self is infinite or
        a NaN then a Python exception is raised. If self is finite
        and lies exactly halfway between two integers then it is
        rounded to the integer with even last digit.
        >>> round(Decimal('123.456'))
        123
        >>> round(Decimal('-456.789'))
        -457
        >>> round(Decimal('-3.0'))
        -3
        >>> round(Decimal('2.5'))
        2
        >>> round(Decimal('3.5'))
        4
        >>> round(Decimal('Inf'))
        Traceback (most recent call last):
        ...
        OverflowError: cannot round an infinity
        >>> round(Decimal('NaN'))
        Traceback (most recent call last):
        ...
        ValueError: cannot round a NaN
        If a second argument n is supplied, self is rounded to n
        decimal places using the rounding mode for the current
        context.
        For an integer n, round(self, -n) is exactly equivalent to
        self.quantize(Decimal('1En')).
        >>> round(Decimal('123.456'), 0)
        Decimal('123')
        >>> round(Decimal('123.456'), 2)
        Decimal('123.46')
        >>> round(Decimal('123.456'), -2)
        Decimal('1E+2')
        >>> round(Decimal('-Infinity'), 37)
        Decimal('NaN')
        >>> round(Decimal('sNaN123'), 0)
        Decimal('NaN123')
        """
        if n is not None:
            # two-argument form: use the equivalent quantize call
            if not isinstance(n, int):
                raise TypeError('Second argument to round should be integral')
            exp = _dec_from_triple(0, '1', -n)
            return self.quantize(exp)
        # one-argument form
        if self._is_special:
            if self.is_nan():
                raise ValueError("cannot round a NaN")
            else:
                raise OverflowError("cannot round an infinity")
        # round-half-even to an integer, then convert to int.
        return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
    def fma(self, other, third, context=None):
        """Fused multiply-add.
        Returns self*other+third with no rounding of the intermediate
        product self*other.
        self and other are multiplied together, with no rounding of
        the result. The third operand is then added to the result,
        and a single final rounding is performed.
        """
        other = _convert_other(other, raiseit=True)
        third = _convert_other(third, raiseit=True)
        # compute product; raise InvalidOperation if either operand is
        # a signaling NaN or if the product is zero times infinity.
        if self._is_special or other._is_special:
            if context is None:
                context = getcontext()
            if self._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', self)
            if other._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', other)
            if self._exp == 'n':
                product = self
            elif other._exp == 'n':
                product = other
            elif self._exp == 'F':
                if not other:
                    return context._raise_error(InvalidOperation,
                                                'INF * 0 in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
            elif other._exp == 'F':
                if not self:
                    return context._raise_error(InvalidOperation,
                                                '0 * INF in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
        else:
            # Exact product via integer arithmetic; only the final
            # addition below rounds.
            product = _dec_from_triple(self._sign ^ other._sign,
                                       str(int(self._int) * int(other._int)),
                                       self._exp + other._exp)
        return product.__add__(third, context)
    def _power_modulo(self, other, modulo, context=None):
        """Three argument version of __pow__.

        Computes (self**other) % modulo exactly, as with built-in
        three-argument pow().  All three operands must be integral,
        other must be nonnegative, and modulo must be nonzero with at
        most context.prec digits; otherwise InvalidOperation is raised.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        modulo = _convert_other(modulo)
        if modulo is NotImplemented:
            return modulo

        if context is None:
            context = getcontext()

        # deal with NaNs: if there are any sNaNs then first one wins,
        # (i.e. behaviour for NaNs is identical to that of fma)
        # (_isnan() codes: 0 = not a NaN, 1 = quiet NaN, 2 = signaling NaN)
        self_is_nan = self._isnan()
        other_is_nan = other._isnan()
        modulo_is_nan = modulo._isnan()
        if self_is_nan or other_is_nan or modulo_is_nan:
            if self_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            self)
            if other_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            other)
            if modulo_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            modulo)
            if self_is_nan:
                return self._fix_nan(context)
            if other_is_nan:
                return other._fix_nan(context)
            return modulo._fix_nan(context)

        # check inputs: we apply same restrictions as Python's pow()
        if not (self._isinteger() and
                other._isinteger() and
                modulo._isinteger()):
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument not allowed '
                                        'unless all arguments are integers')
        if other < 0:
            return context._raise_error(InvalidOperation,
                                        'pow() 2nd argument cannot be '
                                        'negative when 3rd argument specified')
        if not modulo:
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument cannot be 0')

        # additional restriction for decimal: the modulus must be less
        # than 10**prec in absolute value
        if modulo.adjusted() >= context.prec:
            return context._raise_error(InvalidOperation,
                                        'insufficient precision: pow() 3rd '
                                        'argument must not have more than '
                                        'precision digits')

        # define 0**0 == NaN, for consistency with two-argument pow
        # (even though it hurts!)
        if not other and not self:
            return context._raise_error(InvalidOperation,
                                        'at least one of pow() 1st argument '
                                        'and 2nd argument must be nonzero ;'
                                        '0**0 is not defined')

        # compute sign of result
        if other._iseven():
            sign = 0
        else:
            sign = self._sign

        # convert modulo to a Python integer, and self and other to
        # Decimal integers (i.e. force their exponents to be >= 0)
        modulo = abs(int(modulo))
        base = _WorkRep(self.to_integral_value())
        exponent = _WorkRep(other.to_integral_value())

        # compute result using integer pow()
        # base value is coefficient * 10**exp; reduce it mod modulo first
        base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
        # fold the power-of-ten part of the exponent in 10 at a time
        for i in range(exponent.exp):
            base = pow(base, 10, modulo)
        base = pow(base, exponent.int, modulo)

        return _dec_from_triple(sign, str(base), 0)
    def _power_exact(self, other, p):
        """Attempt to compute self**other exactly.

        Given Decimals self and other and an integer p, attempt to
        compute an exact result for the power self**other, with p
        digits of precision.  Return None if self**other is not
        exactly representable in p digits.

        Assumes that elimination of special cases has already been
        performed: self and other must both be nonspecial; self must
        be positive and not numerically equal to 1; other must be
        nonzero.  For efficiency, other._exp should not be too large,
        so that 10**abs(other._exp) is a feasible calculation."""

        # In the comments below, we write x for the value of self and y for the
        # value of other.  Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
        # and yc positive integers not divisible by 10.

        # The main purpose of this method is to identify the *failure*
        # of x**y to be exactly representable with as little effort as
        # possible.  So we look for cheap and easy tests that
        # eliminate the possibility of x**y being exact.  Only if all
        # these tests are passed do we go on to actually compute x**y.

        # Here's the main idea.  Express y as a rational number m/n, with m and
        # n relatively prime and n>0.  Then for x**y to be exactly
        # representable (at *any* precision), xc must be the nth power of a
        # positive integer and xe must be divisible by n.  If y is negative
        # then additionally xc must be a power of either 2 or 5, hence a power
        # of 2**n or 5**n.
        #
        # There's a limit to how small |y| can be: if y=m/n as above
        # then:
        #
        #  (1) if xc != 1 then for the result to be representable we
        #      need xc**(1/n) >= 2, and hence also xc**|y| >= 2.  So
        #      if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
        #      2**(1/|y|), hence xc**|y| < 2 and the result is not
        #      representable.
        #
        #  (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1.  Hence if
        #      |y| < 1/|xe| then the result is not representable.
        #
        # Note that since x is not equal to 1, at least one of (1) and
        # (2) must apply.  Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
        # 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
        #
        # There's also a limit to how large y can be, at least if it's
        # positive: the normalized result will have coefficient xc**y,
        # so if it's representable then xc**y < 10**p, and y <
        # p/log10(xc).  Hence if y*log10(xc) >= p then the result is
        # not exactly representable.

        # if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
        # so |y| < 1/xe and the result is not representable.
        # Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
        # < 1/nbits(xc).

        # strip trailing zeros so that xc is not divisible by 10
        x = _WorkRep(self)
        xc, xe = x.int, x.exp
        while xc % 10 == 0:
            xc //= 10
            xe += 1

        # likewise for yc
        y = _WorkRep(other)
        yc, ye = y.int, y.exp
        while yc % 10 == 0:
            yc //= 10
            ye += 1

        # case where xc == 1: result is 10**(xe*y), with xe*y
        # required to be an integer
        if xc == 1:
            xe *= yc
            # result is now 10**(xe * 10**ye);  xe * 10**ye must be integral
            while xe % 10 == 0:
                xe //= 10
                ye += 1
            if ye < 0:
                return None
            exponent = xe * 10**ye
            if y.sign == 1:
                exponent = -exponent
            # if other is a nonnegative integer, use ideal exponent
            if other._isinteger() and other._sign == 0:
                ideal_exponent = self._exp*int(other)
                zeros = min(exponent-ideal_exponent, p-1)
            else:
                zeros = 0
            return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)

        # case where y is negative: xc must be either a power
        # of 2 or a power of 5.
        if y.sign == 1:
            last_digit = xc % 10
            if last_digit in (2,4,6,8):
                # quick test for power of 2
                if xc & -xc != xc:
                    return None
                # now xc is a power of 2; e is its exponent
                e = _nbits(xc)-1

                # We now have:
                #
                #   x = 2**e * 10**xe, e > 0, and y < 0.
                #
                # The exact result is:
                #
                #   x**y = 5**(-e*y) * 10**(e*y + xe*y)
                #
                # provided that both e*y and xe*y are integers.  Note that if
                # 5**(-e*y) >= 10**p, then the result can't be expressed
                # exactly with p digits of precision.
                #
                # Using the above, we can guard against large values of ye.
                # 93/65 is an upper bound for log(10)/log(5), so if
                #
                #   ye >= len(str(93*p//65))
                #
                # then
                #
                #   -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
                #
                # so 5**(-e*y) >= 10**p, and the coefficient of the result
                # can't be expressed in p digits.

                # emax >= largest e such that 5**e < 10**p.
                emax = p*93//65
                if ye >= len(str(emax)):
                    return None

                # Find -e*y and -xe*y; both must be integers
                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 5**e

            elif last_digit == 5:
                # e >= log_5(xc) if xc is a power of 5; we have
                # equality all the way up to xc=5**2658
                e = _nbits(xc)*28//65
                xc, remainder = divmod(5**e, xc)
                if remainder:
                    return None
                while xc % 5 == 0:
                    xc //= 5
                    e -= 1

                # Guard against large values of ye, using the same logic as in
                # the 'xc is a power of 2' branch.  10/3 is an upper bound for
                # log(10)/log(2).
                emax = p*10//3
                if ye >= len(str(emax)):
                    return None

                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 2**e
            else:
                return None

            # result coefficient must fit in p digits
            if xc >= 10**p:
                return None
            xe = -e-xe
            return _dec_from_triple(0, str(xc), xe)

        # now y is positive; find m and n such that y = m/n
        if ye >= 0:
            m, n = yc*10**ye, 1
        else:
            if xe != 0 and len(str(abs(yc*xe))) <= -ye:
                return None
            xc_bits = _nbits(xc)
            if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
                return None
            m, n = yc, 10**(-ye)
            # reduce m/n to lowest terms (only 2s and 5s can divide n)
            while m % 2 == n % 2 == 0:
                m //= 2
                n //= 2
            while m % 5 == n % 5 == 0:
                m //= 5
                n //= 5

        # compute nth root of xc*10**xe
        if n > 1:
            # if 1 < xc < 2**n then xc isn't an nth power
            if xc != 1 and xc_bits <= n:
                return None

            xe, rem = divmod(xe, n)
            if rem != 0:
                return None

            # compute nth root of xc using Newton's method
            a = 1 << -(-_nbits(xc)//n) # initial estimate
            while True:
                q, r = divmod(xc, a**(n-1))
                if a <= q:
                    break
                else:
                    a = (a*(n-1) + q)//n
            # a is now floor(xc**(1/n)); exact root requires a**n == xc
            if not (a == q and r == 0):
                return None
            xc = a

        # now xc*10**xe is the nth root of the original xc*10**xe
        # compute mth power of xc*10**xe

        # if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
        # 10**p and the result is not representable.
        if xc > 1 and m > p*100//_log10_lb(xc):
            return None
        xc = xc**m
        xe *= m
        if xc > 10**p:
            return None

        # by this point the result *is* exactly representable
        # adjust the exponent to get as close as possible to the ideal
        # exponent, if necessary
        str_xc = str(xc)
        if other._isinteger() and other._sign == 0:
            ideal_exponent = self._exp*int(other)
            zeros = min(xe-ideal_exponent, p-len(str_xc))
        else:
            zeros = 0
        return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
    def __pow__(self, other, modulo=None, context=None):
        """Return self ** other [ % modulo].

        With two arguments, compute self**other.

        With three arguments, compute (self**other) % modulo.  For the
        three argument form, the following restrictions on the
        arguments hold:

         - all three arguments must be integral
         - other must be nonnegative
         - either self or other (or both) must be nonzero
         - modulo must be nonzero and must have at most p digits,
           where p is the context precision.

        If any of these restrictions is violated the InvalidOperation
        flag is raised.

        The result of pow(self, other, modulo) is identical to the
        result that would be obtained by computing (self**other) %
        modulo with unbounded precision, but is computed more
        efficiently.  It is always exact.
        """
        if modulo is not None:
            return self._power_modulo(other, modulo, context)

        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        # either argument is a NaN => result is NaN
        ans = self._check_nans(other, context)
        if ans:
            return ans

        # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
        if not other:
            if not self:
                return context._raise_error(InvalidOperation, '0 ** 0')
            else:
                return _One

        # result has sign 1 iff self._sign is 1 and other is an odd integer
        result_sign = 0
        if self._sign == 1:
            if other._isinteger():
                if not other._iseven():
                    result_sign = 1
            else:
                # -ve**noninteger = NaN
                # (-0)**noninteger = 0**noninteger
                if self:
                    return context._raise_error(InvalidOperation,
                        'x ** y with x negative and y not an integer')
            # negate self, without doing any unwanted rounding
            self = self.copy_negate()

        # 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
        if not self:
            if other._sign == 0:
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
        if self._isinfinity():
            if other._sign == 0:
                return _SignedInfinity[result_sign]
            else:
                return _dec_from_triple(result_sign, '0', 0)

        # 1**other = 1, but the choice of exponent and the flags
        # depend on the exponent of self, and on whether other is a
        # positive integer, a negative integer, or neither
        if self == _One:
            if other._isinteger():
                # exp = max(self._exp*max(int(other), 0),
                # 1-context.prec) but evaluating int(other) directly
                # is dangerous until we know other is small (other
                # could be 1e999999999)
                if other._sign == 1:
                    multiplier = 0
                elif other > context.prec:
                    multiplier = context.prec
                else:
                    multiplier = int(other)

                exp = self._exp * multiplier
                if exp < 1-context.prec:
                    exp = 1-context.prec
                    context._raise_error(Rounded)
            else:
                context._raise_error(Inexact)
                context._raise_error(Rounded)
                exp = 1-context.prec

            return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)

        # compute adjusted exponent of self
        self_adj = self.adjusted()

        # self ** infinity is infinity if self > 1, 0 if self < 1
        # self ** -infinity is infinity if self < 1, 0 if self > 1
        if other._isinfinity():
            if (other._sign == 0) == (self_adj < 0):
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # from here on, the result always goes through the call
        # to _fix at the end of this function.
        ans = None
        exact = False

        # crude test to catch cases of extreme overflow/underflow.  If
        # log10(self)*other >= 10**bound and bound >= len(str(Emax))
        # then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
        # self**other >= 10**(Emax+1), so overflow occurs.  The test
        # for underflow is similar.
        bound = self._log10_exp_bound() + other.adjusted()
        if (self_adj >= 0) == (other._sign == 0):
            # self > 1 and other +ve, or self < 1 and other -ve
            # possibility of overflow
            if bound >= len(str(context.Emax)):
                ans = _dec_from_triple(result_sign, '1', context.Emax+1)
        else:
            # self > 1 and other -ve, or self < 1 and other +ve
            # possibility of underflow to 0
            Etiny = context.Etiny()
            if bound >= len(str(-Etiny)):
                ans = _dec_from_triple(result_sign, '1', Etiny-1)

        # try for an exact result with precision +1
        if ans is None:
            ans = self._power_exact(other, context.prec + 1)
            if ans is not None:
                if result_sign == 1:
                    ans = _dec_from_triple(1, ans._int, ans._exp)
                exact = True

        # usual case: inexact result, x**y computed directly as exp(y*log(x))
        if ans is None:
            p = context.prec
            x = _WorkRep(self)
            xc, xe = x.int, x.exp
            y = _WorkRep(other)
            yc, ye = y.int, y.exp
            if y.sign == 1:
                yc = -yc

            # compute correctly rounded result:  start with precision +3,
            # then increase precision until result is unambiguously roundable
            extra = 3
            while True:
                coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
                # result is unambiguously roundable once the digits past
                # position p are not of the form 5000...0 or 0000...0
                if coeff % (5*10**(len(str(coeff))-p-1)):
                    break
                extra += 3

            ans = _dec_from_triple(result_sign, str(coeff), exp)

        # unlike exp, ln and log10, the power function respects the
        # rounding mode; no need to switch to ROUND_HALF_EVEN here

        # There's a difficulty here when 'other' is not an integer and
        # the result is exact.  In this case, the specification
        # requires that the Inexact flag be raised (in spite of
        # exactness), but since the result is exact _fix won't do this
        # for us.  (Correspondingly, the Underflow signal should also
        # be raised for subnormal results.)  We can't directly raise
        # these signals either before or after calling _fix, since
        # that would violate the precedence for signals.  So we wrap
        # the ._fix call in a temporary context, and reraise
        # afterwards.
        if exact and not other._isinteger():
            # pad with zeros up to length context.prec+1 if necessary; this
            # ensures that the Rounded signal will be raised.
            if len(ans._int) <= context.prec:
                expdiff = context.prec + 1 - len(ans._int)
                ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
                                       ans._exp-expdiff)

            # create a copy of the current context, with cleared flags/traps
            newcontext = context.copy()
            newcontext.clear_flags()
            for exception in _signals:
                newcontext.traps[exception] = 0

            # round in the new context
            ans = ans._fix(newcontext)

            # raise Inexact, and if necessary, Underflow
            newcontext._raise_error(Inexact)
            if newcontext.flags[Subnormal]:
                newcontext._raise_error(Underflow)

            # propagate signals to the original context; _fix could
            # have raised any of Overflow, Underflow, Subnormal,
            # Inexact, Rounded, Clamped.  Overflow needs the correct
            # arguments.  Note that the order of the exceptions is
            # important here.
            if newcontext.flags[Overflow]:
                context._raise_error(Overflow, 'above Emax', ans._sign)
            for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
                if newcontext.flags[exception]:
                    context._raise_error(exception)

        else:
            ans = ans._fix(context)

        return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
    def normalize(self, context=None):
        """Normalize- strip trailing 0s, change anything equal to 0 to 0e0

        The operand is first rounded to the current context (via _fix),
        so the usual rounding flags may be raised.
        """
        if context is None:
            context = getcontext()

        if self._is_special:
            ans = self._check_nans(context=context)
            if ans:
                return ans

        dup = self._fix(context)
        if dup._isinfinity():
            return dup

        if not dup:
            # every zero normalizes to 0E+0, keeping only its sign
            return _dec_from_triple(dup._sign, '0', 0)
        # ceiling for the exponent while absorbing trailing zeros:
        # Etop() when clamping is in effect (context.clamp == 1),
        # otherwise Emax
        exp_max = [context.Emax, context.Etop()][context.clamp]
        end = len(dup._int)
        exp = dup._exp
        # drop trailing zeros from the coefficient, bumping the
        # exponent, as long as the exponent stays within range
        while dup._int[end-1] == '0' and exp < exp_max:
            exp += 1
            end -= 1
        return _dec_from_triple(dup._sign, dup._int[:end], exp)
    def quantize(self, exp, rounding=None, context=None, watchexp=True):
        """Quantize self so its exponent is the same as that of exp.

        Similar to self._rescale(exp._exp) but with error checking.

        NOTE(review): watchexp=False takes an early path that skips the
        exponent/precision range checks below -- presumably a legacy
        option; confirm before relying on it.
        """
        exp = _convert_other(exp, raiseit=True)

        if context is None:
            context = getcontext()
        if rounding is None:
            rounding = context.rounding

        if self._is_special or exp._is_special:
            ans = self._check_nans(exp, context)
            if ans:
                return ans

            if exp._isinfinity() or self._isinfinity():
                if exp._isinfinity() and self._isinfinity():
                    return Decimal(self)  # if both are inf, it is OK
                return context._raise_error(InvalidOperation,
                                        'quantize with one INF')

        # if we're not watching exponents, do a simple rescale
        if not watchexp:
            ans = self._rescale(exp._exp, rounding)
            # raise Inexact and Rounded where appropriate
            if ans._exp > self._exp:
                context._raise_error(Rounded)
                if ans != self:
                    context._raise_error(Inexact)
            return ans

        # exp._exp should be between Etiny and Emax
        if not (context.Etiny() <= exp._exp <= context.Emax):
            return context._raise_error(InvalidOperation,
                   'target exponent out of bounds in quantize')

        if not self:
            ans = _dec_from_triple(self._sign, '0', exp._exp)
            return ans._fix(context)

        self_adjusted = self.adjusted()
        if self_adjusted > context.Emax:
            return context._raise_error(InvalidOperation,
                                        'exponent of quantize result too large for current context')
        if self_adjusted - exp._exp + 1 > context.prec:
            return context._raise_error(InvalidOperation,
                                        'quantize result has too many digits for current context')

        ans = self._rescale(exp._exp, rounding)
        # the rescaled value can pick up an extra digit (e.g. 9.99
        # rounding up), so re-check the result against the context
        if ans.adjusted() > context.Emax:
            return context._raise_error(InvalidOperation,
                                        'exponent of quantize result too large for current context')
        if len(ans._int) > context.prec:
            return context._raise_error(InvalidOperation,
                                        'quantize result has too many digits for current context')

        # raise appropriate flags
        if ans and ans.adjusted() < context.Emin:
            context._raise_error(Subnormal)
        if ans._exp > self._exp:
            if ans != self:
                context._raise_error(Inexact)
            context._raise_error(Rounded)

        # call to fix takes care of any necessary folddown, and
        # signals Clamped if necessary
        ans = ans._fix(context)
        return ans
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
    def _rescale(self, exp, rounding):
        """Rescale self so that the exponent is exp, either by padding with zeros
        or by truncating digits, using the given rounding mode.

        Specials are returned without change.  This operation is
        quiet: it raises no flags, and uses no information from the
        context.

        exp = exp to scale to (an integer)
        rounding = rounding mode
        """
        if self._is_special:
            return Decimal(self)
        if not self:
            return _dec_from_triple(self._sign, '0', exp)

        if self._exp >= exp:
            # pad answer with zeros if necessary
            return _dec_from_triple(self._sign,
                                    self._int + '0'*(self._exp - exp), exp)

        # too many digits; round and lose data.  If self.adjusted() <
        # exp-1, replace self by 10**(exp-1) before rounding
        # digits = number of leading coefficient digits that survive
        digits = len(self._int) + self._exp - exp
        if digits < 0:
            self = _dec_from_triple(self._sign, '1', exp-1)
            digits = 0
        # dispatch on the rounding mode; the chosen function reports
        # whether the truncated coefficient must be incremented
        this_function = self._pick_rounding_function[rounding]
        changed = this_function(self, digits)
        coeff = self._int[:digits] or '0'
        if changed == 1:
            # round away from zero: bump the kept digits by one
            coeff = str(int(coeff)+1)
        return _dec_from_triple(self._sign, coeff, exp)
    def _round(self, places, rounding):
        """Round a nonzero, nonspecial Decimal to a fixed number of
        significant figures, using the given rounding mode.

        Infinities, NaNs and zeros are returned unaltered.

        This operation is quiet: it raises no flags, and uses no
        information from the context.

        places   -- number of significant figures to keep (must be >= 1)
        rounding -- rounding mode, passed through to _rescale
        """
        if places <= 0:
            raise ValueError("argument should be at least 1 in _round")
        if self._is_special or not self:
            return Decimal(self)
        ans = self._rescale(self.adjusted()+1-places, rounding)
        # it can happen that the rescale alters the adjusted exponent;
        # for example when rounding 99.97 to 3 significant figures.
        # When this happens we end up with an extra 0 at the end of
        # the number; a second rescale fixes this.
        if ans.adjusted() != self.adjusted():
            ans = ans._rescale(ans.adjusted()+1-places, rounding)
        return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
    def sqrt(self, context=None):
        """Return the square root of self, correctly rounded to the
        context precision using ROUND_HALF_EVEN."""
        if context is None:
            context = getcontext()

        if self._is_special:
            ans = self._check_nans(context=context)
            if ans:
                return ans

            if self._isinfinity() and self._sign == 0:
                return Decimal(self)

        if not self:
            # exponent = self._exp // 2.  sqrt(-0) = -0
            ans = _dec_from_triple(self._sign, '0', self._exp // 2)
            return ans._fix(context)

        if self._sign == 1:
            return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')

        # At this point self represents a positive number.  Let p be
        # the desired precision and express self in the form c*100**e
        # with c a positive real number and e an integer, c and e
        # being chosen so that 100**(p-1) <= c < 100**p.  Then the
        # (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
        # <= sqrt(c) < 10**p, so the closest representable Decimal at
        # precision p is n*10**e where n = round_half_even(sqrt(c)),
        # the closest integer to sqrt(c) with the even integer chosen
        # in the case of a tie.
        #
        # To ensure correct rounding in all cases, we use the
        # following trick: we compute the square root to an extra
        # place (precision p+1 instead of precision p), rounding down.
        # Then, if the result is inexact and its last digit is 0 or 5,
        # we increase the last digit to 1 or 6 respectively; if it's
        # exact we leave the last digit alone.  Now the final round to
        # p places (or fewer in the case of underflow) will round
        # correctly and raise the appropriate flags.

        # use an extra digit of precision
        prec = context.prec+1

        # write argument in the form c*100**e where e = self._exp//2
        # is the 'ideal' exponent, to be used if the square root is
        # exactly representable.  l is the number of 'digits' of c in
        # base 100, so that 100**(l-1) <= c < 100**l.
        op = _WorkRep(self)
        e = op.exp >> 1
        if op.exp & 1:
            # odd exponent: shift one decimal digit into the coefficient
            c = op.int * 10
            l = (len(self._int) >> 1) + 1
        else:
            c = op.int
            l = len(self._int)+1 >> 1

        # rescale so that c has exactly prec base 100 'digits'
        shift = prec-l
        if shift >= 0:
            c *= 100**shift
            exact = True
        else:
            c, remainder = divmod(c, 100**-shift)
            exact = not remainder
        e -= shift

        # find n = floor(sqrt(c)) using Newton's method
        n = 10**prec
        while True:
            q = c//n
            if n <= q:
                break
            else:
                n = n + q >> 1
        exact = exact and n*n == c

        if exact:
            # result is exact; rescale to use ideal exponent e
            if shift >= 0:
                # assert n % 10**shift == 0
                n //= 10**shift
            else:
                n *= 10**-shift
            e += shift
        else:
            # result is not exact; fix last digit as described above
            if n % 5 == 0:
                n += 1

        ans = _dec_from_triple(0, str(n), e)

        # round, and fit to current context
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding

        return ans
    def max(self, other, context=None):
        """Returns the larger value.

        Like max(self, other) except if one is not a number, returns
        NaN (and signals if one is sNaN).  Also rounds.
        """
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            # (_isnan() codes: 0 = not NaN, 1 = quiet NaN, 2 = signaling NaN)
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                # any sNaN, or two NaNs: delegate to NaN handling
                return self._check_nans(other, context)

        c = self._cmp(other)
        if c == 0:
            # If both operands are finite and equal in numerical value
            # then an ordering is applied:
            #
            # If the signs differ then max returns the operand with the
            # positive sign and min returns the operand with the negative sign
            #
            # If the signs are the same then the exponent is used to select
            # the result.  This is exactly the ordering used in compare_total.
            c = self.compare_total(other)

        if c == -1:
            ans = other
        else:
            ans = self

        return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
    def canonical(self):
        """Returns the same Decimal object.

        As we do not have different encodings for the same number, the
        received object already is in its canonical form.
        """
        # identity: nothing to normalize in this representation
        return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
    def compare_total(self, other, context=None):
        """Compares self to other using the abstract representations.

        This is not like the standard compare, which use their numerical
        value. Note that a total ordering is defined for all possible abstract
        representations.

        Returns a Decimal: _NegativeOne, _Zero or _One.
        """
        other = _convert_other(other, raiseit=True)

        # if one is negative and the other is positive, it's easy
        if self._sign and not other._sign:
            return _NegativeOne
        if not self._sign and other._sign:
            return _One
        sign = self._sign

        # let's handle both NaN types
        # (_isnan() codes: 0 = not NaN, 1 = quiet NaN, 2 = signaling NaN)
        self_nan = self._isnan()
        other_nan = other._isnan()
        if self_nan or other_nan:
            if self_nan == other_nan:
                # compare payloads as though they're integers
                self_key = len(self._int), self._int
                other_key = len(other._int), other._int
                if self_key < other_key:
                    if sign:
                        return _One
                    else:
                        return _NegativeOne
                if self_key > other_key:
                    if sign:
                        return _NegativeOne
                    else:
                        return _One
                return _Zero

            if sign:
                # negative side: quiet NaN sorts below signaling NaN
                if self_nan == 1:
                    return _NegativeOne
                if other_nan == 1:
                    return _One
                if self_nan == 2:
                    return _NegativeOne
                if other_nan == 2:
                    return _One
            else:
                # positive side: quiet NaN sorts above signaling NaN
                if self_nan == 1:
                    return _One
                if other_nan == 1:
                    return _NegativeOne
                if self_nan == 2:
                    return _One
                if other_nan == 2:
                    return _NegativeOne

        if self < other:
            return _NegativeOne
        if self > other:
            return _One

        # numerically equal: the exponent breaks the tie
        if self._exp < other._exp:
            if sign:
                return _One
            else:
                return _NegativeOne
        if self._exp > other._exp:
            if sign:
                return _NegativeOne
            else:
                return _One
        return _Zero
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
    def exp(self, context=None):
        """Returns e ** self.

        The final result is rounded with ROUND_HALF_EVEN regardless of
        the context's rounding mode (see the _set_rounding call below).
        """
        if context is None:
            context = getcontext()

        # exp(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # exp(-Infinity) = 0
        if self._isinfinity() == -1:
            return _Zero

        # exp(0) = 1
        if not self:
            return _One

        # exp(Infinity) = Infinity
        if self._isinfinity() == 1:
            return Decimal(self)

        # the result is now guaranteed to be inexact (the true
        # mathematical result is transcendental). There's no need to
        # raise Rounded and Inexact here---they'll always be raised as
        # a result of the call to _fix.
        p = context.prec
        adj = self.adjusted()

        # we only need to do any computation for quite a small range
        # of adjusted exponents---for example, -29 <= adj <= 10 for
        # the default context.  For smaller exponent the result is
        # indistinguishable from 1 at the given precision, while for
        # larger exponent the result either overflows or underflows.
        if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
            # overflow
            ans = _dec_from_triple(0, '1', context.Emax+1)
        elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
            # underflow to 0
            ans = _dec_from_triple(0, '1', context.Etiny()-1)
        elif self._sign == 0 and adj < -p:
            # p+1 digits; final round will raise correct flags
            ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
        elif self._sign == 1 and adj < -p-1:
            # p+1 digits; final round will raise correct flags
            ans = _dec_from_triple(0, '9'*(p+1), -p-1)
        # general case
        else:
            op = _WorkRep(self)
            c, e = op.int, op.exp
            if op.sign == 1:
                c = -c

            # compute correctly rounded result: increase precision by
            # 3 digits at a time until we get an unambiguously
            # roundable result
            extra = 3
            while True:
                coeff, exp = _dexp(c, e, p+extra)
                # roundable once the digits past position p are not
                # exactly 5000...0 or 0000...0
                if coeff % (5*10**(len(str(coeff))-p-1)):
                    break
                extra += 3

            ans = _dec_from_triple(0, str(coeff), exp)

        # at this stage, ans should round correctly with *any*
        # rounding mode, not just with ROUND_HALF_EVEN
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding

        return ans
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
    def _ln_exp_bound(self):
        """Compute a lower bound for the adjusted exponent of self.ln().

        In other words, compute r such that self.ln() >= 10**r. Assumes
        that self is finite and positive and that self != 1.
        """
        # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
        adj = self._exp + len(self._int) - 1
        if adj >= 1:
            # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
            return len(str(adj*23//10)) - 1
        if adj <= -2:
            # argument <= 0.1
            return len(str((-1-adj)*23//10)) - 1
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if adj == 0:
            # 1 < self < 10: bound ln(self) below by 1-1/self = (c-10**-e)/c.
            num = str(c-10**-e)
            den = str(c)
            # len difference plus a string comparison gives a floor(log10)
            # estimate of the quotient num/den
            return len(num) - len(den) - (num < den)
        # adj == -1, 0.1 <= self < 1; bound |ln(self)| below by 1-self
        return e + len(str(10**-e - c)) - 1
    def ln(self, context=None):
        """Returns the natural (base e) logarithm of self."""
        if context is None:
            context = getcontext()
        # ln(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans
        # ln(0.0) == -Infinity
        if not self:
            return _NegativeInfinity
        # ln(Infinity) = Infinity
        if self._isinfinity() == 1:
            return _Infinity
        # ln(1.0) == 0.0
        if self == _One:
            return _Zero
        # ln(negative) raises InvalidOperation
        if self._sign == 1:
            return context._raise_error(InvalidOperation,
                                        'ln of a negative value')
        # result is irrational, so necessarily inexact
        op = _WorkRep(self)
        c, e = op.int, op.exp
        p = context.prec
        # correctly rounded result: repeatedly increase precision by 3
        # until we get an unambiguously roundable result
        places = p - self._ln_exp_bound() + 2 # at least p+3 places
        while True:
            coeff = _dlog(c, e, places)
            # assert len(str(abs(coeff)))-p >= 1
            # a candidate that sits exactly on a rounding boundary
            # (last extra digits == 5000...0) is ambiguous; retry wider
            if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                break
            places += 3
        ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
        # round to context precision with half-even, then restore the
        # caller's rounding mode on the shallow-copied context
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding
        return ans
    def _log10_exp_bound(self):
        """Compute a lower bound for the adjusted exponent of self.log10().

        In other words, find r such that self.log10() >= 10**r.
        Assumes that self is finite and positive and that self != 1.
        """
        # For x >= 10 or x < 0.1 we only need a bound on the integer
        # part of log10(self), and this comes directly from the
        # exponent of x.  For 0.1 <= x <= 10 we use the inequalities
        # 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
        # (1-1/x)/2.31 > 0.  If x < 1 then |log10(x)| > (1-x)/2.31 > 0
        adj = self._exp + len(self._int) - 1
        if adj >= 1:
            # self >= 10
            return len(str(adj))-1
        if adj <= -2:
            # self < 0.1
            return len(str(-1-adj))-1
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if adj == 0:
            # 1 < self < 10; 231 = 100*2.31 keeps the arithmetic integral
            num = str(c-10**-e)
            den = str(231*c)
            return len(num) - len(den) - (num < den) + 2
        # adj == -1, 0.1 <= self < 1
        num = str(10**-e-c)
        return len(num) + e - (num < "231") - 1
    def log10(self, context=None):
        """Returns the base 10 logarithm of self."""
        if context is None:
            context = getcontext()
        # log10(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans
        # log10(0.0) == -Infinity
        if not self:
            return _NegativeInfinity
        # log10(Infinity) = Infinity
        if self._isinfinity() == 1:
            return _Infinity
        # log10(negative or -Infinity) raises InvalidOperation
        if self._sign == 1:
            return context._raise_error(InvalidOperation,
                                        'log10 of a negative value')
        # log10(10**n) = n
        if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
            # self is an exact power of ten, so the log is the exact
            # integer n; the answer may still need rounding to precision
            ans = Decimal(self._exp + len(self._int) - 1)
        else:
            # result is irrational, so necessarily inexact
            op = _WorkRep(self)
            c, e = op.int, op.exp
            p = context.prec
            # correctly rounded result: repeatedly increase precision
            # until result is unambiguously roundable
            places = p-self._log10_exp_bound()+2
            while True:
                coeff = _dlog10(c, e, places)
                # assert len(str(abs(coeff)))-p >= 1
                # retry when the candidate sits exactly on a rounding boundary
                if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                    break
                places += 3
            ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
        # round with half-even, then restore the caller's rounding mode
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding
        return ans
    def logb(self, context=None):
        """ Returns the exponent of the magnitude of self's MSD.

        The result is the integer which is the exponent of the magnitude
        of the most significant digit of self (as though it were truncated
        to a single digit while maintaining the value of that digit and
        without limiting the resulting exponent).
        """
        # logb(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans
        if context is None:
            context = getcontext()
        # logb(+/-Inf) = +Inf
        if self._isinfinity():
            return _Infinity
        # logb(0) = -Inf, DivisionByZero
        if not self:
            return context._raise_error(DivisionByZero, 'logb(0)', 1)
        # otherwise, simply return the adjusted exponent of self, as a
        # Decimal.  Note that no attempt is made to fit the result
        # into the current context.
        ans = Decimal(self.adjusted())
        # _fix applies context rounding/limits to the integer result
        return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
For being logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
    def max_mag(self, other, context=None):
        """Compares the values numerically with their sign ignored."""
        other = _convert_other(other, raiseit=True)
        if context is None:
            context = getcontext()
        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    # other is a quiet NaN, self is a number: keep self
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                # signaling NaN involved, or both quiet NaNs: usual
                # NaN propagation
                return self._check_nans(other, context)
        c = self.copy_abs()._cmp(other.copy_abs())
        if c == 0:
            # equal magnitudes: break the tie with the total ordering
            c = self.compare_total(other)
        if c == -1:
            ans = other
        else:
            ans = self
        return ans._fix(context)
    def min_mag(self, other, context=None):
        """Compares the values numerically with their sign ignored."""
        other = _convert_other(other, raiseit=True)
        if context is None:
            context = getcontext()
        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    # other is a quiet NaN, self is a number: keep self
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                # signaling NaN involved, or both quiet NaNs: usual
                # NaN propagation
                return self._check_nans(other, context)
        c = self.copy_abs()._cmp(other.copy_abs())
        if c == 0:
            # equal magnitudes: break the tie with the total ordering
            c = self.compare_total(other)
        if c == -1:
            ans = self
        else:
            ans = other
        return ans._fix(context)
    def next_minus(self, context=None):
        """Returns the largest representable number smaller than itself."""
        if context is None:
            context = getcontext()
        ans = self._check_nans(context=context)
        if ans:
            return ans
        if self._isinfinity() == -1:
            return _NegativeInfinity
        if self._isinfinity() == 1:
            # predecessor of +Inf is the largest finite representable number
            return _dec_from_triple(0, '9'*context.prec, context.Etop())
        # round self downwards with all flags suppressed; if the value
        # changes, the rounded result is already the next number down
        context = context.copy()
        context._set_rounding(ROUND_FLOOR)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # otherwise subtract a quantum too small to be representable
        return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)
    def next_plus(self, context=None):
        """Returns the smallest representable number larger than itself."""
        if context is None:
            context = getcontext()
        ans = self._check_nans(context=context)
        if ans:
            return ans
        if self._isinfinity() == 1:
            return _Infinity
        if self._isinfinity() == -1:
            # successor of -Inf is the most negative finite representable
            return _dec_from_triple(1, '9'*context.prec, context.Etop())
        # round self upwards with all flags suppressed; if the value
        # changes, the rounded result is already the next number up
        context = context.copy()
        context._set_rounding(ROUND_CEILING)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # otherwise add a quantum too small to be representable
        return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)
    def next_toward(self, other, context=None):
        """Returns the number closest to self, in the direction towards other.

        The result is the closest representable number to self
        (excluding self) that is in the direction towards other,
        unless both have the same value.  If the two operands are
        numerically equal, then the result is a copy of self with the
        sign set to be the same as the sign of other.
        """
        other = _convert_other(other, raiseit=True)
        if context is None:
            context = getcontext()
        ans = self._check_nans(other, context)
        if ans:
            return ans
        comparison = self._cmp(other)
        if comparison == 0:
            # numerically equal: return self carrying other's sign
            return self.copy_sign(other)
        if comparison == -1:
            ans = self.next_plus(context)
        else: # comparison == 1
            ans = self.next_minus(context)
        # decide which flags to raise using value of ans
        if ans._isinfinity():
            context._raise_error(Overflow,
                                 'Infinite result from next_toward',
                                 ans._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
        elif ans.adjusted() < context.Emin:
            context._raise_error(Underflow)
            context._raise_error(Subnormal)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            # if precision == 1 then we don't raise Clamped for a
            # result 0E-Etiny.
            if not ans:
                context._raise_error(Clamped)
        return ans
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
    def radix(self):
        """Just returns 10, as this is Decimal, :)"""
        # The numeric base of the Decimal type is always ten.
        return Decimal(10)
    def rotate(self, other, context=None):
        """Returns a rotated copy of self, value-of-other times."""
        if context is None:
            context = getcontext()
        other = _convert_other(other, raiseit=True)
        ans = self._check_nans(other, context)
        if ans:
            return ans
        # the rotation count must be an integer-valued Decimal whose
        # magnitude does not exceed the context precision
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        if not (-context.prec <= int(other) <= context.prec):
            return context._raise_error(InvalidOperation)
        if self._isinfinity():
            return Decimal(self)
        # get values, pad if necessary
        torot = int(other)
        rotdig = self._int
        topad = context.prec - len(rotdig)
        if topad > 0:
            rotdig = '0'*topad + rotdig
        elif topad < 0:
            rotdig = rotdig[-topad:]
        # let's rotate!
        # slicing handles both directions: positive torot rotates left,
        # negative rotates right
        rotated = rotdig[torot:] + rotdig[:torot]
        return _dec_from_triple(self._sign,
                                rotated.lstrip('0') or '0', self._exp)
    def scaleb(self, other, context=None):
        """Returns self operand after adding the second value to its exp."""
        if context is None:
            context = getcontext()
        other = _convert_other(other, raiseit=True)
        ans = self._check_nans(other, context)
        if ans:
            return ans
        # the scale must be an integer-valued Decimal within the bounds
        # derived from the context's Emax and precision
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        liminf = -2 * (context.Emax + context.prec)
        limsup =  2 * (context.Emax + context.prec)
        if not (liminf <= int(other) <= limsup):
            return context._raise_error(InvalidOperation)
        if self._isinfinity():
            return Decimal(self)
        # adjust the exponent directly, then let _fix apply context limits
        d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
        d = d._fix(context)
        return d
    def shift(self, other, context=None):
        """Returns a shifted copy of self, value-of-other times."""
        if context is None:
            context = getcontext()
        other = _convert_other(other, raiseit=True)
        ans = self._check_nans(other, context)
        if ans:
            return ans
        # the shift count must be an integer-valued Decimal whose
        # magnitude does not exceed the context precision
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        if not (-context.prec <= int(other) <= context.prec):
            return context._raise_error(InvalidOperation)
        if self._isinfinity():
            return Decimal(self)
        # get values, pad if necessary
        torot = int(other)
        rotdig = self._int
        topad = context.prec - len(rotdig)
        if topad > 0:
            rotdig = '0'*topad + rotdig
        elif topad < 0:
            rotdig = rotdig[-topad:]
        # let's shift!
        if torot < 0:
            # right shift: drop the rightmost -torot digits
            shifted = rotdig[:torot]
        else:
            # left shift: append zeros, then keep the low prec digits
            shifted = rotdig + '0'*torot
            shifted = shifted[-context.prec:]
        return _dec_from_triple(self._sign,
                                shifted.lstrip('0') or '0', self._exp)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
    # PEP 3101 support.  the _localeconv keyword argument should be
    # considered private: it's provided for ease of testing only.
    def __format__(self, specifier, context=None, _localeconv=None):
        """Format a Decimal instance according to the given specifier.

        The specifier should be a standard format specifier, with the
        form described in PEP 3101.  Formatting types 'e', 'E', 'f',
        'F', 'g', 'G', 'n' and '%' are supported.  If the formatting
        type is omitted it defaults to 'g' or 'G', depending on the
        value of context.capitals.
        """
        # Note: PEP 3101 says that if the type is not present then
        # there should be at least one digit after the decimal point.
        # We take the liberty of ignoring this requirement for
        # Decimal---it's presumably there to make sure that
        # format(float, '') behaves similarly to str(float).
        if context is None:
            context = getcontext()
        spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
        # special values don't care about the type or precision
        if self._is_special:
            sign = _format_sign(self._sign, spec)
            body = str(self.copy_abs())
            return _format_align(sign, body, spec)
        # a type of None defaults to 'g' or 'G', depending on context
        if spec['type'] is None:
            spec['type'] = ['g', 'G'][context.capitals]
        # if type is '%', adjust exponent of self accordingly
        if spec['type'] == '%':
            # multiply by 100 by bumping the exponent; digits unchanged
            self = _dec_from_triple(self._sign, self._int, self._exp+2)
        # round if necessary, taking rounding mode from the context
        rounding = context.rounding
        precision = spec['precision']
        if precision is not None:
            if spec['type'] in 'eE':
                self = self._round(precision+1, rounding)
            elif spec['type'] in 'fF%':
                self = self._rescale(-precision, rounding)
            elif spec['type'] in 'gG' and len(self._int) > precision:
                self = self._round(precision, rounding)
        # special case: zeros with a positive exponent can't be
        # represented in fixed point; rescale them to 0e0.
        if not self and self._exp > 0 and spec['type'] in 'fF%':
            self = self._rescale(0, rounding)
        # figure out placement of the decimal point
        leftdigits = self._exp + len(self._int)
        if spec['type'] in 'eE':
            if not self and precision is not None:
                dotplace = 1 - precision
            else:
                dotplace = 1
        elif spec['type'] in 'fF%':
            dotplace = leftdigits
        elif spec['type'] in 'gG':
            if self._exp <= 0 and leftdigits > -6:
                dotplace = leftdigits
            else:
                dotplace = 1
        # find digits before and after decimal point, and get exponent
        if dotplace < 0:
            intpart = '0'
            fracpart = '0'*(-dotplace) + self._int
        elif dotplace > len(self._int):
            intpart = self._int + '0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace] or '0'
            fracpart = self._int[dotplace:]
        exp = leftdigits-dotplace
        # done with the decimal-specific stuff; hand over the rest
        # of the formatting to the _format_number function
        return _format_number(self._sign, intpart, fracpart, exp, spec)
def _dec_from_triple(sign, coefficient, exponent, special=False):
    """Create a decimal instance directly, without any validation,
    normalization (e.g. removal of leading zeros) or argument
    conversion.

    This function is for *internal use only*.
    """
    # Bypass Decimal.__new__ entirely and fill the slots by hand.
    result = object.__new__(Decimal)
    result._sign = sign
    result._int = coefficient
    result._exp = exponent
    result._is_special = special
    return result
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).  Registration is virtual: it only
# affects isinstance()/issubclass() checks; no methods are inherited.
_numbers.Number.register(Decimal)

##### Context class #######################################################
class _ContextManager(object):
    """Context manager class to support localcontext().

    Sets a copy of the supplied context in __enter__() and restores
    the previous decimal context in __exit__()
    """
    def __init__(self, new_context):
        # Work on a copy so the caller's context object is never mutated.
        self.new_context = new_context.copy()
    def __enter__(self):
        # Remember whatever context is active, then install the copy.
        self.saved_context = getcontext()
        setcontext(self.new_context)
        return self.new_context
    def __exit__(self, exc_type, exc_value, exc_tb):
        # Reinstall the context that was active before entering.
        setcontext(self.saved_context)
class Context(object):
    """Contains the context for a Decimal instance.

    Contains:
    prec - precision (for use in rounding, division, square roots..)
    rounding - rounding type (how you round)
    traps - If traps[exception] = 1, then the exception is
            raised when it is caused.  Otherwise, a value is
            substituted in.
    flags - When an exception is caused, flags[exception] is set.
            (Whether or not the trap_enabler is set)
            Should be reset by user of Decimal instance.
    Emin - Minimum exponent
    Emax - Maximum exponent
    capitals - If 1, 1*10^1 is printed as 1E+1.
               If 0, printed as 1e1
    clamp - If 1, change exponents if too high (Default 0)
    """
    def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                       capitals=None, clamp=None, flags=None, traps=None,
                       _ignored_flags=None):
        # Set defaults; for everything except flags and _ignored_flags,
        # inherit from DefaultContext.
        try:
            dc = DefaultContext
        except NameError:
            # DefaultContext doesn't exist yet while it is itself being
            # constructed; in that case all arguments are supplied
            # explicitly and the dc references below are never reached.
            pass
        # Each assignment below is validated by Context.__setattr__.
        self.prec = prec if prec is not None else dc.prec
        self.rounding = rounding if rounding is not None else dc.rounding
        self.Emin = Emin if Emin is not None else dc.Emin
        self.Emax = Emax if Emax is not None else dc.Emax
        self.capitals = capitals if capitals is not None else dc.capitals
        self.clamp = clamp if clamp is not None else dc.clamp
        if _ignored_flags is None:
            self._ignored_flags = []
        else:
            self._ignored_flags = _ignored_flags
        if traps is None:
            self.traps = dc.traps.copy()
        elif not isinstance(traps, dict):
            # a sequence of signals: build a 0/1 dict over all of them
            self.traps = dict((s, int(s in traps)) for s in _signals + traps)
        else:
            self.traps = traps
        if flags is None:
            self.flags = dict.fromkeys(_signals, 0)
        elif not isinstance(flags, dict):
            # a sequence of signals: build a 0/1 dict over all of them
            self.flags = dict((s, int(s in flags)) for s in _signals + flags)
        else:
            self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
if not isinstance(d, dict):
raise TypeError("%s must be a signal dict" % d)
for key in d:
if not key in _signals:
raise KeyError("%s is not a valid signal dict" % d)
for key in _signals:
if not key in d:
raise KeyError("%s is not a valid signal dict" % d)
return object.__setattr__(self, name, d)
    def __setattr__(self, name, value):
        """Validate and set a context attribute; unknown names are rejected."""
        if name == 'prec':
            return self._set_integer_check(name, value, 1, 'inf')
        elif name == 'Emin':
            return self._set_integer_check(name, value, '-inf', 0)
        elif name == 'Emax':
            return self._set_integer_check(name, value, 0, 'inf')
        elif name == 'capitals':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'clamp':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'rounding':
            if not value in _rounding_modes:
                # raise TypeError even for strings to have consistency
                # among various implementations.
                raise TypeError("%s: invalid rounding mode" % value)
            return object.__setattr__(self, name, value)
        elif name == 'flags' or name == 'traps':
            return self._set_signal_dict(name, value)
        elif name == '_ignored_flags':
            # internal bookkeeping list; no validation needed
            return object.__setattr__(self, name, value)
        else:
            raise AttributeError(
                "'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
    def _shallow_copy(self):
        """Returns a shallow copy from self."""
        # Note: the flags, traps and _ignored_flags containers are
        # shared with the original context, not copied.
        nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
                     self.capitals, self.clamp, self.flags, self.traps,
                     self._ignored_flags)
        return nc
    def copy(self):
        """Returns a deep copy from self."""
        # Unlike _shallow_copy, the flags and traps dicts are copied so
        # the new context can be mutated independently.
        nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
                     self.capitals, self.clamp,
                     self.flags.copy(), self.traps.copy(),
                     self._ignored_flags)
        return nc
    __copy__ = copy
    def _raise_error(self, condition, explanation = None, *args):
        """Handles an error

        If the flag is in _ignored_flags, returns the default response.
        Otherwise, it sets the flag, then, if the corresponding
        trap_enabler is set, it reraises the exception.  Otherwise, it returns
        the default value after setting the flag.
        """
        # map a sub-condition to the signal that is actually flagged and
        # trapped (via _condition_map, defined elsewhere in this module)
        error = _condition_map.get(condition, condition)
        if error in self._ignored_flags:
            # Don't touch the flag
            return error().handle(self, *args)
        self.flags[error] = 1
        if not self.traps[error]:
            # The errors define how to handle themselves.
            return condition().handle(self, *args)
        # Errors should only be risked on copies of the context
        # self._ignored_flags = []
        raise error(explanation)
    def _ignore_all_flags(self):
        """Ignore all flags, if they are raised"""
        # Delegate to _ignore_flags with the complete signal list.
        return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding= type
return rounding
    def create_decimal(self, num='0'):
        """Creates a new Decimal instance but using self as context.

        This method implements the to-number operation of the
        IBM Decimal specification."""
        if isinstance(num, str) and num != num.strip():
            # to-number disallows surrounding whitespace, unlike Decimal()
            return self._raise_error(ConversionSyntax,
                                     "no trailing or leading whitespace is "
                                     "permitted.")
        d = Decimal(num, context=self)
        if d._isnan() and len(d._int) > self.prec - self.clamp:
            # a NaN payload must fit within the context precision
            return self._raise_error(ConversionSyntax,
                                     "diagnostic info too long in NaN")
        return d._fix(self)
    def create_decimal_from_float(self, f):
        """Creates a new Decimal instance from a float but rounding using self
        as the context.

        >>> context = Context(prec=5, rounding=ROUND_DOWN)
        >>> context.create_decimal_from_float(3.1415926535897932)
        Decimal('3.1415')
        >>> context = Context(prec=5, traps=[Inexact])
        >>> context.create_decimal_from_float(3.1415926535897932)
        Traceback (most recent call last):
        ...
        decimal.Inexact: None
        """
        d = Decimal.from_float(f)       # An exact conversion
        return d._fix(self)             # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
    """Compares two operands by abstract representation, ignoring sign.

    Identical to compare_total(), but both operands are treated as if
    their sign were 0.
    """
    return _convert_other(a, raiseit=True).compare_total_mag(b)
def copy_abs(self, a):
    """Returns a copy of the operand with its sign cleared to 0.

    >>> ExtendedContext.copy_abs(Decimal('2.1'))
    Decimal('2.1')
    >>> ExtendedContext.copy_abs(Decimal('-100'))
    Decimal('100')
    >>> ExtendedContext.copy_abs(-1)
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).copy_abs()
def copy_decimal(self, a):
    """Returns a copy of the operand as a Decimal.

    >>> ExtendedContext.copy_decimal(Decimal('2.1'))
    Decimal('2.1')
    >>> ExtendedContext.copy_decimal(Decimal('-1.00'))
    Decimal('-1.00')
    >>> ExtendedContext.copy_decimal(1)
    Decimal('1')
    """
    return Decimal(_convert_other(a, raiseit=True))
def copy_negate(self, a):
    """Returns a copy of the operand with its sign inverted.

    >>> ExtendedContext.copy_negate(Decimal('101.5'))
    Decimal('-101.5')
    >>> ExtendedContext.copy_negate(Decimal('-101.5'))
    Decimal('101.5')
    >>> ExtendedContext.copy_negate(1)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).copy_negate()
def copy_sign(self, a, b):
    """Copies the second operand's sign onto the first one.

    Returns a copy of the first operand, with its sign replaced by the
    sign of the second operand.

    >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
    Decimal('1.50')
    >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
    Decimal('1.50')
    >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
    Decimal('-1.50')
    >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
    Decimal('-1.50')
    >>> ExtendedContext.copy_sign(1, -2)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).copy_sign(b)
def divide(self, a, b):
    """Decimal division in a specified context.

    >>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
    Decimal('0.333333333')
    >>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
    Decimal('0.666666667')
    >>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
    Decimal('2.5')
    >>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
    Decimal('0.1')
    >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
    Decimal('4.00')
    >>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
    Decimal('1.20')
    >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
    Decimal('1.20E+6')
    >>> ExtendedContext.divide(5, 5)
    Decimal('1')
    """
    dividend = _convert_other(a, raiseit=True)
    quotient = dividend.__truediv__(b, context=self)
    if quotient is NotImplemented:
        # __truediv__ could not coerce b; raise the same error that
        # _convert_other(raiseit=True) would have produced.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return quotient
def divide_int(self, a, b):
    """Divides two numbers and returns the integer part of the result.

    >>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
    Decimal('0')
    >>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
    Decimal('3')
    >>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
    Decimal('3')
    >>> ExtendedContext.divide_int(10, 3)
    Decimal('3')
    """
    dividend = _convert_other(a, raiseit=True)
    quotient = dividend.__floordiv__(b, context=self)
    if quotient is NotImplemented:
        # b was not coercible to Decimal.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return quotient
def divmod(self, a, b):
    """Return (a // b, a % b).

    >>> ExtendedContext.divmod(Decimal(8), Decimal(3))
    (Decimal('2'), Decimal('2'))
    >>> ExtendedContext.divmod(Decimal(8), Decimal(4))
    (Decimal('2'), Decimal('0'))
    >>> ExtendedContext.divmod(8, 4)
    (Decimal('2'), Decimal('0'))
    """
    dividend = _convert_other(a, raiseit=True)
    pair = dividend.__divmod__(b, context=self)
    if pair is NotImplemented:
        # b was not coercible to Decimal.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return pair
def exp(self, a):
    """Returns e ** a.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.exp(Decimal('-Infinity'))
    Decimal('0')
    >>> c.exp(Decimal('-1'))
    Decimal('0.367879441')
    >>> c.exp(Decimal('0'))
    Decimal('1')
    >>> c.exp(Decimal('1'))
    Decimal('2.71828183')
    >>> c.exp(Decimal('0.693147181'))
    Decimal('2.00000000')
    >>> c.exp(Decimal('+Infinity'))
    Decimal('Infinity')
    >>> c.exp(10)
    Decimal('22026.4658')
    """
    # Fixed: 'a =_convert_other' was missing the space after '=',
    # inconsistent with every sibling method (PEP 8).
    a = _convert_other(a, raiseit=True)
    return a.exp(context=self)
def fma(self, a, b, c):
    """Returns a multiplied by b, plus c.

    The first two operands are multiplied together (as by multiply),
    then the third operand is added to that product (as by add), with
    only a single final rounding.

    >>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
    Decimal('22')
    >>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
    Decimal('-8')
    >>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
    Decimal('1.38435736E+12')
    >>> ExtendedContext.fma(1, 3, 4)
    Decimal('7')
    """
    return _convert_other(a, raiseit=True).fma(b, c, context=self)
def is_canonical(self, a):
    """Return True if the operand is canonical; otherwise return False.

    Every Decimal instance currently has a canonical encoding, so this
    answers True for any Decimal.

    >>> ExtendedContext.is_canonical(Decimal('2.50'))
    True
    """
    # Unlike the other predicates, ints are deliberately NOT coerced here.
    if isinstance(a, Decimal):
        return a.is_canonical()
    raise TypeError("is_canonical requires a Decimal as an argument.")
def is_finite(self, a):
    """Return True if the operand is finite; otherwise return False.

    A Decimal counts as finite when it is neither an infinity nor a NaN.

    >>> ExtendedContext.is_finite(Decimal('2.50'))
    True
    >>> ExtendedContext.is_finite(Decimal('0'))
    True
    >>> ExtendedContext.is_finite(Decimal('Inf'))
    False
    >>> ExtendedContext.is_finite(Decimal('NaN'))
    False
    >>> ExtendedContext.is_finite(1)
    True
    """
    return _convert_other(a, raiseit=True).is_finite()
def is_infinite(self, a):
    """Return True if the operand is infinite; otherwise return False.

    >>> ExtendedContext.is_infinite(Decimal('2.50'))
    False
    >>> ExtendedContext.is_infinite(Decimal('-Inf'))
    True
    >>> ExtendedContext.is_infinite(Decimal('NaN'))
    False
    """
    return _convert_other(a, raiseit=True).is_infinite()
def is_nan(self, a):
    """Return True if the operand is a qNaN or sNaN; else return False.

    >>> ExtendedContext.is_nan(Decimal('2.50'))
    False
    >>> ExtendedContext.is_nan(Decimal('NaN'))
    True
    >>> ExtendedContext.is_nan(Decimal('-sNaN'))
    True
    """
    return _convert_other(a, raiseit=True).is_nan()
def is_normal(self, a):
    """Return True if the operand is a normal number; else return False.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.is_normal(Decimal('2.50'))
    True
    >>> c.is_normal(Decimal('0.1E-999'))
    False
    >>> c.is_normal(Decimal('0.00'))
    False
    >>> c.is_normal(Decimal('-Inf'))
    False
    >>> c.is_normal(Decimal('NaN'))
    False
    """
    # Needs the context: normality depends on this context's Emin.
    return _convert_other(a, raiseit=True).is_normal(context=self)
def is_qnan(self, a):
    """Return True if the operand is a quiet NaN; otherwise return False.

    >>> ExtendedContext.is_qnan(Decimal('2.50'))
    False
    >>> ExtendedContext.is_qnan(Decimal('NaN'))
    True
    >>> ExtendedContext.is_qnan(Decimal('sNaN'))
    False
    """
    return _convert_other(a, raiseit=True).is_qnan()
def is_signed(self, a):
    """Return True if the operand is negative; otherwise return False.

    Note that a negative zero is signed.

    >>> ExtendedContext.is_signed(Decimal('2.50'))
    False
    >>> ExtendedContext.is_signed(Decimal('-12'))
    True
    >>> ExtendedContext.is_signed(Decimal('-0'))
    True
    >>> ExtendedContext.is_signed(-8)
    True
    """
    return _convert_other(a, raiseit=True).is_signed()
def is_snan(self, a):
    """Return True if the operand is a signaling NaN; else return False.

    >>> ExtendedContext.is_snan(Decimal('2.50'))
    False
    >>> ExtendedContext.is_snan(Decimal('NaN'))
    False
    >>> ExtendedContext.is_snan(Decimal('sNaN'))
    True
    """
    return _convert_other(a, raiseit=True).is_snan()
def is_subnormal(self, a):
    """Return True if the operand is subnormal; otherwise return False.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.is_subnormal(Decimal('2.50'))
    False
    >>> c.is_subnormal(Decimal('0.1E-999'))
    True
    >>> c.is_subnormal(Decimal('0.00'))
    False
    >>> c.is_subnormal(Decimal('-Inf'))
    False
    >>> c.is_subnormal(Decimal('NaN'))
    False
    """
    # Needs the context: subnormality depends on this context's Emin.
    return _convert_other(a, raiseit=True).is_subnormal(context=self)
def is_zero(self, a):
    """Return True if the operand is a zero; otherwise return False.

    >>> ExtendedContext.is_zero(Decimal('0'))
    True
    >>> ExtendedContext.is_zero(Decimal('2.50'))
    False
    >>> ExtendedContext.is_zero(Decimal('-0E+2'))
    True
    >>> ExtendedContext.is_zero(0)
    True
    """
    return _convert_other(a, raiseit=True).is_zero()
def ln(self, a):
    """Returns the natural (base e) logarithm of the operand.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.ln(Decimal('0'))
    Decimal('-Infinity')
    >>> c.ln(Decimal('1.000'))
    Decimal('0')
    >>> c.ln(Decimal('2.71828183'))
    Decimal('1.00000000')
    >>> c.ln(Decimal('10'))
    Decimal('2.30258509')
    >>> c.ln(Decimal('+Infinity'))
    Decimal('Infinity')
    """
    return _convert_other(a, raiseit=True).ln(context=self)
def log10(self, a):
    """Returns the base 10 logarithm of the operand.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.log10(Decimal('0'))
    Decimal('-Infinity')
    >>> c.log10(Decimal('0.001'))
    Decimal('-3')
    >>> c.log10(Decimal('1.000'))
    Decimal('0')
    >>> c.log10(Decimal('2'))
    Decimal('0.301029996')
    >>> c.log10(Decimal('10'))
    Decimal('1')
    >>> c.log10(Decimal('70'))
    Decimal('1.84509804')
    >>> c.log10(Decimal('+Infinity'))
    Decimal('Infinity')
    """
    return _convert_other(a, raiseit=True).log10(context=self)
def logb(self, a):
    """Returns the exponent of the magnitude of the operand's MSD.

    The result is the integer exponent of the magnitude of the
    operand's most significant digit (as if the operand were truncated
    to one digit while keeping that digit's value, with no limit on
    the resulting exponent).

    >>> ExtendedContext.logb(Decimal('250'))
    Decimal('2')
    >>> ExtendedContext.logb(Decimal('2.50'))
    Decimal('0')
    >>> ExtendedContext.logb(Decimal('0.03'))
    Decimal('-2')
    >>> ExtendedContext.logb(Decimal('0'))
    Decimal('-Infinity')
    """
    return _convert_other(a, raiseit=True).logb(context=self)
def logical_and(self, a, b):
    """Applies the logical operation 'and' digit-by-digit.

    Both operands must be logical numbers.

    >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
    Decimal('0')
    >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
    Decimal('1000')
    >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
    Decimal('10')
    >>> ExtendedContext.logical_and(110, 1101)
    Decimal('100')
    """
    return _convert_other(a, raiseit=True).logical_and(b, context=self)
def logical_invert(self, a):
    """Invert all the digits in the operand.

    The operand must be a logical number.

    >>> ExtendedContext.logical_invert(Decimal('0'))
    Decimal('111111111')
    >>> ExtendedContext.logical_invert(Decimal('1'))
    Decimal('111111110')
    >>> ExtendedContext.logical_invert(Decimal('111111111'))
    Decimal('0')
    >>> ExtendedContext.logical_invert(Decimal('101010101'))
    Decimal('10101010')
    """
    return _convert_other(a, raiseit=True).logical_invert(context=self)
def logical_or(self, a, b):
    """Applies the logical operation 'or' digit-by-digit.

    Both operands must be logical numbers.

    >>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
    Decimal('0')
    >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
    Decimal('1110')
    >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
    Decimal('1110')
    >>> ExtendedContext.logical_or(110, 1101)
    Decimal('1111')
    """
    return _convert_other(a, raiseit=True).logical_or(b, context=self)
def logical_xor(self, a, b):
    """Applies the logical operation 'xor' digit-by-digit.

    Both operands must be logical numbers.

    >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
    Decimal('0')
    >>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
    Decimal('110')
    >>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
    Decimal('1101')
    >>> ExtendedContext.logical_xor(110, 1101)
    Decimal('1011')
    """
    return _convert_other(a, raiseit=True).logical_xor(b, context=self)
def max(self, a, b):
    """max compares two values numerically and returns the maximum.

    If either operand is a NaN then the general rules apply.
    Otherwise, the operands are compared as though by the compare
    operation.  If they are numerically equal then the left-hand
    operand is chosen as the result; otherwise the operand closer to
    positive infinity is.

    >>> ExtendedContext.max(Decimal('3'), Decimal('2'))
    Decimal('3')
    >>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
    Decimal('3')
    >>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    """
    return _convert_other(a, raiseit=True).max(b, context=self)
def max_mag(self, a, b):
    """Compares the values numerically with their sign ignored.

    >>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    >>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
    Decimal('-10')
    >>> ExtendedContext.max_mag(1, -2)
    Decimal('-2')
    """
    return _convert_other(a, raiseit=True).max_mag(b, context=self)
def min(self, a, b):
    """min compares two values numerically and returns the minimum.

    If either operand is a NaN then the general rules apply.
    Otherwise, the operands are compared as though by the compare
    operation.  If they are numerically equal then the left-hand
    operand is chosen as the result; otherwise the operand closer to
    negative infinity is.

    >>> ExtendedContext.min(Decimal('3'), Decimal('2'))
    Decimal('2')
    >>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
    Decimal('-10')
    >>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
    Decimal('1.0')
    >>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    """
    return _convert_other(a, raiseit=True).min(b, context=self)
def min_mag(self, a, b):
    """Compares the values numerically with their sign ignored.

    >>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
    Decimal('-2')
    >>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
    Decimal('-3')
    >>> ExtendedContext.min_mag(1, -2)
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).min_mag(b, context=self)
def minus(self, a):
    """Minus corresponds to unary prefix minus in Python.

    Evaluated with the same rules as subtract: minus(a) is computed as
    subtract('0', a), where the '0' carries the operand's exponent.

    >>> ExtendedContext.minus(Decimal('1.3'))
    Decimal('-1.3')
    >>> ExtendedContext.minus(Decimal('-1.3'))
    Decimal('1.3')
    >>> ExtendedContext.minus(1)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).__neg__(context=self)
def multiply(self, a, b):
    """multiply multiplies two operands.

    If either operand is a special value then the general rules apply.
    Otherwise the operands are multiplied together ('long
    multiplication'), so the result may be as long as the sum of the
    lengths of the two operands.

    >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
    Decimal('3.60')
    >>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
    Decimal('21')
    >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
    Decimal('0.72')
    >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
    Decimal('-0.0')
    >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
    Decimal('4.28135971E+11')
    >>> ExtendedContext.multiply(7, 7)
    Decimal('49')
    """
    multiplicand = _convert_other(a, raiseit=True)
    product = multiplicand.__mul__(b, context=self)
    if product is NotImplemented:
        # b was not coercible to Decimal.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return product
def next_minus(self, a):
    """Returns the largest representable number smaller than a.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> ExtendedContext.next_minus(Decimal('1'))
    Decimal('0.999999999')
    >>> c.next_minus(Decimal('1E-1007'))
    Decimal('0E-1007')
    >>> ExtendedContext.next_minus(Decimal('-1.00000003'))
    Decimal('-1.00000004')
    >>> c.next_minus(Decimal('Infinity'))
    Decimal('9.99999999E+999')
    """
    return _convert_other(a, raiseit=True).next_minus(context=self)
def next_plus(self, a):
    """Returns the smallest representable number larger than a.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> ExtendedContext.next_plus(Decimal('1'))
    Decimal('1.00000001')
    >>> c.next_plus(Decimal('-1E-1007'))
    Decimal('-0E-1007')
    >>> ExtendedContext.next_plus(Decimal('-1.00000003'))
    Decimal('-1.00000002')
    >>> c.next_plus(Decimal('-Infinity'))
    Decimal('-9.99999999E+999')
    """
    return _convert_other(a, raiseit=True).next_plus(context=self)
def next_toward(self, a, b):
    """Returns the number closest to a, in the direction towards b.

    The result is the closest representable number to the first
    operand (but not the first operand itself) in the direction of the
    second operand, unless both operands have the same value.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.next_toward(Decimal('1'), Decimal('2'))
    Decimal('1.00000001')
    >>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
    Decimal('-0E-1007')
    >>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
    Decimal('-1.00000002')
    >>> c.next_toward(Decimal('1'), Decimal('0'))
    Decimal('0.999999999')
    >>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
    Decimal('0E-1007')
    >>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
    Decimal('-1.00000004')
    >>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
    Decimal('-0.00')
    >>> c.next_toward(0, 1)
    Decimal('1E-1007')
    """
    return _convert_other(a, raiseit=True).next_toward(b, context=self)
def normalize(self, a):
    """normalize reduces an operand to its simplest form.

    Essentially a plus operation with all trailing zeros removed from
    the result.

    >>> ExtendedContext.normalize(Decimal('2.1'))
    Decimal('2.1')
    >>> ExtendedContext.normalize(Decimal('-2.0'))
    Decimal('-2')
    >>> ExtendedContext.normalize(Decimal('1.200'))
    Decimal('1.2')
    >>> ExtendedContext.normalize(Decimal('-120'))
    Decimal('-1.2E+2')
    >>> ExtendedContext.normalize(Decimal('120.00'))
    Decimal('1.2E+2')
    >>> ExtendedContext.normalize(Decimal('0.00'))
    Decimal('0')
    """
    return _convert_other(a, raiseit=True).normalize(context=self)
def number_class(self, a):
    """Returns an indication of the class of the operand.

    The class is one of the following strings:
      -sNaN
      -NaN
      -Infinity
      -Normal
      -Subnormal
      -Zero
      +Zero
      +Subnormal
      +Normal
      +Infinity

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.number_class(Decimal('Infinity'))
    '+Infinity'
    >>> c.number_class(Decimal('2.50'))
    '+Normal'
    >>> c.number_class(Decimal('0.1E-999'))
    '+Subnormal'
    >>> c.number_class(Decimal('0'))
    '+Zero'
    >>> c.number_class(Decimal('-0'))
    '-Zero'
    >>> c.number_class(Decimal('-0.1E-999'))
    '-Subnormal'
    >>> c.number_class(Decimal('-2.50'))
    '-Normal'
    >>> c.number_class(Decimal('-Infinity'))
    '-Infinity'
    >>> c.number_class(Decimal('NaN'))
    'NaN'
    >>> c.number_class(Decimal('-NaN'))
    'NaN'
    >>> c.number_class(Decimal('sNaN'))
    'sNaN'
    """
    # Context matters: Normal vs Subnormal depends on this context's Emin.
    return _convert_other(a, raiseit=True).number_class(context=self)
def plus(self, a):
    """Plus corresponds to unary prefix plus in Python.

    Evaluated with the same rules as add: plus(a) is computed as
    add('0', a), where the '0' carries the operand's exponent.

    >>> ExtendedContext.plus(Decimal('1.3'))
    Decimal('1.3')
    >>> ExtendedContext.plus(Decimal('-1.3'))
    Decimal('-1.3')
    >>> ExtendedContext.plus(-1)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).__pos__(context=self)
def power(self, a, b, modulo=None):
    """Raises a to the power of b, to modulo if given.

    With two arguments, compute a**b.  If a is negative then b must be
    integral.  The result is inexact unless b is integral and the
    result is finite and expressible exactly in 'precision' digits.

    With three arguments, compute (a**b) % modulo.  The three-argument
    form requires that:

      - all three arguments are integral
      - b is nonnegative
      - at least one of a or b is nonzero
      - modulo is nonzero and has at most 'precision' digits

    The result of pow(a, b, modulo) is identical to computing
    (a**b) % modulo at unbounded precision, but is obtained more
    efficiently, and is always exact.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.power(Decimal('2'), Decimal('3'))
    Decimal('8')
    >>> c.power(Decimal('-2'), Decimal('3'))
    Decimal('-8')
    >>> c.power(Decimal('2'), Decimal('-3'))
    Decimal('0.125')
    >>> c.power(Decimal('1.7'), Decimal('8'))
    Decimal('69.7575744')
    >>> c.power(Decimal('10'), Decimal('0.301029996'))
    Decimal('2.00000000')
    >>> c.power(Decimal('Infinity'), Decimal('-1'))
    Decimal('0')
    >>> c.power(Decimal('Infinity'), Decimal('0'))
    Decimal('1')
    >>> c.power(Decimal('Infinity'), Decimal('1'))
    Decimal('Infinity')
    >>> c.power(Decimal('-Infinity'), Decimal('-1'))
    Decimal('-0')
    >>> c.power(Decimal('-Infinity'), Decimal('0'))
    Decimal('1')
    >>> c.power(Decimal('-Infinity'), Decimal('1'))
    Decimal('-Infinity')
    >>> c.power(Decimal('-Infinity'), Decimal('2'))
    Decimal('Infinity')
    >>> c.power(Decimal('0'), Decimal('0'))
    Decimal('NaN')
    >>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
    Decimal('11')
    >>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
    Decimal('-11')
    >>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
    Decimal('1')
    >>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
    Decimal('11')
    >>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
    Decimal('11729830')
    >>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
    Decimal('-0')
    >>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
    Decimal('1')
    """
    base = _convert_other(a, raiseit=True)
    result = base.__pow__(b, modulo, context=self)
    if result is NotImplemented:
        # b was not coercible to Decimal.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return result
def quantize(self, a, b):
    """Returns a value equal to 'a' (rounded), having the exponent of 'b'.

    The coefficient of the result is derived from the left-hand
    operand's.  It may be rounded using the current rounding setting
    (if the exponent is being increased), multiplied by a positive
    power of ten (if the exponent is being decreased), or left
    unchanged (if the exponent already matches the right-hand
    operand's).

    Unlike other operations, if the length of the coefficient after
    the quantize operation would exceed precision, an Invalid
    operation condition is raised.  This guarantees that, barring an
    error condition, the exponent of the result of a quantize always
    equals that of the right-hand operand.

    Also unlike other operations, quantize never raises Underflow,
    even if the result is subnormal and inexact.

    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
    Decimal('2.170')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
    Decimal('2.17')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
    Decimal('2.2')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
    Decimal('2')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
    Decimal('0E+1')
    >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
    Decimal('-Infinity')
    >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
    Decimal('NaN')
    >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
    Decimal('-0')
    >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
    Decimal('-0E+5')
    >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
    Decimal('NaN')
    >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
    Decimal('NaN')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
    Decimal('217.0')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
    Decimal('217')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
    Decimal('2.2E+2')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
    Decimal('2E+2')
    """
    return _convert_other(a, raiseit=True).quantize(b, context=self)
def radix(self):
    """Just returns 10, as this is Decimal, :)

    >>> ExtendedContext.radix()
    Decimal('10')
    """
    # The radix of the decimal arithmetic is, by definition, ten.
    return Decimal(10)
def remainder(self, a, b):
    """Returns the remainder from integer division.

    The result is the residue of the dividend after integer division
    as described for divide-integer, rounded to precision digits if
    necessary.  The sign of a non-zero result matches that of the
    original dividend.

    This operation fails under the same conditions as integer division
    (that is, if integer division on the same two operands would fail,
    the remainder cannot be calculated).

    >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
    Decimal('2.1')
    >>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
    Decimal('1')
    >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
    Decimal('-1')
    >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
    Decimal('0.2')
    >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
    Decimal('0.1')
    >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
    Decimal('1.0')
    >>> ExtendedContext.remainder(22, 6)
    Decimal('4')
    """
    dividend = _convert_other(a, raiseit=True)
    residue = dividend.__mod__(b, context=self)
    if residue is NotImplemented:
        # b was not coercible to Decimal.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return residue
def remainder_near(self, a, b):
    """Returns "a - b * n", where n is the integer nearest the exact
    value of "a / b" (if two integers are equally near, the even one
    is chosen).  If the result is equal to 0 then its sign is the sign
    of a.

    This operation fails under the same conditions as integer division
    (that is, if integer division on the same two operands would fail,
    the remainder cannot be calculated).

    >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
    Decimal('-0.9')
    >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
    Decimal('-2')
    >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
    Decimal('1')
    >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
    Decimal('-1')
    >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
    Decimal('0.2')
    >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
    Decimal('0.1')
    >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
    Decimal('-0.3')
    >>> ExtendedContext.remainder_near(3, 11)
    Decimal('3')
    """
    return _convert_other(a, raiseit=True).remainder_near(b, context=self)
def rotate(self, a, b):
    """Returns a rotated copy of a, b times.

    The coefficient of the result is a rotated copy of the digits of
    the first operand's coefficient.  The number of places to rotate
    is the absolute value of the second operand; rotation is to the
    left if the second operand is positive, to the right otherwise.

    >>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
    Decimal('400000003')
    >>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
    Decimal('12')
    >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
    Decimal('891234567')
    >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
    Decimal('123456789')
    >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
    Decimal('345678912')
    >>> ExtendedContext.rotate(1333333, 1)
    Decimal('13333330')
    """
    return _convert_other(a, raiseit=True).rotate(b, context=self)
def same_quantum(self, a, b):
    """Returns True if the two operands have the same exponent.

    Neither the sign nor the coefficient of either operand ever
    affects the result.

    >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
    False
    >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
    True
    >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
    False
    >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
    True
    >>> ExtendedContext.same_quantum(10000, -1)
    True
    """
    # Context-independent: no context argument is passed along.
    return _convert_other(a, raiseit=True).same_quantum(b)
def scaleb (self, a, b):
"""Returns the first operand after adding the second value its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
class _WorkRep(object):
    """Mutable scratch representation of a Decimal used by the arithmetic
    routines: a (sign, int, exp) triple with a plain Python int coefficient,
    cheaper to update in place than an immutable Decimal."""
    __slots__ = ('sign','int','exp')
    # sign: 0 or 1
    # int: int
    # exp: None, int, or string
    def __init__(self, value=None):
        """Build from nothing (all fields unset/zero), from a Decimal
        (copying its sign/coefficient/exponent), or from a 3-tuple."""
        if value is None:
            self.sign = None
            self.int = 0
            self.exp = None
        elif isinstance(value, Decimal):
            # coefficient is stored as a string on Decimal; convert to int
            self.sign = value._sign
            self.int = int(value._int)
            self.exp = value._exp
        else:
            # assert isinstance(value, tuple)
            self.sign = value[0]
            self.int = value[1]
            self.exp = value[2]
    def __repr__(self):
        return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
    __str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
# _nbits(x) is the number of bits needed to represent abs(x); alias for
# the builtin int.bit_length so it can be called as a plain function.
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
b, a = a, a--n//a>>1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
def _ilog(x, M, L = 8):
    """Integer approximation to M*log(x/M), with absolute error boundable
    in terms only of x/M.
    Given positive integers x and M, return an integer approximation to
    M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
    between the approximation and the exact result is at most 22. For
    L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
    both cases these are upper bounds on the error; it will usually be
    much smaller."""
    # The basic algorithm is the following: let log1p be the function
    # log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
    # the reduction
    #
    # log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
    #
    # repeatedly until the argument to log1p is small (< 2**-L in
    # absolute value). For small y we can use the Taylor series
    # expansion
    #
    # log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
    #
    # truncating at T such that y**T is small enough. The whole
    # computation is carried out in a form of fixed-point arithmetic,
    # with a real number z being represented by an integer
    # approximation to z*M. To avoid loss of precision, the y below
    # is actually an integer approximation to 2**R*y*M, where R is the
    # number of reductions performed so far.
    y = x-M
    # argument reduction; R = number of reductions performed
    # Loop until |y| (scaled by 2**R) is below M * 2**-L; the two halves
    # of the condition express the same test for R <= L and R > L.
    R = 0
    while (R <= L and abs(y) << L-R >= M or
           R > L and abs(y) >> R-L >= M):
        y = _div_nearest((M*y) << 1,
                         M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
        R += 1
    # Taylor series with T terms
    # T ~ (10/(3L)) * number of decimal digits of M: enough terms that the
    # truncated tail is negligible at this fixed-point precision.
    T = -int(-10*len(str(M))//(3*L))
    yshift = _rshift_nearest(y, R)
    # Evaluate the alternating Taylor series by a Horner-style recurrence.
    w = _div_nearest(M, T)
    for k in range(T-1, 0, -1):
        w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
    return _div_nearest(w*y, M)
def _dlog10(c, e, p):
    """Given integers c, e and p with c > 0, p >= 0, compute an integer
    approximation to 10**p * log10(c*10**e), with an absolute error of
    at most 1. Assumes that c*10**e is not exactly 1."""
    # increase precision by 2; compensate for this by dividing
    # final result by 100
    p += 2
    # write c*10**e as d*10**f with either:
    # f >= 0 and 1 <= d <= 10, or
    # f <= 0 and 0.1 <= d <= 1.
    # Thus for c*10**e close to 1, f = 0
    l = len(str(c))
    f = e+l - (e+l >= 1)
    if p > 0:
        M = 10**p
        # rescale c so that _ilog sees c/M == d (the mantissa part)
        k = e+p-f
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k)
        log_d = _ilog(c, M) # error < 5 + 22 = 27
        log_10 = _log10_digits(p) # error < 1
        # convert natural log to base 10 by dividing by log(10)
        log_d = _div_nearest(log_d*M, log_10)
        log_tenpower = f*M # exact
    else:
        # p <= 0 after the bump: the mantissa contributes nothing at this
        # precision, only the power-of-ten term survives
        log_d = 0 # error < 2.31
        log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
    # undo the two extra digits of working precision
    return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
    """Given integers c, e and p with c > 0, compute an integer
    approximation to 10**p * log(c*10**e), with an absolute error of
    at most 1. Assumes that c*10**e is not exactly 1."""
    # Increase precision by 2. The precision increase is compensated
    # for at the end with a division by 100.
    p += 2
    # rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
    # or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
    # as 10**p * log(d) + 10**p*f * log(10).
    l = len(str(c))
    f = e+l - (e+l >= 1)
    # compute approximation to 10**p*log(d), with error < 27
    if p > 0:
        k = e+p-f
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
        # _ilog magnifies existing error in c by a factor of at most 10
        log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
    else:
        # p <= 0: just approximate the whole thing by 0; error < 2.31
        log_d = 0
    # compute approximation to f*10**p*log(10), with error < 11.
    if f:
        # request extra digits of log(10) to absorb multiplication by f
        extra = len(str(abs(f)))-1
        if p + extra >= 0:
            # error in f * _log10_digits(p+extra) < |f| * 1 = |f|
            # after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
            f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
        else:
            f_log_ten = 0
    else:
        f_log_ten = 0
    # error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
    return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
    """Class to compute, store, and allow retrieval of, digits of the
    constant log(10) = 2.302585.... This constant is needed by
    Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
    def __init__(self):
        # seed with precomputed digits; extended lazily by getdigits
        self.digits = "23025850929940456840179914546843642076011014886"
    def getdigits(self, p):
        """Given an integer p >= 0, return floor(10**p)*log(10).
        For example, self.getdigits(3) returns 2302.
        """
        # digits are stored as a string, for quick conversion to
        # integer in the case that we've already computed enough
        # digits; the stored digits should always be correct
        # (truncated, not rounded to nearest).
        if p < 0:
            raise ValueError("p should be nonnegative")
        if p >= len(self.digits):
            # compute p+3, p+6, p+9, ... digits; continue until at
            # least one of the extra digits is nonzero
            extra = 3
            while True:
                # compute p+extra digits, correct to within 1ulp
                M = 10**(p+extra+2)
                digits = str(_div_nearest(_ilog(10*M, M), 100))
                if digits[-extra:] != '0'*extra:
                    break
                extra += 3
            # keep all reliable digits so far; remove trailing zeros
            # and next nonzero digit
            self.digits = digits.rstrip('0')[:-1]
        return int(self.digits[:p+1])
# module-level accessor: _log10_digits(p) -> int(10**p * log(10))
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
    """Given integers x and M, M > 0, such that x/M is small in absolute
    value, compute an integer approximation to M*exp(x/M). For 0 <=
    x/M <= 2.4, the absolute error in the result is bounded by 60 (and
    is usually much smaller)."""
    # Algorithm: to compute exp(z) for a real number z, first divide z
    # by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
    # compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
    # series
    #
    # expm1(x) = x + x**2/2! + x**3/3! + ...
    #
    # Now use the identity
    #
    # expm1(2x) = expm1(x)*(expm1(x)+2)
    #
    # R times to compute the sequence expm1(z/2**R),
    # expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
    # Find R such that x/2**R/M <= 2**-L
    R = _nbits((x<<L)//M)
    # Taylor series. (2**L)**T > M
    T = -int(-10*len(str(M))//(3*L))
    # Horner-style evaluation of the expm1 Taylor series in fixed point.
    y = _div_nearest(x, T)
    Mshift = M<<R
    for i in range(T-1, 0, -1):
        y = _div_nearest(x*(Mshift + y), Mshift * i)
    # Expansion
    # Apply the doubling identity R times to undo the argument reduction.
    for k in range(R-1, -1, -1):
        Mshift = M<<(k+2)
        y = _div_nearest(y*(y+Mshift), Mshift)
    # y now approximates M*expm1(x/M); add M to get M*exp(x/M).
    return M+y
def _dexp(c, e, p):
    """Compute an approximation to exp(c*10**e), with p decimal places of
    precision.
    Returns integers d, f such that:
    10**(p-1) <= d <= 10**p, and
    (d-1)*10**f < exp(c*10**e) < (d+1)*10**f
    In other words, d*10**f is an approximation to exp(c*10**e) with p
    digits of precision, and with an error in d of at most 1. This is
    almost, but not quite, the same as the error being < 1ulp: when d
    = 10**(p-1) the error could be up to 10 ulp."""
    # we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
    p += 2
    # compute log(10) with extra precision = adjusted exponent of c*10**e
    extra = max(0, e + len(str(c)) - 1)
    q = p + extra
    # compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
    # rounding down
    shift = e+q
    if shift >= 0:
        cshift = c*10**shift
    else:
        cshift = c//10**-shift
    # quot is the integer part (becomes the power-of-ten exponent),
    # rem the fractional part fed to _iexp
    quot, rem = divmod(cshift, _log10_digits(q))
    # reduce remainder back to original precision
    rem = _div_nearest(rem, 10**extra)
    # error in result of _iexp < 120; error after division < 0.62
    return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
    """Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
    y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
    10**(p-1) <= c <= 10**p, and
    (c-1)*10**e < x**y < (c+1)*10**e
    in other words, c*10**e is an approximation to x**y with p digits
    of precision, and with an error in c of at most 1. (This is
    almost, but not quite, the same as the error being < 1ulp: when c
    == 10**(p-1) we can only guarantee error < 10ulp.)
    We assume that: x is positive and not equal to 1, and y is nonzero.
    """
    # Find b such that 10**(b-1) <= |y| <= 10**b
    b = len(str(abs(yc))) + ye
    # log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
    lxc = _dlog(xc, xe, p+b+1)
    # compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
    shift = ye-b
    if shift >= 0:
        pc = lxc*yc*10**shift
    else:
        pc = _div_nearest(lxc*yc, 10**-shift)
    if pc == 0:
        # we prefer a result that isn't exactly 1; this makes it
        # easier to compute a correctly rounded result in __pow__
        # pick the side of 1 matching the sign of y*log(x)
        if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
            coeff, exp = 10**(p-1)+1, 1-p
        else:
            coeff, exp = 10**p-1, -p
    else:
        # exponentiate: x**y = exp(y*log(x)), then drop the extra digit
        coeff, exp = _dexp(pc, -(p+1), p+1)
        coeff = _div_nearest(coeff, 10)
        exp += 1
    return coeff, exp
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
    """Convert other to Decimal.
    Verifies that it's ok to use in an implicit construction.
    If allow_float is true, allow conversion from float; this
    is used in the comparison methods (__eq__ and friends).
    """
    if isinstance(other, Decimal):
        result = other
    elif isinstance(other, int):
        result = Decimal(other)
    elif allow_float and isinstance(other, float):
        result = Decimal.from_float(other)
    elif raiseit:
        raise TypeError("Unable to convert %s to Decimal" % other)
    else:
        # caller asked for the lenient behaviour: let the binary-op
        # machinery fall back to the other operand
        result = NotImplemented
    return result
def _convert_for_comparison(self, other, equality_op=False):
    """Given a Decimal instance self and a Python object other, return
    a pair (s, o) of Decimal instances such that "s op o" is
    equivalent to "self op other" for any of the 6 comparison
    operators "op".
    """
    if isinstance(other, Decimal):
        return self, other
    # Comparison with a Rational instance (also includes integers):
    # self op n/d <=> self*d op n (for n and d integers, d positive).
    # A NaN or infinity can be left unchanged without affecting the
    # comparison result.
    if isinstance(other, _numbers.Rational):
        if not self._is_special:
            # scale self's coefficient by the denominator so both sides
            # become integers without losing ordering
            self = _dec_from_triple(self._sign,
                                    str(int(self._int) * other.denominator),
                                    self._exp)
        return self, Decimal(other.numerator)
    # Comparisons with float and complex types. == and != comparisons
    # with complex numbers should succeed, returning either True or False
    # as appropriate. Other comparisons return NotImplemented.
    if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
        # a real-valued complex is treated as its real part
        other = other.real
    if isinstance(other, float):
        context = getcontext()
        # NOTE: mixing floats with Decimals sets/raises FloatOperation on
        # the current context -- a deliberate side effect of comparison.
        if equality_op:
            context.flags[FloatOperation] = 1
        else:
            context._raise_error(FloatOperation,
                "strict semantics for mixing floats and Decimals are enabled")
        return self, Decimal.from_float(other)
    # unsupported type: let Python try the reflected operation
    return NotImplemented, NotImplemented
##### Setup Specific Contexts ############################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
# NOTE(review): these defaults (prec=17, Emax=308, Emin=-324) track the
# range of an IEEE-754 double rather than CPython's stock decimal defaults
# (prec=28, Emax=999999, Emin=-999999) -- presumably deliberate in this
# port; confirm before changing.
DefaultContext = Context(
        prec=17, rounding=ROUND_HALF_EVEN,
        traps=[DivisionByZero, Overflow, InvalidOperation],
        flags=[],
        Emax=308,
        Emin=-324,
        capitals=1,
        clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
        prec=9, rounding=ROUND_HALF_UP,
        traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
        flags=[],
)
ExtendedContext = Context(
        prec=9, rounding=ROUND_HALF_EVEN,
        traps=[],
        flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
# NOTE(review): _jsre is not in the standard library -- it appears to be a
# JavaScript-backed 're' replacement (presumably a Brython/browser port);
# confirm it supports the compile/match semantics used below.
import _jsre as re
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
#
# NOTE(review): the format-specifier regex below is entirely commented out
# and 're' is deleted right after, so _parse_format_specifier_regex has no
# live definition in this file -- presumably this port defines it elsewhere
# (or __format__ support is disabled); confirm.
#_parse_format_specifier_regex = re.compile(r"""\A
#(?:
# (?P<fill>.)?
# (?P<align>[<>=^])
#)?
#(?P<sign>[-+ ])?
#(?P<alt>\#)?
#(?P<zeropad>0)?
#(?P<minimumwidth>(?!0)\d+)?
#(?P<thousands_sep>,)?
#(?:\.(?P<precision>0|(?!0)\d+))?
#(?P<type>[eEfFgGn%])?
#\Z
#""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
    import locale as _locale
except ImportError:
    pass
def _parse_format_specifier(format_spec, _localeconv=None):
    """Parse and validate a format specifier.
    Turns a standard numeric format specifier into a dict, with the
    following entries:
    fill: fill character to pad field to minimum width
    align: alignment type, either '<', '>', '=' or '^'
    sign: either '+', '-' or ' '
    minimumwidth: nonnegative integer giving minimum width
    zeropad: boolean, indicating whether to pad with zeros
    thousands_sep: string to use as thousands separator, or ''
    grouping: grouping for thousands separators, in format
    used by localeconv
    decimal_point: string to use for decimal point
    precision: nonnegative integer giving precision, or None
    type: one of the characters 'eEfFgG%', or None

    Raises ValueError for an invalid or self-contradictory specifier.
    _localeconv may be supplied (mainly for testing) to stand in for
    locale.localeconv().
    """
    # NOTE(review): no live definition of _parse_format_specifier_regex is
    # visible in this file (the regex above is commented out and 're' was
    # deleted), so this call would raise NameError -- confirm the port
    # defines it elsewhere.
    m = _parse_format_specifier_regex.match(format_spec)
    if m is None:
        raise ValueError("Invalid format specifier: " + format_spec)
    # get the dictionary
    format_dict = m.groupdict()
    # zeropad; defaults for fill and alignment. If zero padding
    # is requested, the fill and align fields should be absent.
    fill = format_dict['fill']
    align = format_dict['align']
    format_dict['zeropad'] = (format_dict['zeropad'] is not None)
    if format_dict['zeropad']:
        if fill is not None:
            raise ValueError("Fill character conflicts with '0'"
                             " in format specifier: " + format_spec)
        if align is not None:
            raise ValueError("Alignment conflicts with '0' in "
                             "format specifier: " + format_spec)
    format_dict['fill'] = fill or ' '
    # PEP 3101 originally specified that the default alignment should
    # be left; it was later agreed that right-aligned makes more sense
    # for numeric types. See http://bugs.python.org/issue6857.
    format_dict['align'] = align or '>'
    # default sign handling: '-' for negative, '' for positive
    if format_dict['sign'] is None:
        format_dict['sign'] = '-'
    # minimumwidth defaults to 0; precision remains None if not given
    format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
    if format_dict['precision'] is not None:
        format_dict['precision'] = int(format_dict['precision'])
    # if format type is 'g' or 'G' then a precision of 0 makes little
    # sense; convert it to 1. Same if format type is unspecified.
    if format_dict['precision'] == 0:
        if format_dict['type'] is None or format_dict['type'] in 'gGn':
            format_dict['precision'] = 1
    # determine thousands separator, grouping, and decimal separator, and
    # add appropriate entries to format_dict
    if format_dict['type'] == 'n':
        # apart from separators, 'n' behaves just like 'g'
        format_dict['type'] = 'g'
        if _localeconv is None:
            _localeconv = _locale.localeconv()
        if format_dict['thousands_sep'] is not None:
            raise ValueError("Explicit thousands separator conflicts with "
                             "'n' type in format specifier: " + format_spec)
        format_dict['thousands_sep'] = _localeconv['thousands_sep']
        format_dict['grouping'] = _localeconv['grouping']
        format_dict['decimal_point'] = _localeconv['decimal_point']
    else:
        if format_dict['thousands_sep'] is None:
            format_dict['thousands_sep'] = ''
        # non-locale formatting always groups in threes with '.' point
        format_dict['grouping'] = [3, 0]
        format_dict['decimal_point'] = '.'
    return format_dict
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
# (2) nonempty list of positive integers + [0]
# (3) list of positive integers + [locale.CHAR_MAX], or
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
    """Insert thousands separators into a digit string.
    spec is a dictionary whose keys should include 'thousands_sep' and
    'grouping'; typically it's the result of parsing the format
    specifier using _parse_format_specifier.
    The min_width keyword argument gives the minimum length of the
    result, which will be padded on the left with zeros if necessary.
    If necessary, the zero padding adds an extra '0' on the left to
    avoid a leading thousands separator. For example, inserting
    commas every three digits in '123456', with min_width=8, gives
    '0,123,456', even though that has length 9.
    """
    sep = spec['thousands_sep']
    grouping = spec['grouping']
    # groups collects digit groups right-to-left; digits and min_width
    # shrink as groups are peeled off.
    groups = []
    for l in _group_lengths(grouping):
        if l <= 0:
            raise ValueError("group length should be positive")
        # max(..., 1) forces at least 1 digit to the left of a separator
        l = min(max(len(digits), min_width, 1), l)
        # zero-pad the group if the remaining digits don't fill it
        groups.append('0'*(l - len(digits)) + digits[-l:])
        digits = digits[:-l]
        min_width -= l
        if not digits and min_width <= 0:
            break
        # the separator itself counts against the minimum width
        min_width -= len(sep)
    else:
        # grouping exhausted (finite grouping): everything left over,
        # zero-padded to whatever width is still owed, forms one group
        l = max(len(digits), min_width, 1)
        groups.append('0'*(l - len(digits)) + digits[-l:])
    return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
    """Format a number, given the following data:
    is_negative: true if the number is negative, else false
    intpart: string of digits that must appear before the decimal point
    fracpart: string of digits that must come after the point
    exp: exponent, as an integer
    spec: dictionary resulting from parsing the format specifier
    This function uses the information in spec to:
    insert separators (decimal separator and thousands separators)
    format the sign
    format the exponent
    add trailing '%' for the '%' type
    zero-pad if necessary
    fill and align if necessary
    """
    sign = _format_sign(is_negative, spec)
    # '#' (alt) forces a decimal point even with no fractional digits
    if fracpart or spec['alt']:
        fracpart = spec['decimal_point'] + fracpart
    if exp != 0 or spec['type'] in 'eE':
        # choose exponent letter case to match the requested type
        echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
        fracpart += "{0}{1:+}".format(echar, exp)
    if spec['type'] == '%':
        fracpart += '%'
    if spec['zeropad']:
        # zero padding is realized by widening the integer part
        min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
    else:
        min_width = 0
    intpart = _insert_thousands_sep(intpart, spec, min_width)
    return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
# (computed via Fermat's little theorem: 10**(p-2) mod p)
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys
# If the C accelerator is available, replace every name defined above that
# _decimal also provides with the C implementation.
try:
    import _decimal
except ImportError:
    pass
else:
    s1 = set(dir())
    s2 = set(dir(_decimal))
    # drop the pure-Python names that _decimal supersedes
    for name in s1 - s2:
        del globals()[name]
    del s1, s2, name
    from _decimal import *
# Running the module directly exercises the doctests embedded above.
if __name__ == '__main__':
    import doctest, decimal
    doctest.testmod(decimal)
|
sergecodd/FireFox-OS
|
refs/heads/master
|
B2G/gecko/media/webrtc/trunk/tools/gyp/test/subdirectory/gyptest-subdir-all.py
|
74
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a subsidiary dependent target from a .gyp file in a
subdirectory, without specifying an explicit output build directory,
and using the subdirectory's solution or project file as the entry point.
"""
import TestGyp
# Ninja and Android don't support running from subdirectories.
test = TestGyp.TestGyp(formats=['!ninja', '!android'])
# Generate build files for the top-level project, then relocate the whole
# tree so stale in-source state can't leak into the build.
test.run_gyp('prog1.gyp', chdir='src')
test.relocate('src', 'relocate/src')
chdir = 'relocate/src/subdir'
target = test.ALL
# Build everything reachable from the subdirectory's own project file...
test.build('prog2.gyp', target, chdir=chdir)
# ...and verify the sibling target outside that subdirectory was NOT built.
test.built_file_must_not_exist('prog1', type=test.EXECUTABLE, chdir=chdir)
test.run_built_executable('prog2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")
test.pass_test()
|
valentin-krasontovitsch/ansible
|
refs/heads/devel
|
test/units/module_utils/basic/test_platform_distribution.py
|
14
|
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017-2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.mock.procenv import ModuleTestCase
from units.compat.mock import patch
from ansible.module_utils.six.moves import builtins
# Functions being tested
from ansible.module_utils.basic import get_platform
from ansible.module_utils.basic import get_all_subclasses
from ansible.module_utils.basic import get_distribution
from ansible.module_utils.basic import get_distribution_version
from ansible.module_utils.basic import load_platform_subclass
# Keep a handle on the real __import__; not used in the visible portion of
# this file -- presumably retained for tests that patch imports. Confirm.
realimport = builtins.__import__
@pytest.fixture
def platform_linux(mocker):
    """Patch platform.system() to report 'Linux' for the duration of a test."""
    mocker.patch('platform.system', return_value='Linux')
#
# get_platform tests
#
def test_get_platform():
    """get_platform() should pass through whatever platform.system() returns."""
    with patch('platform.system', return_value='foo'):
        assert get_platform() == 'foo'
#
# get_distribution tests
#
def test_get_distribution_not_linux():
    """A non-Linux platform has no distribution, so None is expected."""
    patcher = patch('platform.system', return_value='Foo')
    patcher.start()
    try:
        assert get_distribution() is None
    finally:
        patcher.stop()
@pytest.mark.usefixtures("platform_linux")
class TestGetDistribution:
    """Tests for get_distribution that have to find something."""
    def test_distro_known(self):
        # The distro name is returned capitalized ("foo" -> "Foo").
        with patch('ansible.module_utils.distro.name', return_value="foo"):
            assert get_distribution() == "Foo"
    def test_distro_unknown(self):
        # An empty distro name falls back to the generic "OtherLinux".
        with patch('ansible.module_utils.distro.name', return_value=""):
            assert get_distribution() == "OtherLinux"
    def test_distro_amazon_part_of_another_name(self):
        # "Amazon" embedded inside a longer name must NOT trigger the
        # Amazon special case; only plain capitalization applies.
        with patch('ansible.module_utils.distro.name', return_value="AmazonFooBar"):
            assert get_distribution() == "Amazonfoobar"
    def test_distro_amazon_linux(self):
        # "Amazon Linux AMI" is special-cased down to just "Amazon".
        with patch('ansible.module_utils.distro.name', return_value="Amazon Linux AMI"):
            assert get_distribution() == "Amazon"
#
# get_distribution_version tests
#
def test_get_distribution_version_not_linux():
    """A non-Linux platform has no distribution version, so None is expected."""
    patcher = patch('platform.system', return_value='Foo')
    patcher.start()
    try:
        assert get_distribution_version() is None
    finally:
        patcher.stop()
@pytest.mark.usefixtures("platform_linux")
def test_distro_found():
    # On Linux, get_distribution_version() passes distro.version through.
    with patch('ansible.module_utils.distro.version', return_value="1"):
        assert get_distribution_version() == "1"
#
# Tests for LoadPlatformSubclass
#
class TestLoadPlatformSubclass:
    """load_platform_subclass() must pick the subclass whose platform and
    distribution attributes match the running system."""
    class LinuxTest:
        pass
    class Foo(LinuxTest):
        # Platform match only (no specific distribution).
        platform = "Linux"
        distribution = None
    class Bar(LinuxTest):
        # Platform and distribution both set.
        platform = "Linux"
        distribution = "Bar"
    def test_not_linux(self):
        # if neither match, the fallback should be the top-level class
        with patch('platform.system', return_value="Foo"):
            with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
                assert isinstance(load_platform_subclass(self.LinuxTest), self.LinuxTest)
    @pytest.mark.usefixtures("platform_linux")
    def test_get_distribution_none(self):
        # match just the platform class, not a specific distribution
        with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
            assert isinstance(load_platform_subclass(self.LinuxTest), self.Foo)
    @pytest.mark.usefixtures("platform_linux")
    def test_get_distribution_found(self):
        # match both the distribution and platform class
        with patch('ansible.module_utils.common.sys_info.get_distribution', return_value="Bar"):
            assert isinstance(load_platform_subclass(self.LinuxTest), self.Bar)
#
# Tests for get_all_subclasses
#
class TestGetAllSubclasses:
    """get_all_subclasses() must return every transitive subclass.

    Hierarchy used below:
        Base -> BranchI  -> BranchIA, BranchIB
        Base -> BranchII -> BranchIIA, BranchIIB
    """
    class Base:
        pass
    class BranchI(Base):
        pass
    class BranchII(Base):
        pass
    class BranchIA(BranchI):
        pass
    class BranchIB(BranchI):
        pass
    class BranchIIA(BranchII):
        pass
    class BranchIIB(BranchII):
        pass
    def test_bottom_level(self):
        # A leaf class has no subclasses.
        assert get_all_subclasses(self.BranchIIB) == []
    def test_one_inheritance(self):
        assert set(get_all_subclasses(self.BranchII)) == set([self.BranchIIA, self.BranchIIB])
    def test_toplevel(self):
        # Grandchildren are included, i.e. the walk is transitive.
        assert set(get_all_subclasses(self.Base)) == set([self.BranchI, self.BranchII,
                                                          self.BranchIA, self.BranchIB,
                                                          self.BranchIIA, self.BranchIIB])
|
HPPTECH/hpp_IOSTressTest
|
refs/heads/master
|
Resources/ssh/pexpect-3.2/tests/platform_tests/test_middle_buffer.py
|
5
|
#!/usr/bin/env python2
# Drives an interactive /bin/sh through the expyct API and prints what was
# matched at each step (Python 2 script; print statements are intentional).
import expyct
import time
# NOTE(review): 'expyct' looks like a local variant of pexpect used by these
# platform tests -- confirm the module name/location before running.
e = expyct.expyct ('/bin/sh -i')
e.timeout=60
# Wait for a root (#) or user ($) shell prompt.
e.expect(['#', '\$'])
e.send ('ls -la /\n')
# Match either the literal 'foo' or a d<vowel>v group (e.g. 'dev').
i = e.expect (['foo','(d[aeiou]v)'])
print '\nRead before match>%s<' % e.before
print 'Matched:>%s<' % e.matched
print 'index:', i
# Consume the rest of the listing up to the next prompt.
i = e.expect(['#', '\$'])
print '\nRead before match>%s<' % e.before
print 'Matched:>%s<' % e.matched
print 'index:', i
e.send('exit\n')
print 'Sent exit'
time.sleep(2)
print 'isAlive:', e.isAlive()
# This should test timeout...
i = e.expect ('#####')
print '\nRead before match>%s<' % e.before
print 'Matched:>%s<' % e.matched
print 'index:', i
|
derek-schaefer/django-json-field
|
refs/heads/master
|
test_project/app/tests.py
|
5
|
from __future__ import unicode_literals, division
import inspect
from json_field.fields import JSON_DECODE_ERROR
from test_project.app.models import Test
from test_project.app.forms import TestForm, OptionalForm, \
EvalForm, ModelForm
from django.test import TestCase
from django.db.utils import IntegrityError
import json
import datetime
from decimal import Decimal
try:
from django.utils import unittest
except ImportError:
import unittest
class JSONFieldTest(TestCase):
    """Round-trip and form tests for the JSON model field variants
    (`json`, `json_eager`, `json_null`) on the Test model."""
    def test_simple(self):
        """Primitives, lists, dicts, and their JSON-string forms round-trip."""
        t1 = Test.objects.create(json=123)
        self.assertEqual(123, Test.objects.get(pk=t1.pk).json)
        t2 = Test.objects.create(json='123')
        self.assertEqual(123, Test.objects.get(pk=t2.pk).json)
        t3 = Test.objects.create(json=[123])
        self.assertEqual([123], Test.objects.get(pk=t3.pk).json)
        t4 = Test.objects.create(json='[123]')
        self.assertEqual([123], Test.objects.get(pk=t4.pk).json)
        t5 = Test.objects.create(json={'test':[1,2,3]})
        self.assertEqual({'test':[1,2,3]}, Test.objects.get(pk=t5.pk).json)
        t6 = Test.objects.create(json='{"test":[1,2,3]}')
        self.assertEqual({'test':[1,2,3]}, Test.objects.get(pk=t6.pk).json)
        t7 = Test.objects.create(json=[1,2,3])
        t7.json = {'asdf':123}
        self.assertEqual({'asdf':123}, t7.json)
        t8 = Test.objects.get(pk=t7.pk)
        t8.json = {'asdf':123}
        self.assertEqual({'asdf':123}, t8.json)
    def test_eager(self):
        """Same round-trips as test_simple via the `json_eager` field."""
        t1 = Test.objects.create(json_eager=123)
        self.assertEqual(123, Test.objects.get(pk=t1.pk).json_eager)
        t2 = Test.objects.create(json_eager='123')
        self.assertEqual(123, Test.objects.get(pk=t2.pk).json_eager)
        t3 = Test.objects.create(json_eager=[123])
        self.assertEqual([123], Test.objects.get(pk=t3.pk).json_eager)
        t4 = Test.objects.create(json_eager='[123]')
        self.assertEqual([123], Test.objects.get(pk=t4.pk).json_eager)
        t5 = Test.objects.create(json_eager={'test':[1,2,3]})
        self.assertEqual({'test':[1,2,3]}, Test.objects.get(pk=t5.pk).json_eager)
        t6 = Test.objects.create(json_eager='{"test":[1,2,3]}')
        self.assertEqual({'test':[1,2,3]}, Test.objects.get(pk=t6.pk).json_eager)
        t7 = Test.objects.create(json_eager=[1,2,3])
        t7.json_eager = {'asdf':123}
        self.assertEqual({'asdf':123}, t7.json_eager)
        t8 = Test.objects.get(pk=t7.pk)
        t8.json_eager = {'asdf':123}
        self.assertEqual({'asdf':123}, t8.json_eager)
    def test_null(self):
        """None and the empty string are stored and serialized distinctly."""
        t1 = Test.objects.create(json=None)
        self.assertEqual(None, t1.json)
        self.assertEqual('null', t1.get_json_json())
        t2 = Test.objects.create(json='')
        self.assertEqual('', t2.json)
        self.assertEqual('""', t2.get_json_json())
        t3 = Test.objects.create(json_null=None)
        self.assertEqual(None, t3.json_null)
        self.assertEqual('null', t3.get_json_null_json())
        t4 = Test.objects.create(json_null='')
        self.assertEqual('', t4.json_null)
        self.assertEqual('""', t4.get_json_null_json())
    def test_decimal(self):
        """Float input decodes to Decimal; Decimal input comes back via str."""
        t1 = Test.objects.create(json=1.24)
        self.assertEqual(Decimal('1.24'), Test.objects.get(pk=t1.pk).json)
        t2 = Test.objects.create(json=Decimal(1.24))
        self.assertEqual(str(Decimal(1.24)), Test.objects.get(pk=t2.pk).json)
        t3 = Test.objects.create(json={'test':[{'test':Decimal(1.24)}]})
        self.assertEqual({'test':[{'test':str(Decimal(1.24))}]}, Test.objects.get(pk=t3.pk).json)
    def test_time(self):
        """time values survive storage, rounded to millisecond precision."""
        now = datetime.datetime.now().time()
        t1 = Test.objects.create(json=now)
        # JSON does not have microsecond precision, round to millisecond
        now_rounded = now.replace(microsecond=(int(now.microsecond) // 1000) * 1000)
        self.assertEqual(now_rounded, Test.objects.get(pk=t1.pk).json)
        t2 = Test.objects.create(json={'time':[now]})
        self.assertEqual({'time':[now_rounded]}, Test.objects.get(pk=t2.pk).json)
    def test_date(self):
        """date values round-trip unchanged (no sub-day precision to lose)."""
        today = datetime.date.today()
        t1 = Test.objects.create(json=today)
        self.assertEqual(today, Test.objects.get(pk=t1.pk).json)
        t2 = Test.objects.create(json={'today':today})
        self.assertEqual({'today':today}, Test.objects.get(pk=t2.pk).json)
    def test_datetime(self):
        """datetime values survive storage, rounded to millisecond precision."""
        now = datetime.datetime.now()
        t1 = Test.objects.create(json=now)
        # JSON does not have microsecond precision, round to millisecond
        now_rounded = now.replace(microsecond=(int(now.microsecond) // 1000) * 1000)
        self.assertEqual(now_rounded, Test.objects.get(pk=t1.pk).json)
        t2 = Test.objects.create(json={'test':[{'test':now}]})
        self.assertEqual({'test':[{'test':now_rounded}]}, Test.objects.get(pk=t2.pk).json)
    def test_numerical_strings(self):
        """Quoted numbers must stay strings, not be coerced to numbers."""
        t1 = Test.objects.create(json='"555"')
        self.assertEqual('555', Test.objects.get(pk=t1.pk).json)
        t2 = Test.objects.create(json='"123.98712634789162349781264"')
        self.assertEqual('123.98712634789162349781264', Test.objects.get(pk=t2.pk).json)
    def test_datelike_strings(self):
        """Strings merely containing date/time fragments stay plain strings."""
        t1 = Test.objects.create(json='{"title": "2014-01-27 | Title with date"}')
        self.assertEqual({'title': '2014-01-27 | Title with date'}, Test.objects.get(pk=t1.pk).json)
        t2 = Test.objects.create(json='{"title": "10:42:07 | Title with date"}')
        self.assertEqual({'title': '10:42:07 | Title with date'}, Test.objects.get(pk=t2.pk).json)
        t3 = Test.objects.create(json='{"title": "10:42:07.123 | Title with date"}')
        self.assertEqual({'title': '10:42:07.123 | Title with date'}, Test.objects.get(pk=t3.pk).json)
        t4 = Test.objects.create(json='{"title": "2014-05-07T12:34:56 | Title with date"}')
        self.assertEqual({'title': '2014-05-07T12:34:56 | Title with date'}, Test.objects.get(pk=t4.pk).json)
    def test_get_set_json(self):
        """The generated get_<field>_json/set_<field>_json helpers work."""
        t1 = Test.objects.create(json={'test':123})
        self.assertEqual({'test':123}, t1.json)
        self.assertEqual('{"test": 123}', t1.get_json_json())
        t2 = Test.objects.create(json='')
        self.assertEqual('', t2.json)
        self.assertEqual('""', t2.get_json_json())
        self.assertEqual(None, t2.json_null)
        self.assertEqual('null', t2.get_json_null_json())
        t3 = Test.objects.create(json=[1,2,3])
        self.assertEqual([1,2,3], t3.json)
        self.assertEqual('[1, 2, 3]', t3.get_json_json())
        t3.set_json_json('[1, 2, 3, 4, 5]')
        self.assertEqual([1, 2, 3, 4, 5], t3.json)
        self.assertEqual('[1, 2, 3, 4, 5]', t3.get_json_json())
        t3.set_json_json(123)
        self.assertEqual(123, t3.json)
        self.assertEqual('123', t3.get_json_json())
    def test_strings(self):
        """Bare and quoted strings decode alike; malformed JSON stays raw."""
        t1 = Test.objects.create(json='a')
        self.assertEqual('a', t1.json)
        self.assertEqual('"a"', t1.get_json_json())
        t2 = Test.objects.create(json='"a"')
        self.assertEqual('a', t2.json)
        self.assertEqual('"a"', t2.get_json_json())
        t3 = Test.objects.create(json_null='a')
        self.assertEqual('a', t3.json_null)
        self.assertEqual('"a"', t3.get_json_null_json())
        t4 = Test.objects.create(json='"a')
        self.assertEqual('"a', t4.json)
        self.assertEqual('"\\"a"', t4.get_json_json())
    def test_formfield(self):
        """Form validation: required vs optional fields, eval'd datetimes."""
        data = {'json': '{"asdf":42}'}
        f1 = TestForm(data)
        self.assertTrue(f1.is_valid())
        self.assertEqual(f1.cleaned_data, {'json': {'asdf':42}})
        f2 = TestForm({})
        self.assertFalse(f2.is_valid())
        f3 = OptionalForm({})
        self.assertTrue(f3.is_valid())
        self.assertEqual(f3.cleaned_data, {'json': None})
        f4 = TestForm({'json':'{"time": datetime.datetime.now()}'})
        self.assertFalse(f4.is_valid())
        f5 = EvalForm({'json':'{"time": datetime.datetime.now()}'})
        self.assertTrue(f5.is_valid())
        f6 = ModelForm({'json':'{"time": datetime.datetime.now()}'})
        self.assertFalse(f6.is_valid())
        # NOTE(review): f7 repeats the f6 case verbatim -- probably a
        # copy-paste leftover; one of the two was likely meant to differ.
        f7 = ModelForm({'json':'{"time": datetime.datetime.now()}'})
        self.assertFalse(f7.is_valid())
    def test_creator_plays_nice_with_module_inspect(self):
        """
        From upstream, based on:
        https://code.djangoproject.com/ticket/12568
        and corresponding patch:
        https://code.djangoproject.com/changeset/50633e7353694ff54f14b04469be3792f286182f
        Custom fields should play nice with python standard module inspect.
        http://users.rcn.com/python/download/Descriptor.htm#properties
        """
        # The custom Creator's non property like behaviour made the properties
        # invisible for inspection.
        data = dict(inspect.getmembers(Test))
        self.assertIn('json', data)
|
JetBrains/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/singleLine/Indent21.after.py
|
747
|
class C:
def foo(self):
x = 1
y = 2
|
emidln/django_roa
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/contrib/sitemaps/tests/__init__.py
|
299
|
from django.contrib.sitemaps.tests.basic import *
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/333_test_utils.py
|
26
|
import datetime
from email import utils
import test.support
import time
import unittest
import sys
import os.path
class DateTimeTests(unittest.TestCase):
    """Formatting and parsing round-trips for email.utils datetimes."""
    datestring = 'Sun, 23 Sep 2001 20:10:55'
    dateargs = (2001, 9, 23, 20, 10, 55)
    offsetstring = ' -0700'
    utcoffset = datetime.timedelta(hours=-7)
    tz = datetime.timezone(utcoffset)
    naive_dt = datetime.datetime(*dateargs)
    aware_dt = datetime.datetime(*dateargs, tzinfo=tz)
    def test_naive_datetime(self):
        # A naive datetime formats with the special -0000 offset.
        expected = self.datestring + ' -0000'
        self.assertEqual(utils.format_datetime(self.naive_dt), expected)
    def test_aware_datetime(self):
        # An aware datetime carries its own UTC offset.
        expected = self.datestring + self.offsetstring
        self.assertEqual(utils.format_datetime(self.aware_dt), expected)
    def test_usegmt(self):
        # usegmt=True renders a UTC datetime with the literal 'GMT' zone.
        utc_dt = self.naive_dt.replace(tzinfo=datetime.timezone.utc)
        self.assertEqual(utils.format_datetime(utc_dt, usegmt=True),
                         self.datestring + ' GMT')
    def test_usegmt_with_naive_datetime_raises(self):
        self.assertRaises(ValueError,
                          utils.format_datetime, self.naive_dt, usegmt=True)
    def test_usegmt_with_non_utc_datetime_raises(self):
        self.assertRaises(ValueError,
                          utils.format_datetime, self.aware_dt, usegmt=True)
    def test_parsedate_to_datetime(self):
        parsed = utils.parsedate_to_datetime(self.datestring + self.offsetstring)
        self.assertEqual(parsed, self.aware_dt)
    def test_parsedate_to_datetime_naive(self):
        # A -0000 offset means "no zone information": the result is naive.
        parsed = utils.parsedate_to_datetime(self.datestring + ' -0000')
        self.assertEqual(parsed, self.naive_dt)
class LocaltimeTests(unittest.TestCase):
    """Tests for email.utils.localtime under varying daylight/TZ settings.

    NOTE(review): relies on CPython-internal helpers (test.support.patch,
    test.support.run_with_tz) and the host zoneinfo database, so these tests
    only run inside a full CPython source checkout.
    """
    def test_localtime_is_tz_aware_daylight_true(self):
        test.support.patch(self, time, 'daylight', True)
        t = utils.localtime()
        self.assertIsNot(t.tzinfo, None)
    def test_localtime_is_tz_aware_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t = utils.localtime()
        self.assertIsNot(t.tzinfo, None)
    def test_localtime_daylight_true_dst_false(self):
        # isdst=-1 lets localtime deduce DST; converting its own result
        # again must be a fixed point.
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=-1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)
    def test_localtime_daylight_false_dst_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=-1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)
    def test_localtime_daylight_true_dst_true(self):
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)
    def test_localtime_daylight_false_dst_true(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(2012, 3, 12, 1, 1)
        t1 = utils.localtime(t0, isdst=1)
        t2 = utils.localtime(t1)
        self.assertEqual(t1, t2)
    @test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
    def test_localtime_epoch_utc_daylight_true(self):
        # A UTC datetime converted under US/Eastern rules shifts by -5 hours.
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
        t1 = utils.localtime(t0)
        t2 = t0 - datetime.timedelta(hours=5)
        t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
        self.assertEqual(t1, t2)
    @test.support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
    def test_localtime_epoch_utc_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(1990, 1, 1, tzinfo = datetime.timezone.utc)
        t1 = utils.localtime(t0)
        t2 = t0 - datetime.timedelta(hours=5)
        t2 = t2.replace(tzinfo = datetime.timezone(datetime.timedelta(hours=-5)))
        self.assertEqual(t1, t2)
    def test_localtime_epoch_notz_daylight_true(self):
        test.support.patch(self, time, 'daylight', True)
        t0 = datetime.datetime(1990, 1, 1)
        t1 = utils.localtime(t0)
        t2 = utils.localtime(t0.replace(tzinfo=None))
        self.assertEqual(t1, t2)
    def test_localtime_epoch_notz_daylight_false(self):
        test.support.patch(self, time, 'daylight', False)
        t0 = datetime.datetime(1990, 1, 1)
        t1 = utils.localtime(t0)
        t2 = utils.localtime(t0.replace(tzinfo=None))
        self.assertEqual(t1, t2)
    # XXX: Need a more robust test for Olson's tzdata
    @unittest.skipIf(sys.platform.startswith('win'),
                     "Windows does not use Olson's TZ database")
    @unittest.skipUnless(os.path.exists('/usr/share/zoneinfo') or
                         os.path.exists('/usr/lib/zoneinfo'),
                         "Can't find the Olson's TZ database")
    @test.support.run_with_tz('Europe/Kiev')
    def test_variable_tzname(self):
        # tzname must track historical zone changes (MSK in 1984, EET in 1994).
        t0 = datetime.datetime(1984, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        self.assertEqual(t1.tzname(), 'MSK')
        t0 = datetime.datetime(1994, 1, 1, tzinfo=datetime.timezone.utc)
        t1 = utils.localtime(t0)
        self.assertEqual(t1.tzname(), 'EET')

if __name__ == '__main__':
    unittest.main()
|
18padx08/PPTex
|
refs/heads/master
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/printing/python.py
|
118
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import keyword as kw
import sympy
from .repr import ReprPrinter
from .str import StrPrinter
# A list of classes that should be printed using StrPrinter
STRPRINT = ("Add", "Infinity", "Integer", "Mul", "NegativeInfinity",
"Pow", "Zero")
class PythonPrinter(ReprPrinter, StrPrinter):
    """A printer which converts an expression into its Python interpretation.

    While printing, it records every Symbol and every undefined Function it
    meets (in `self.symbols` / `self.functions`) so that python() can emit
    the corresponding `x = Symbol('x')` / `f = Function('f')` declarations.
    """

    def __init__(self, settings=None):
        ReprPrinter.__init__(self)
        StrPrinter.__init__(self, settings)
        self.symbols = []    # symbol names seen, in first-seen order
        self.functions = []  # undefined function names seen, in order

        # Create print methods for classes that should use StrPrinter instead
        # of ReprPrinter.
        # NOTE: this rebinds class-level attributes on every instantiation;
        # harmless (the bindings are identical each time), but it could be
        # done once at class-definition time.
        for name in STRPRINT:
            f_name = "_print_%s" % name
            f = getattr(StrPrinter, f_name)
            setattr(PythonPrinter, f_name, f)

    def _print_Function(self, expr):
        """Record undefined functions so python() can declare them."""
        func = expr.func.__name__
        if not hasattr(sympy, func) and func not in self.functions:
            self.functions.append(func)
        return StrPrinter._print_Function(self, expr)

    # procedure (!) for defining symbols which have to be defined in
    # print_python()
    def _print_Symbol(self, expr):
        """Record each symbol once so python() can declare it."""
        symbol = self._str(expr)
        if symbol not in self.symbols:
            self.symbols.append(symbol)
        return StrPrinter._print_Symbol(self, expr)

    def _print_module(self, expr):
        raise ValueError('Modules in the expression are unacceptable')
def python(expr, **settings):
    """Return Python interpretation of passed expression
    (can be passed to the exec() function without any modifications).

    The returned source first declares every Symbol/Function found in the
    expression, then assigns the printed expression to `e`. Names that are
    Python keywords are escaped with trailing underscores and renamed
    consistently inside the expression.
    """
    printer = PythonPrinter(settings)
    exprp = printer.doprint(expr)

    result = ''
    # Declarations for the symbols and functions found while printing.
    renamings = {}
    for symbolname in printer.symbols:
        newsymbolname = symbolname
        # Escape symbol names that are reserved python keywords
        if kw.iskeyword(newsymbolname):
            while True:
                newsymbolname += "_"
                # Keep appending "_" until the name collides with nothing.
                if (newsymbolname not in printer.symbols and
                        newsymbolname not in printer.functions):
                    renamings[sympy.Symbol(
                        symbolname)] = sympy.Symbol(newsymbolname)
                    break
        result += newsymbolname + ' = Symbol(\'' + symbolname + '\')\n'

    for functionname in printer.functions:
        newfunctionname = functionname
        # Escape function names that are reserved python keywords
        if kw.iskeyword(newfunctionname):
            while True:
                newfunctionname += "_"
                if (newfunctionname not in printer.symbols and
                        newfunctionname not in printer.functions):
                    renamings[sympy.Function(
                        functionname)] = sympy.Function(newfunctionname)
                    break
        result += newfunctionname + ' = Function(\'' + functionname + '\')\n'

    # Re-print only when something was actually renamed.
    if renamings:
        exprp = expr.subs(renamings)
    result += 'e = ' + printer._str(exprp)
    return result
def print_python(expr, **settings):
    """Print the Python interpretation of *expr* (see python())."""
    code = python(expr, **settings)
    print(code)
|
shastikk/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/googlesearch.py
|
168
|
from __future__ import unicode_literals
import itertools
import re
from .common import SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
)
class GoogleSearchIE(SearchInfoExtractor):
    """Search extractor for Google Video results ("gvsearchN:<query>")."""
    IE_DESC = 'Google Video search'
    _MAX_RESULTS = 1000
    IE_NAME = 'video.google:search'
    _SEARCH_KEY = 'gvsearch'
    _TEST = {
        'url': 'gvsearch15:python language',
        'info_dict': {
            'id': 'python language',
            'title': 'python language',
        },
        'playlist_count': 15,
    }
    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        entries = []
        res = {
            '_type': 'playlist',
            'id': query,
            'title': query,
        }
        # Google paginates 10 results per page (the start= parameter below).
        for pagenum in itertools.count():
            result_url = (
                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
            webpage = self._download_webpage(
                result_url, 'gvsearch:' + query,
                note='Downloading result page ' + str(pagenum + 1))
            for hit_idx, mobj in enumerate(re.finditer(
                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
                # Skip playlists: keep only hits that have a matching
                # vidthumb element on the page.
                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
                    continue
                entries.append({
                    '_type': 'url',
                    'url': mobj.group(1)
                })
            # Stop once n entries are collected or no "next page" link exists.
            if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage):
                res['entries'] = entries[:n]
                return res
|
ssteo/scrapy
|
refs/heads/master
|
tests/mocks/dummydbm.py
|
179
|
"""DBM-like dummy module"""
import collections
class DummyDB(dict):
    """Dictionary-backed stand-in for a DBM database."""
    def close(self):
        """No-op; present only for DBM interface compatibility."""
        return None

# DBM modules expose an `error` exception; KeyError matches dict semantics.
error = KeyError

# Registry of databases keyed by file name, so repeated opens share state.
_DATABASES = collections.defaultdict(DummyDB)

def open(file, flag='r', mode=0o666):
    """Open or create a dummy database compatible.

    Arguments `flag` and `mode` are ignored.
    """
    # Same file name always yields the same DummyDB instance.
    db = _DATABASES[file]
    return db
|
crazypoo/find-unused-images
|
refs/heads/master
|
find_unused_images.py
|
1
|
#######################################################
# This script is for reduce pakage size of ipa file
# copyright reserved xiabin
# trimpackage.py
#######################################################
import sys
import os
import re
import types
xcext = "xcodeproj"  # extension of an Xcode project bundle
pbxname = "project.pbxproj"  # project file inside the bundle
# Help
def help():
print "Please using this script like this \"python trimpackage.py project path\""
# Check the argument
def argvcheck():
    """Validate argv and set the global `projpath`.

    Returns True only when an existing path ending in .xcodeproj was given.
    """
    if len(sys.argv) < 2:
        return False
    else:
        global projpath
        projpath = sys.argv[1]
        # Check if file exist
        exist = os.path.exists(projpath)
        if exist == False:
            return False
        else:
            return projpath.endswith(xcext)
# Get compiled .png and source file
def getresource(pngdic,sourcedic):
    """Collect compiled @2x .png resources and source files from the pbxproj.

    Results are stored into the passed-in dicts as name -> name mappings.
    """
    # 1.Get project file path
    pbxpath = projpath + "/" + pbxname
    # 2.Read project file
    pbxhandle = open(pbxpath,"r")
    isresblock = False
    issourceblock = False
    # 3.Get .png and source file
    # Track whether the current line lies inside the Resources or the
    # Sources build-phase section of the project file.
    for line in pbxhandle:
        if (line.find("Begin PBXResourcesBuildPhase section") == -1) and (not isresblock) and (line.find("Begin PBXSourcesBuildPhase section") == -1) and (not issourceblock):
            continue
        elif line.find("Begin PBXResourcesBuildPhase section") != -1:
            isresblock = True
        elif line.find("Begin PBXSourcesBuildPhase section") != -1:
            issourceblock = True
        elif line.find("End PBXResourcesBuildPhase section") != -1:
            isresblock = False
        elif line.find("End PBXSourcesBuildPhase section") != -1:
            issourceblock = False
        else:
            if isresblock: # In resource block
                # Captures the base name before "@2x.png".
                pattern = re.compile(r"\*\s(.+)(?:@2x)+\.png")
                match = pattern.findall(line)
                if match:
                    pngdic[match[0]] = match[0]
            if issourceblock: # In source block
                # NOTE(review): r"\*\s(.+\.m*)" also matches names with zero
                # trailing 'm' (any dotted name) -- presumably only .m/.mm
                # sources were intended; verify.
                pattern = re.compile(r"\*\s(.+\.m*)")
                match = pattern.findall(line)
                if match:
                    sourcedic[match[0]] = match[0]
    pbxhandle.close()
def checkinfile(filepath, pngdic):
print "Checking file: " + filepath
# Read source file
filehandle = open(filepath)
iscomment = False
pattern = re.compile(r'.*imageNamed:@"([^@\.]*)(?:@2x)*(?:\.png)*"') # Find the source
for line in filehandle:
# Trim white space
if iscomment:
commentendpos = line.find("*/")
if commentendpos != -1:
iscomment = False
# In this version, we ignore this case "...*/ your code"
else:
commentbeginpos = line.find("/*")
if commentbeginpos != -1:
iscomment = True
continue
# In this version, we ignore this case "your code /*..."
commentpos = line.find("//")
match = pattern.match(line)
matchpos = -1
if match:
group = match.groups(match.lastindex)
filename = group[0]
matchpos = match.start(match.lastindex)
if matchpos < commentpos or commentpos == -1: # Code not in comment
print "find " + filename
# Remove file name from pngdic
if pngdic.get(filename, 0) == filename:
pngdic.pop(filename)
def check(root, pngdic, sourcedic):
    """Recursively walk *root*, scanning known source files for image uses."""
    # Travel the path
    for onepath in os.listdir(root):
        fullpath = os.path.join(root,onepath)
        if os.path.isdir(fullpath):
            # Do not descend into the .xcodeproj bundle itself.
            if not fullpath.endswith(xcext):
                check(fullpath, pngdic, sourcedic)
        else:
            # Only scan files that were listed in the Sources build phase.
            if sourcedic.get(onepath, 0) == onepath:
                checkinfile(fullpath,pngdic)
#######################################################
if __name__ == "__main__":
    if argvcheck() == True:
        allpngdic = {}
        allsourcedic = {}
        # Collect @2x resources and source files from the project file, then
        # walk the tree removing every referenced image; what remains in
        # allpngdic is unused.
        getresource(allpngdic,allsourcedic)
        # Travel project
        projroot = os.path.dirname(projpath)
        check(projroot, allpngdic, allsourcedic)
        print "The png files which are not compiled are list as follows:"
        for filename in allpngdic:
            print filename
    else:
        help()
|
SkippsDev/Py-Agar.io
|
refs/heads/master
|
src/protocol/UpdatePing.py
|
1
|
from src.packet.ArrayBuffer import ArrayBuffer
from src.packet.NetPacker import NetPacker
class UpdatePing:
    """Serializes and sends the ping packet (messageType 224)."""
    def __init__(self):
        # Byte buffer plus the packer that writes primitives into it.
        self.buffer = ArrayBuffer()
        self.packer = NetPacker(self.buffer)
    def build(self, handler, payload, isBinary):
        """Store the connection context, build the message, and send it."""
        self.handler = handler
        self.payload = payload
        self.isBinary = isBinary
        self.buildMessage()
        self.sendData()
    def buildMessage(self):
        # NOTE(review): self.payload is stored but never serialized -- the
        # ping message is only the one-byte type. Confirm this is intended.
        self.packer.addUint8(224) # Ping messageType
    def sendData(self):
        self.handler.sendMessage(self.buffer.getData(), self.isBinary)
|
hugs/selenium
|
refs/heads/master
|
selenium/src/py/lib/docutils/writers/s5_html/__init__.py
|
5
|
# Author: Chris Liechti
# Contact: cliechti@gmx.net
# Author: David Goodger
# Contact: goodger@python.org
# Revision: $Revision: 4461 $
# Date: $Date: 2006-04-01 02:25:45 +0200 (Sat, 01 Apr 2006) $
# Copyright: This module has been placed in the public domain.
"""
S5/HTML Slideshow Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import docutils
from docutils import frontend, nodes, utils
from docutils.writers import html4css1
from docutils.parsers.rst import directives
# Path to the bundled themes directory, expressed relative to the current
# working directory (the form utils.relative_path expects).
themes_dir_path = utils.relative_path(
    os.path.join(os.getcwd(), 'dummy'),
    os.path.join(os.path.dirname(__file__), 'themes'))
def find_theme(name):
    """Return the filesystem path of installed theme *name*.

    Raises docutils.ApplicationError when no such theme directory exists.
    (Only the bundled themes directory is searched; working/destination/
    config/plugin directories are possible future candidates.)
    """
    path = os.path.join(themes_dir_path, name)
    if os.path.isdir(path):
        return path
    raise docutils.ApplicationError(
        'Theme directory not found: %r (path: %r)' % (name, path))
class Writer(html4css1.Writer):
    """HTML writer producing S5 slideshows; extends the html4css1 writer
    with theme selection/copying and slideshow view options."""
    settings_spec = html4css1.Writer.settings_spec + (
        'S5 Slideshow Specific Options',
        'For the S5/HTML writer, the --no-toc-backlinks option '
        '(defined in General Docutils Options above) is the default, '
        'and should not be changed.',
        (('Specify an installed S5 theme by name. Overrides --theme-url. '
          'The default theme name is "default". The theme files will be '
          'copied into a "ui/<theme>" directory, in the same directory as the '
          'destination file (output HTML). Note that existing theme files '
          'will not be overwritten (unless --overwrite-theme-files is used).',
          ['--theme'],
          {'default': 'default', 'metavar': '<name>',
           'overrides': 'theme_url'}),
         ('Specify an S5 theme URL. The destination file (output HTML) will '
          'link to this theme; nothing will be copied. Overrides --theme.',
          ['--theme-url'],
          {'metavar': '<URL>', 'overrides': 'theme'}),
         ('Allow existing theme files in the ``ui/<theme>`` directory to be '
          'overwritten. The default is not to overwrite theme files.',
          ['--overwrite-theme-files'],
          {'action': 'store_true'}),
         ('Keep existing theme files in the ``ui/<theme>`` directory; do not '
          'overwrite any. This is the default.',
          ['--keep-theme-files'],
          {'dest': 'overwrite_theme_files', 'action': 'store_false'}),
         ('Set the initial view mode to "slideshow" [default] or "outline".',
          ['--view-mode'],
          {'choices': ['slideshow', 'outline'], 'default': 'slideshow',
           'metavar': '<mode>'}),
         ('Normally hide the presentation controls in slideshow mode. '
          'This is the default.',
          ['--hidden-controls'],
          {'action': 'store_true', 'default': True}),
         ('Always show the presentation controls in slideshow mode. '
          'The default is to hide the controls.',
          ['--visible-controls'],
          {'dest': 'hidden_controls', 'action': 'store_false'}),
         ('Enable the current slide indicator ("1 / 15"). '
          'The default is to disable it.',
          ['--current-slide'],
          {'action': 'store_true'}),
         ('Disable the current slide indicator. This is the default.',
          ['--no-current-slide'],
          {'dest': 'current_slide', 'action': 'store_false'}),))
    # TOC backlinks would break the slide layout, hence forced off.
    settings_default_overrides = {'toc_backlinks': 0}
    config_section = 's5_html writer'
    config_section_dependencies = ('writers', 'html4css1 writer')
    def __init__(self):
        html4css1.Writer.__init__(self)
        self.translator_class = S5HTMLTranslator
class S5HTMLTranslator(html4css1.HTMLTranslator):
    """Translator emitting S5 slideshow HTML (templates and theme tables)."""
    # Head fragment; %(path)s, %(view_mode)s and %(control_visibility)s are
    # filled in by __init__.
    s5_stylesheet_template = """\
<!-- configuration parameters -->
<meta name="defaultView" content="%(view_mode)s" />
<meta name="controlVis" content="%(control_visibility)s" />
<!-- style sheet links -->
<script src="%(path)s/slides.js" type="text/javascript"></script>
<link rel="stylesheet" href="%(path)s/slides.css"
      type="text/css" media="projection" id="slideProj" />
<link rel="stylesheet" href="%(path)s/outline.css"
      type="text/css" media="screen" id="outlineStyle" />
<link rel="stylesheet" href="%(path)s/print.css"
      type="text/css" media="print" id="slidePrint" />
<link rel="stylesheet" href="%(path)s/opera.css"
      type="text/css" media="projection" id="operaFix" />\n"""
    # The script element must go in front of the link elements to
    # avoid a flash of unstyled content (FOUC), reproducible with
    # Firefox.
    disable_current_slide = """
<style type="text/css">
#currentSlide {display: none;}
</style>\n"""
    layout_template = """\
<div class="layout">
<div id="controls"></div>
<div id="currentSlide"></div>
<div id="header">
%(header)s
</div>
<div id="footer">
%(title)s%(footer)s
</div>
</div>\n"""
# <div class="topleft"></div>
# <div class="topright"></div>
# <div class="bottomleft"></div>
# <div class="bottomright"></div>
    default_theme = 'default'
    """Name of the default theme."""
    base_theme_file = '__base__'
    """Name of the file containing the name of the base theme."""
    direct_theme_files = (
        'slides.css', 'outline.css', 'print.css', 'opera.css', 'slides.js')
    """Names of theme files directly linked to in the output HTML"""
    indirect_theme_files = (
        's5-core.css', 'framing.css', 'pretty.css', 'blank.gif', 'iepngfix.htc')
    """Names of files used indirectly; imported or used by files in
    `direct_theme_files`."""
    required_theme_files = indirect_theme_files + direct_theme_files
    """Names of mandatory theme files."""
    def __init__(self, *args):
        """Set up the theme and insert the S5 head fragment, then initialize
        per-document slideshow state."""
        html4css1.HTMLTranslator.__init__(self, *args)
        #insert S5-specific stylesheet and script stuff:
        self.theme_file_path = None
        self.setup_theme()
        view_mode = self.document.settings.view_mode
        # Indexing with the boolean: False -> 'visible', True -> 'hidden'.
        control_visibility = ('visible', 'hidden')[self.document.settings
                                                   .hidden_controls]
        self.stylesheet.append(self.s5_stylesheet_template
                               % {'path': self.theme_file_path,
                                  'view_mode': view_mode,
                                  'control_visibility': control_visibility})
        if not self.document.settings.current_slide:
            self.stylesheet.append(self.disable_current_slide)
        self.add_meta('<meta name="version" content="S5 1.1" />\n')
        # Per-document slideshow state.
        self.s5_footer = []
        self.s5_header = []
        self.section_count = 0
        self.theme_files_copied = None
def setup_theme(self):
if self.document.settings.theme:
self.copy_theme()
elif self.document.settings.theme_url:
self.theme_file_path = self.document.settings.theme_url
else:
raise docutils.ApplicationError(
'No theme specified for S5/HTML writer.')
    def copy_theme(self):
        """
        Locate & copy theme files.

        A theme may be explicitly based on another theme via a '__base__'
        file. The default base theme is 'default'. Files are accumulated
        from the specified theme, any base themes, and 'default'.
        """
        settings = self.document.settings
        path = find_theme(settings.theme)
        theme_paths = [path]
        self.theme_files_copied = {}
        required_files_copied = {}
        # This is a link (URL) in HTML, so we use "/", not os.sep:
        self.theme_file_path = '%s/%s' % ('ui', settings.theme)
        if settings._destination:
            dest = os.path.join(
                os.path.dirname(settings._destination), 'ui', settings.theme)
            if not os.path.isdir(dest):
                os.makedirs(dest)
        else:
            # no destination, so we can't copy the theme
            return
        default = 0
        # Walk the theme chain: the requested theme first, then each base
        # theme named in its '__base__' file, ending with 'default'.
        while path:
            for f in os.listdir(path): # copy all files from each theme
                if f == self.base_theme_file:
                    continue # ... except the "__base__" file
                if ( self.copy_file(f, path, dest)
                     and f in self.required_theme_files):
                    required_files_copied[f] = 1
            if default:
                break # "default" theme has no base theme
            # Find the "__base__" file in theme directory:
            base_theme_file = os.path.join(path, self.base_theme_file)
            # If it exists, read it and record the theme path:
            if os.path.isfile(base_theme_file):
                lines = open(base_theme_file).readlines()
                # First non-blank, non-comment line names the base theme.
                for line in lines:
                    line = line.strip()
                    if line and not line.startswith('#'):
                        path = find_theme(line)
                        if path in theme_paths: # check for duplicates (cycles)
                            path = None # if found, use default base
                        else:
                            theme_paths.append(path)
                        break
                else: # no theme name found
                    path = None # use default base
            else: # no base theme file found
                path = None # use default base
            if not path:
                path = find_theme(self.default_theme)
                theme_paths.append(path)
                default = 1
        if len(required_files_copied) != len(self.required_theme_files):
            # Some required files weren't found & couldn't be copied.
            required = list(self.required_theme_files)
            for f in required_files_copied.keys():
                required.remove(f)
            raise docutils.ApplicationError(
                'Theme files not found: %s'
                % ', '.join(['%r' % f for f in required]))
files_to_skip_pattern = re.compile(r'~$|\.bak$|#$|\.cvsignore$')
    def copy_file(self, name, source_dir, dest_dir):
        """
        Copy file `name` from `source_dir` to `dest_dir`.
        Return 1 if the file exists in either `source_dir` or `dest_dir`.
        """
        source = os.path.join(source_dir, name)
        dest = os.path.join(dest_dir, name)
        # NOTE(review): dict.has_key is Python 2 only; `dest in
        # self.theme_files_copied` is the portable spelling -- confirm the
        # supported Python range before changing it.
        if self.theme_files_copied.has_key(dest):
            return 1
        else:
            self.theme_files_copied[dest] = 1
        if os.path.isfile(source):
            if self.files_to_skip_pattern.search(source):
                return None
            settings = self.document.settings
            if os.path.exists(dest) and not settings.overwrite_theme_files:
                settings.record_dependencies.add(dest)
            else:
                src_file = open(source, 'rb')
                src_data = src_file.read()
                src_file.close()
                dest_file = open(dest, 'wb')
                dest_dir = dest_dir.replace(os.sep, '/')
                # Rewrite intra-theme references so a theme copied under a
                # different name still finds its own files.
                dest_file.write(src_data.replace(
                    'ui/default', dest_dir[dest_dir.rfind('ui/'):]))
                dest_file.close()
                settings.record_dependencies.add(source)
            return 1
        if os.path.isfile(dest):
            return 1
    def depart_document(self, node):
        """Assemble the final document, wrapping the body in the S5 layout."""
        header = ''.join(self.s5_header)
        footer = ''.join(self.s5_footer)
        title = ''.join(self.html_title).replace('<h1 class="title">', '<h1>')
        layout = self.layout_template % {'header': header,
                                         'title': title,
                                         'footer': footer}
        self.fragment.extend(self.body)
        self.body_prefix.extend(layout)
        self.body_prefix.append('<div class="presentation">\n')
        # "slide0" is the title slide that precedes the first section.
        self.body_prefix.append(
            self.starttag({'classes': ['slide'], 'ids': ['slide0']}, 'div'))
        if not self.section_count:
            self.body.append('</div>\n')
        self.body_suffix.insert(0, '</div>\n')
        # skip content-type meta tag with interpolated charset value:
        self.html_head.extend(self.head[1:])
        self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
                              + self.docinfo + self.body
                              + self.body_suffix[:-1])
def depart_footer(self, node):
start = self.context.pop()
self.s5_footer.append('<h2>')
self.s5_footer.extend(self.body[start:])
self.s5_footer.append('</h2>')
del self.body[start:]
def depart_header(self, node):
start = self.context.pop()
header = ['<div id="header">\n']
header.extend(self.body[start:])
header.append('\n</div>\n')
del self.body[start:]
self.s5_header.extend(header)
def visit_section(self, node):
if not self.section_count:
self.body.append('\n</div>\n')
self.section_count += 1
self.section_level += 1
if self.section_level > 1:
# dummy for matching div's
self.body.append(self.starttag(node, 'div', CLASS='section'))
else:
self.body.append(self.starttag(node, 'div', CLASS='slide'))
    def visit_subtitle(self, node):
        """Render a section subtitle as a heading one level deeper."""
        if isinstance(node.parent, nodes.section):
            level = self.section_level + self.initial_header_level - 1
            # h1 is reserved for the document title.
            if level == 1:
                level = 2
            tag = 'h%s' % level
            self.body.append(self.starttag(node, tag, ''))
            self.context.append('</%s>\n' % tag)
        else:
            html4css1.HTMLTranslator.visit_subtitle(self, node)
    def visit_title(self, node, move_ids=0):
        # S5 needs no extra title markup beyond the base HTML translator's.
        html4css1.HTMLTranslator.visit_title(self, node, move_ids=move_ids)
|
kouk/boto
|
refs/heads/develop
|
boto/cacerts/__init__.py
|
260
|
# Copyright 2010 Google Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
mpetyx/energagement
|
refs/heads/master
|
energagement/energagement/wsgi.py
|
1
|
"""
WSGI config for energagement project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Must be set before the application object is created so Django can locate
# the project's settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "energagement.settings")
from django.core.wsgi import get_wsgi_application
# The WSGI callable used by servers (gunicorn, mod_wsgi, etc.).
application = get_wsgi_application()
|
emilio/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py
|
38
|
# flake8: noqa (not ideal, but nicer than adding noqa: F401 to every line!)
from .serializer import serialize
from .parser import parse
from .backends.static import compile as compile_static
from .backends.conditional import compile as compile_condition
|
Maccimo/intellij-community
|
refs/heads/master
|
python/helpers/third_party/thriftpy/_shaded_thriftpy/tornado.py
|
5
|
# -*- coding: utf-8 -*-
"""
>>> pingpong = _shaded_thriftpy.load("pingpong.thrift")
>>>
>>> class Dispatcher(object):
>>> def ping(self):
>>> return "pong"
>>> server = make_server(pingpong.PingPong, Dispatcher())
>>> server.listen(6000)
>>> client = ioloop.IOLoop.current().run_sync(
lambda: make_client(pingpong.PingPong, '127.0.0.1', 6000))
>>> ioloop.IOLoop.current().run_sync(client.ping)
'pong'
"""
from __future__ import absolute_import
from contextlib import contextmanager
from tornado import tcpserver, iostream, gen
from tornado import version as tornado_version
from io import BytesIO
from datetime import timedelta
from .transport import TTransportException, TTransportBase
from .transport.memory import TMemoryBuffer
from .thrift import TApplicationException, TProcessor, TClient
# TODO need TCyTornadoStreamTransport to work with cython binary protocol
from .protocol.binary import TBinaryProtocolFactory
from ._compat import PY3
if PY3:
import urllib
else:
import urllib2 as urllib
import urlparse
urllib.parse = urlparse
urllib.parse.quote = urllib.quote
import logging
import socket
import struct
try:
from tornado.locks import Lock
except ImportError:
try:
from toro import Lock
except ImportError:
raise RuntimeError('With tornado {}, you need to install '
'"toro"'.format(tornado_version))
logger = logging.getLogger(__name__)
class TTornadoStreamTransport(TTransportBase):
    """a framed, buffered transport over a Tornado stream"""
    DEFAULT_CONNECT_TIMEOUT = timedelta(seconds=1)
    DEFAULT_READ_TIMEOUT = timedelta(seconds=1)
    def __init__(self, host, port, stream=None, io_loop=None, ssl_options=None,
                 read_timeout=DEFAULT_READ_TIMEOUT):
        self.host = host
        self.port = port
        self.io_loop = io_loop
        self.read_timeout = read_timeout
        self.is_queuing_reads = False
        self.read_queue = []
        # Outgoing bytes are buffered here and sent as one frame on flush().
        self.__wbuf = BytesIO()
        # Serializes read_frame() calls: IOStream allows one pending read.
        self._read_lock = Lock()
        self.ssl_options = ssl_options
        # servers provide a ready-to-go stream
        self.stream = stream
        if self.stream is not None:
            self._set_close_callback()
    # The `io_loop` argument to gen.with_timeout was removed in tornado 5.0.
    # NOTE(review): this is a lexicographic string comparison, which is wrong
    # for two-digit majors (e.g. '10.0' >= '5.0' is False) -- confirm the
    # supported tornado range before relying on it.
    if tornado_version >= '5.0':
        def with_timeout(self, timeout, future):
            return gen.with_timeout(timeout, future)
    else:
        def with_timeout(self, timeout, future):
            return gen.with_timeout(timeout, future, self.io_loop)
    @gen.coroutine
    def open(self, timeout=DEFAULT_CONNECT_TIMEOUT):
        """Connect to (host, port); raise TTransportException on failure."""
        logger.debug('socket connecting')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        if self.ssl_options is None:
            self.stream = iostream.IOStream(sock)
        else:
            self.stream = iostream.SSLIOStream(
                sock, ssl_options=self.ssl_options)
        try:
            yield self.with_timeout(timeout, self.stream.connect(
                (self.host, self.port)))
        except (socket.error, OSError, IOError):
            message = 'could not connect to {}:{}'.format(self.host, self.port)
            raise TTransportException(
                type=TTransportException.NOT_OPEN,
                message=message)
        self._set_close_callback()
        raise gen.Return(self)
    def _set_close_callback(self):
        self.stream.set_close_callback(self.close)
    def close(self):
        # don't raise if we intend to close
        self.stream.set_close_callback(None)
        self.stream.close()
    def read(self, _):
        # The generated code for Tornado shouldn't do individual reads -- only
        # frames at a time
        assert False, "you're doing it wrong"
    @contextmanager
    def io_exception_context(self):
        """Translate stream and timeout errors into TTransportException."""
        try:
            yield
        except (socket.error, OSError, IOError) as e:
            raise TTransportException(
                type=TTransportException.END_OF_FILE,
                message=str(e))
        except iostream.StreamBufferFullError as e:
            raise TTransportException(
                type=TTransportException.UNKNOWN,
                message=str(e))
        except gen.TimeoutError as e:
            raise TTransportException(
                type=TTransportException.TIMED_OUT,
                message=str(e))
    @gen.coroutine
    def read_frame(self):
        """Read one length-prefixed frame and return its payload bytes."""
        # IOStream processes reads one at a time
        with (yield self._read_lock.acquire()):
            with self.io_exception_context():
                # 4-byte big-endian signed length prefix, then the payload.
                frame_header = yield self._read_bytes(4)
                if len(frame_header) == 0:
                    raise iostream.StreamClosedError(
                        'Read zero bytes from stream')
                frame_length, = struct.unpack('!i', frame_header)
                logger.debug('received frame header, frame length = %d',
                             frame_length)
                frame = yield self._read_bytes(frame_length)
                logger.debug('received frame payload: %r', frame)
                raise gen.Return(frame)
    def _read_bytes(self, n):
        return self.with_timeout(self.read_timeout, self.stream.read_bytes(n))
    def write(self, buf):
        self.__wbuf.write(buf)
    def flush(self):
        frame = self.__wbuf.getvalue()
        # reset wbuf before write/flush to preserve state on underlying failure
        frame_length = struct.pack('!i', len(frame))
        self.__wbuf = BytesIO()
        with self.io_exception_context():
            return self.stream.write(frame_length + frame)
class TTornadoServer(tcpserver.TCPServer):
    """TCP server speaking framed thrift over Tornado IOStreams."""
    def __init__(self, processor, iprot_factory, oprot_factory=None,
                 transport_read_timeout=TTornadoStreamTransport.DEFAULT_READ_TIMEOUT,  # noqa
                 *args, **kwargs):
        super(TTornadoServer, self).__init__(*args, **kwargs)
        self._processor = processor
        self._iprot_factory = iprot_factory
        # Output protocol defaults to the input one when not given.
        self._oprot_factory = (oprot_factory if oprot_factory is not None
                               else iprot_factory)
        self.transport_read_timeout = transport_read_timeout
        # `io_loop` has been deprecated since tornado 4.1 and removed in 5.0
        self.__io_loop = getattr(self, 'io_loop', None)
    @gen.coroutine
    def handle_stream(self, stream, address):
        """Serve thrift calls on one client connection until it closes."""
        host, port = address
        trans = TTornadoStreamTransport(
            host=host, port=port, stream=stream,
            io_loop=self.__io_loop, read_timeout=self.transport_read_timeout)
        try:
            oprot = self._oprot_factory.get_protocol(trans)
            # Requests are parsed out of an in-memory buffer refilled with
            # each incoming frame.
            iprot = self._iprot_factory.get_protocol(TMemoryBuffer())
            while not trans.stream.closed():
                # TODO: maybe read multiple frames in advance for concurrency
                try:
                    frame = yield trans.read_frame()
                except TTransportException as e:
                    # A clean EOF just means the client hung up.
                    if e.type == TTransportException.END_OF_FILE:
                        break
                    else:
                        raise
                iprot.trans.setvalue(frame)
                api, seqid, result, call = self._processor.process_in(iprot)
                if isinstance(result, TApplicationException):
                    self._processor.send_exception(oprot, api, result, seqid)
                else:
                    try:
                        result.success = yield gen.maybe_future(call())
                    except Exception as e:
                        # raise if api don't have throws
                        if not self._processor.handle_exception(e, result):
                            raise
                    self._processor.send_result(oprot, api, result, seqid)
        except Exception:
            logger.exception('thrift exception in handle_stream')
            trans.close()
        logger.info('client disconnected %s:%d', host, port)
class TTornadoClient(TClient):
    """Thrift client whose receive path reads framed responses."""
    @gen.coroutine
    def _recv(self, api):
        # Pull the next frame off the wire into the in-memory input
        # protocol, then let TClient parse the reply out of it.
        frame = yield self._oprot.trans.read_frame()
        self._iprot.trans.setvalue(frame)
        result = super(TTornadoClient, self)._recv(api)
        raise gen.Return(result)
    def close(self):
        self._oprot.trans.close()
def make_server(
        service, handler, proto_factory=TBinaryProtocolFactory(),
        io_loop=None, ssl_options=None,
        transport_read_timeout=TTornadoStreamTransport.DEFAULT_READ_TIMEOUT):
    """Build a TTornadoServer for *service* dispatching to *handler*."""
    kwargs = {
        'iprot_factory': proto_factory,
        'transport_read_timeout': transport_read_timeout,
        'ssl_options': ssl_options,
    }
    # TCPServer lost its `io_loop` argument in tornado 5.0; only pass it on
    # older versions (string comparison mirrors the module's existing checks).
    if not tornado_version >= '5.0':
        kwargs['io_loop'] = io_loop
    return TTornadoServer(TProcessor(service, handler), **kwargs)
@gen.coroutine
def make_client(
        service, host='localhost', port=9090, proto_factory=TBinaryProtocolFactory(),
        io_loop=None, ssl_options=None,
        connect_timeout=TTornadoStreamTransport.DEFAULT_CONNECT_TIMEOUT,
        read_timeout=TTornadoStreamTransport.DEFAULT_READ_TIMEOUT,
        url=''):
    """Open a connected TTornadoClient for *service*.
    If *url* is given, its hostname and port override *host*/*port*.
    """
    if url:
        parsed_url = urllib.parse.urlparse(url)
        host = parsed_url.hostname or host
        port = parsed_url.port or port
    transport = TTornadoStreamTransport(
        host, port, io_loop=io_loop,
        ssl_options=ssl_options, read_timeout=read_timeout)
    # Input protocol parses frames out of an in-memory buffer; output goes
    # straight to the framed transport.
    iprot = proto_factory.get_protocol(TMemoryBuffer())
    oprot = proto_factory.get_protocol(transport)
    yield transport.open(connect_timeout)
    client = TTornadoClient(service, iprot, oprot)
    raise gen.Return(client)
|
AndreasMadsen/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/sets.py
|
3
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for sets.
@@set_size
@@set_intersection
@@set_union
@@set_difference
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_set_ops
# Dtypes accepted by the underlying set-operation kernels.
_VALID_DTYPES = set([
    dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
    dtypes.uint8, dtypes.uint16, dtypes.string])
def set_size(a, validate_indices=True):
  """Compute number of unique elements along last dimension of `a`.
  Args:
    a: `SparseTensor`, with indices sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse indices
       in `a`.
  Returns:
    `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
    rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
    number of unique elements in the corresponding `[0...n-1]` dimension of `a`.
  Raises:
    TypeError: If `a` is an invalid type.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if not isinstance(a, sparse_tensor.SparseTensor):
    raise TypeError("Expected `SparseTensor`, got %s." % a)
  if a.values.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("Invalid dtype %s." % a.values.dtype)
  # pylint: disable=protected-access
  return gen_set_ops.set_size(
      a.indices, a.values, a.dense_shape, validate_indices)
# Set operations are lookup-style ops with no meaningful gradient.
ops.NotDifferentiable("SetSize")
ops.NotDifferentiable("DenseToDenseSetOperation")
ops.NotDifferentiable("DenseToSparseSetOperation")
ops.NotDifferentiable("SparseToSparseSetOperation")
def _convert_to_tensors_or_sparse_tensors(a, b):
  """Convert to tensor types, and flip order if necessary.
  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`.
    b: `Tensor` or `SparseTensor` of the same type as `a`.
  Returns:
    Tuple of `(a, b, flipped)`, where `a` and `b` have been converted to
    `Tensor` or `SparseTensor`, and `flipped` indicates whether the order has
    been flipped to make it dense,sparse instead of sparse,dense (since the set
    ops do not support the latter).
  Raises:
    TypeError: if either input has an unsupported dtype, or the dtypes differ.
  """
  a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name="a")
  if a.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("'a' invalid dtype %s." % a.dtype)
  b = sparse_tensor.convert_to_tensor_or_sparse_tensor(b, name="b")
  if b.dtype.base_dtype != a.dtype.base_dtype:
    raise TypeError("Types don't match, %s vs %s." % (a.dtype, b.dtype))
  # The kernels support dense,sparse but not sparse,dense -- swap so the
  # sparse operand comes second, and report the swap to the caller.
  if (isinstance(a, sparse_tensor.SparseTensor) and
      not isinstance(b, sparse_tensor.SparseTensor)):
    return b, a, True
  return a, b, False
def _set_operation(a, b, set_operation, validate_indices=True):
  """Compute set operation of elements in last dimension of `a` and `b`.
  All but the last dimension of `a` and `b` must match.
  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
        must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
        `SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
        sorted in row-major order.
    set_operation: String indicating set operation. See
        SetOperationOp::SetOperationFromContext for valid values.
    validate_indices: Whether to validate the order and range of sparse indices
       in `a` and `b`.
  Returns:
    A `SparseTensor` with the same rank as `a` and `b`, and all but the last
    dimension the same. Elements along the last dimension contain the results
    of the set operation.
  Raises:
    TypeError: If inputs are invalid types.
    ValueError: If `a` is sparse and `b` is dense.
  """
  # Dispatch to the kernel matching the sparse/dense combination of inputs.
  if isinstance(a, sparse_tensor.SparseTensor):
    if isinstance(b, sparse_tensor.SparseTensor):
      indices, values, shape = gen_set_ops.sparse_to_sparse_set_operation(
          a.indices, a.values, a.dense_shape,
          b.indices, b.values, b.dense_shape,
          set_operation, validate_indices)
    else:
      raise ValueError("Sparse,Dense is not supported, but Dense,Sparse is. "
                       "Please flip the order of your inputs.")
  elif isinstance(b, sparse_tensor.SparseTensor):
    indices, values, shape = gen_set_ops.dense_to_sparse_set_operation(
        a, b.indices, b.values, b.dense_shape, set_operation, validate_indices)
  else:
    indices, values, shape = gen_set_ops.dense_to_dense_set_operation(
        a, b, set_operation, validate_indices)
  return sparse_tensor.SparseTensor(indices, values, shape)
def set_intersection(a, b, validate_indices=True):
  """Compute set intersection of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match. The last dimension
  of each input holds the elements of a set; for example, with rank-4 inputs
  whose innermost rows are `[1, 2]` and `[1, 3]`, the corresponding output
  row is `[1]`.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
        must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
        must be sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse
        indices in `a` and `b`.

  Returns:
    A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
    the last dimension the same. Elements along the last dimension contain the
    intersections.
  """
  left, right, _ = _convert_to_tensors_or_sparse_tensors(a, b)
  return _set_operation(left, right, "intersection", validate_indices)
def set_difference(a, b, aminusb=True, validate_indices=True):
  """Compute set difference of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match. The last dimension
  of each input holds the elements of a set; for example, with rank-4 inputs
  whose innermost rows are `[1, 2]` and `[1, 3]`, the corresponding output
  row of `set_difference(a, b, aminusb=True)` is `[2]`.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
        must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
        must be sorted in row-major order.
    aminusb: Whether to subtract `b` from `a`, vs vice versa.
    validate_indices: Whether to validate the order and range of sparse
        indices in `a` and `b`.

  Returns:
    A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
    the last dimension the same. Elements along the last dimension contain the
    differences.
  """
  left, right, flipped = _convert_to_tensors_or_sparse_tensors(a, b)
  # If the operands were swapped to satisfy the dense,sparse ordering,
  # invert the requested direction of the subtraction to compensate.
  subtract_right_from_left = aminusb != flipped
  return _set_operation(left, right,
                        "a-b" if subtract_right_from_left else "b-a",
                        validate_indices)
def set_union(a, b, validate_indices=True):
  """Compute set union of elements in last dimension of `a` and `b`.

  All but the last dimension of `a` and `b` must match. The last dimension
  of each input holds the elements of a set; for example, with rank-4 inputs
  whose innermost rows are `[1, 2]` and `[1, 3]`, the corresponding output
  row is `[1, 2, 3]`.

  Args:
    a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
        must be sorted in row-major order.
    b: `Tensor` or `SparseTensor` of the same type as `a`. If sparse, indices
        must be sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse
        indices in `a` and `b`.

  Returns:
    A `SparseTensor` whose shape is the same rank as `a` and `b`, and all but
    the last dimension the same. Elements along the last dimension contain the
    unions.
  """
  left, right, _ = _convert_to_tensors_or_sparse_tensors(a, b)
  return _set_operation(left, right, "union", validate_indices)
|
edx/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/db/backends/oracle/creation.py
|
43
|
import sys, time
from django.db.backends.creation import BaseDatabaseCreation
# Prefix prepended to database/user/tablespace names for the test run.
TEST_DATABASE_PREFIX = 'test_'
# Default password for the auto-created Oracle test user; can be overridden
# via settings['TEST_PASSWD'] (see _test_database_passwd below).
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
    """Oracle-specific test-database creation and destruction.
    Oracle has no lightweight CREATE DATABASE, so the test "database" is a
    pair of dedicated tablespaces plus a throwaway user owning the schema.
    """
    # This dictionary maps Field objects to their associated Oracle column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed.
    data_types = {
        'AutoField': 'NUMBER(11)',
        'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
        'CharField': 'NVARCHAR2(%(max_length)s)',
        'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
        'DateField': 'DATE',
        'DateTimeField': 'TIMESTAMP',
        'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'NVARCHAR2(%(max_length)s)',
        'FilePathField': 'NVARCHAR2(%(max_length)s)',
        'FloatField': 'DOUBLE PRECISION',
        'IntegerField': 'NUMBER(11)',
        'BigIntegerField': 'NUMBER(19)',
        'IPAddressField': 'VARCHAR2(15)',
        'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
        'OneToOneField': 'NUMBER(11)',
        'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'SlugField': 'NVARCHAR2(%(max_length)s)',
        'SmallIntegerField': 'NUMBER(11)',
        'TextField': 'NCLOB',
        'TimeField': 'TIMESTAMP',
        'URLField': 'VARCHAR2(%(max_length)s)',
    }
    def __init__(self, connection):
        # `remember` preserves the real USER/PASSWORD so _destroy_test_db()
        # can restore them after the test run.
        self.remember = {}
        super(DatabaseCreation, self).__init__(connection)
    def _create_test_db(self, verbosity=1, autoclobber=False):
        """Create the test tablespaces and user, retrying after destruction
        of stale leftovers if the user confirms (or autoclobber is set)."""
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }
        self.remember['user'] = self.connection.settings_dict['USER']
        self.remember['passwd'] = self.connection.settings_dict['PASSWORD']
        cursor = self.connection.cursor()
        if self._test_database_create():
            if verbosity >= 1:
                print 'Creating test database...'
            try:
                self._execute_test_db_creation(cursor, parameters, verbosity)
            except Exception, e:
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test database..."
                        self._execute_test_db_destruction(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print "Creating test database..."
                        self._execute_test_db_creation(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)
        if self._test_user_create():
            if verbosity >= 1:
                print "Creating test user..."
            try:
                self._create_test_user(cursor, parameters, verbosity)
            except Exception, e:
                sys.stderr.write("Got an error creating the test user: %s\n" % e)
                if not autoclobber:
                    confirm = raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print "Destroying old test user..."
                        self._destroy_test_user(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print "Creating test user..."
                        self._create_test_user(cursor, parameters, verbosity)
                    except Exception, e:
                        sys.stderr.write("Got an error recreating the test user: %s\n" % e)
                        sys.exit(2)
                else:
                    print "Tests cancelled."
                    sys.exit(1)
        # Tests run as the throwaway test user from here on.
        self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict["USER"] = TEST_USER
        self.connection.settings_dict["PASSWORD"] = TEST_PASSWD
        return self.connection.settings_dict['NAME']
    def test_db_signature(self):
        """Identity tuple used to detect databases shared between aliases."""
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            settings_dict['NAME'],
            settings_dict['TEST_USER'],
        )
    def _destroy_test_db(self, test_database_name, verbosity=1):
        """
        Destroy the test user and tablespaces created by _create_test_db(),
        restoring the original connection credentials.
        """
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
        self.connection.settings_dict["USER"] = self.remember['user']
        self.connection.settings_dict["PASSWORD"] = self.remember['passwd']
        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }
        cursor = self.connection.cursor()
        time.sleep(1) # To avoid "database is being accessed by other users" errors.
        if self._test_user_create():
            if verbosity >= 1:
                print 'Destroying test user...'
            self._destroy_test_user(cursor, parameters, verbosity)
        if self._test_database_create():
            if verbosity >= 1:
                print 'Destroying test database tables...'
            self._execute_test_db_destruction(cursor, parameters, verbosity)
        self.connection.close()
    def _execute_test_db_creation(self, cursor, parameters, verbosity):
        # Creates the data and temp tablespaces backing the test schema.
        if verbosity >= 2:
            print "_create_test_db(): dbname = %s" % parameters['dbname']
        statements = [
            """CREATE TABLESPACE %(tblspace)s
            DATAFILE '%(tblspace)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
            """,
            """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
            TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
            """,
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _create_test_user(self, cursor, parameters, verbosity):
        # Creates the throwaway user that owns the test schema.
        if verbosity >= 2:
            print "_create_test_user(): username = %s" % parameters['user']
        statements = [
            """CREATE USER %(user)s
            IDENTIFIED BY %(password)s
            DEFAULT TABLESPACE %(tblspace)s
            TEMPORARY TABLESPACE %(tblspace_temp)s
            """,
            """GRANT CONNECT, RESOURCE TO %(user)s""",
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _execute_test_db_destruction(self, cursor, parameters, verbosity):
        # Drops both test tablespaces together with their datafiles.
        if verbosity >= 2:
            print "_execute_test_db_destruction(): dbname=%s" % parameters['dbname']
        statements = [
            'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _destroy_test_user(self, cursor, parameters, verbosity):
        if verbosity >= 2:
            print "_destroy_test_user(): user=%s" % parameters['user']
            print "Be patient. This can take some time..."
        statements = [
            'DROP USER %(user)s CASCADE',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _execute_statements(self, cursor, statements, parameters, verbosity):
        # Interpolates each SQL template with `parameters` and executes it,
        # echoing the statement at verbosity >= 2 and re-raising on failure.
        for template in statements:
            stmt = template % parameters
            if verbosity >= 2:
                print stmt
            try:
                cursor.execute(stmt)
            except Exception, err:
                sys.stderr.write("Failed (%s)\n" % (err))
                raise
    def _test_database_name(self):
        # NOTE(review): settings_dict is a dict, so a missing key would raise
        # KeyError, not AttributeError -- the sibling helpers below catch
        # KeyError; confirm whether this branch can ever fire.
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_NAME']:
                name = self.connection.settings_dict['TEST_NAME']
        except AttributeError:
            pass
        return name
    def _test_database_create(self):
        # TEST_CREATE defaults to True: create the tablespaces unless told not to.
        return self.connection.settings_dict.get('TEST_CREATE', True)
    def _test_user_create(self):
        # TEST_USER_CREATE defaults to True: create the test user unless told not to.
        return self.connection.settings_dict.get('TEST_USER_CREATE', True)
    def _test_database_user(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
        try:
            if self.connection.settings_dict['TEST_USER']:
                name = self.connection.settings_dict['TEST_USER']
        except KeyError:
            pass
        return name
    def _test_database_passwd(self):
        name = PASSWORD
        try:
            if self.connection.settings_dict['TEST_PASSWD']:
                name = self.connection.settings_dict['TEST_PASSWD']
        except KeyError:
            pass
        return name
    def _test_database_tblspace(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_TBLSPACE']:
                name = self.connection.settings_dict['TEST_TBLSPACE']
        except KeyError:
            pass
        return name
    def _test_database_tblspace_tmp(self):
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
        try:
            if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
                name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
        except KeyError:
            pass
        return name
|
sylarcp/anita
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/orm/__init__.py
|
70
|
# orm/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from . import exc
from .mapper import (
Mapper,
_mapper_registry,
class_mapper,
configure_mappers,
reconstructor,
validates
)
from .interfaces import (
EXT_CONTINUE,
EXT_STOP,
PropComparator,
)
from .deprecated_interfaces import (
MapperExtension,
SessionExtension,
AttributeExtension,
)
from .util import (
aliased,
join,
object_mapper,
outerjoin,
polymorphic_union,
was_deleted,
with_parent,
with_polymorphic,
)
from .properties import ColumnProperty
from .relationships import RelationshipProperty
from .descriptor_props import (
ComparableProperty,
CompositeProperty,
SynonymProperty,
)
from .relationships import (
foreign,
remote,
)
from .session import (
Session,
object_session,
sessionmaker,
make_transient,
make_transient_to_detached
)
from .scoping import (
scoped_session
)
from . import mapper as mapperlib
from .query import AliasOption, Query, Bundle
from ..util.langhelpers import public_factory
from .. import util as _sa_util
from . import strategies as _strategies
def create_session(bind=None, **kwargs):
    """Create a new :class:`.Session` with no automation enabled by default.

    This function is used primarily for testing.  The usual route to
    :class:`.Session` creation is via its constructor or the
    :func:`.sessionmaker` function.

    :param bind: optional, a single Connectable to use for all
     database access in the created
     :class:`~sqlalchemy.orm.session.Session`.

    :param \*\*kwargs: optional, passed through to the
     :class:`.Session` constructor.

    :returns: an :class:`~sqlalchemy.orm.session.Session` instance

    The defaults here are the opposite of those of :func:`sessionmaker`;
    ``autoflush`` and ``expire_on_commit`` are False, ``autocommit`` is
    True, so the session acts more like the "classic" SQLAlchemy 0.3
    session.  Using :func:`sessionmaker` is recommended instead.
    """
    defaults = (('autoflush', False),
                ('autocommit', True),
                ('expire_on_commit', False))
    for option, value in defaults:
        kwargs.setdefault(option, value)
    return Session(bind=bind, **kwargs)
relationship = public_factory(RelationshipProperty, ".orm.relationship")
def relation(*args, **kwargs):
    """A synonym for :func:`relationship`."""
    return relationship(*args, **kwargs)
def dynamic_loader(argument, **kw):
    """Construct a dynamically-loading mapper property.

    This is essentially the same as using the ``lazy='dynamic'``
    argument with :func:`relationship`::

        dynamic_loader(SomeClass)

        # is the same as

        relationship(SomeClass, lazy="dynamic")

    See the section :ref:`dynamic_relationship` for more details
    on dynamic loading.
    """
    options = dict(kw)
    # Force dynamic loading regardless of any caller-supplied 'lazy' value.
    options['lazy'] = 'dynamic'
    return relationship(argument, **options)
# Public factories for the column-level property constructors.
column_property = public_factory(ColumnProperty, ".orm.column_property")
composite = public_factory(CompositeProperty, ".orm.composite")
def backref(name, **kwargs):
    """Create a back reference with explicit keyword arguments, which are
    the same arguments one can send to :func:`relationship`.

    Used with the ``backref`` keyword argument to :func:`relationship` in
    place of a string argument, e.g.::

        'items':relationship(
            SomeItem, backref=backref('parent', lazy='subquery'))

    """
    # The (name, kwargs) pair is interpreted later by relationship().
    return name, kwargs
def deferred(*columns, **kw):
    """Indicate a column-based mapped attribute that by default will
    not load unless accessed.

    :param \*columns: columns to be mapped. This is typically a single
     :class:`.Column` object, however a collection is supported in order
     to support multiple columns mapped under the same attribute.

    :param \**kw: additional keyword arguments passed to
     :class:`.ColumnProperty`.

    .. seealso::

        :ref:`deferred`

    """
    # deferred=True is the only thing distinguishing this from column_property().
    return ColumnProperty(deferred=True, *columns, **kw)
# Public factories for Mapper and the descriptor-based properties.
mapper = public_factory(Mapper, ".orm.mapper")
synonym = public_factory(SynonymProperty, ".orm.synonym")
comparable_property = public_factory(ComparableProperty,
                                     ".orm.comparable_property")
@_sa_util.deprecated("0.7", message=":func:`.compile_mappers` "
                     "is renamed to :func:`.configure_mappers`")
def compile_mappers():
    """Initialize the inter-mapper relationships of all mappers that have
    been defined.

    """
    # Deprecated alias; delegates directly to configure_mappers().
    configure_mappers()
def clear_mappers():
    """Remove all mappers from all classes.

    This function removes all instrumentation from classes and disposes
    of their associated mappers.  Once called, the classes are unmapped
    and can be later re-mapped with new mappers.

    :func:`.clear_mappers` is *not* for normal use: mappers are permanent
    structural components of user-defined classes and are disposed of
    automatically when a mapped class itself is garbage collected.  Its
    only legitimate audience is test suites that re-map the same classes
    with different mappings -- essentially SQLAlchemy's own test suite
    and those of ORM extension libraries.
    """
    mapperlib._CONFIGURE_MUTEX.acquire()
    try:
        while _mapper_registry:
            try:
                # can't even reliably call list(weakdict) in jython
                doomed, _ = _mapper_registry.popitem()
                doomed.dispose()
            except KeyError:
                # registry emptied out from under us; just retry the loop test
                pass
    finally:
        mapperlib._CONFIGURE_MUTEX.release()
from . import strategy_options

# Module-level loader-option functions.  Each is the "unbound" form of the
# corresponding method on strategy_options.Load, usable directly inside
# Query.options().
joinedload = strategy_options.joinedload._unbound_fn
joinedload_all = strategy_options.joinedload._unbound_all_fn
contains_eager = strategy_options.contains_eager._unbound_fn
defer = strategy_options.defer._unbound_fn
undefer = strategy_options.undefer._unbound_fn
undefer_group = strategy_options.undefer_group._unbound_fn
load_only = strategy_options.load_only._unbound_fn
lazyload = strategy_options.lazyload._unbound_fn
lazyload_all = strategy_options.lazyload_all._unbound_all_fn
subqueryload = strategy_options.subqueryload._unbound_fn
subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn
immediateload = strategy_options.immediateload._unbound_fn
noload = strategy_options.noload._unbound_fn
defaultload = strategy_options.defaultload._unbound_fn

from .strategy_options import Load
def eagerload(*args, **kwargs):
    """A synonym for :func:`joinedload()`."""
    # Retained for backwards compatibility; delegates unchanged.
    return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
    """A synonym for :func:`joinedload_all()`"""
    # Retained for backwards compatibility; delegates unchanged.
    return joinedload_all(*args, **kwargs)
contains_alias = public_factory(AliasOption, ".orm.contains_alias")
def __go(lcls):
    """Finalize module import: compute ``__all__`` and resolve deferred
    cross-module dependencies.
    """
    global __all__
    from .. import util as sa_util
    from . import dynamic
    from . import events
    import inspect as _inspect
    # Export every public (non-underscore), non-module name defined at
    # module level at the time this runs.
    __all__ = sorted(name for name, obj in lcls.items()
                     if not (name.startswith('_') or _inspect.ismodule(obj)))
    _sa_util.dependencies.resolve_all("sqlalchemy.orm")
__go(locals())
|
arth-co/saleor
|
refs/heads/master
|
saleor/userprofile/migrations/0014_user_email.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Re-add the ``email`` field to ``User``.

    Depends on 0013_remove_user_email, which it effectively reverses.
    """

    dependencies = [
        ('userprofile', '0013_remove_user_email'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='email',
            # blank=True: the address may be left empty in forms/validation.
            field=models.EmailField(blank=True, max_length=254),
        ),
    ]
|
openstack/networking-bagpipe-l2
|
refs/heads/master
|
networking_bagpipe/bagpipe_bgp/common/__init__.py
|
5
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def plural(x):
    """Return "s" when *x* has more than one element, otherwise ""."""
    return "s" if len(x) > 1 else ""
|
cristian69/KernotekV3
|
refs/heads/master
|
venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py
|
116
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys
import os
import marshal
import textwrap
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
    """Strip a trailing file extension and a trailing 'module' suffix."""
    root = filename
    if '.' in root:
        root = os.path.splitext(root)[0]
    if root.endswith('module'):
        root = root[:-len('module')]
    return root
def write_stub(resource, pyfile):
    """Write a stub loader module at *pyfile* for extension *resource*.

    The generated module loads the named binary resource dynamically
    the first time it is imported.
    """
    stub_source = textwrap.dedent("""
        def __bootstrap__():
            global __bootstrap__, __loader__, __file__
            import sys, pkg_resources, imp
            __file__ = pkg_resources.resource_filename(__name__, %r)
            __loader__ = None; del __bootstrap__, __loader__
            imp.load_dynamic(__name__,__file__)
        __bootstrap__()
        """).lstrip()
    with open(pyfile, 'w') as stub_file:
        stub_file.write(stub_source % resource)
class bdist_egg(Command):
    # Builds a ".egg" distribution: an importable zip (or directory) bundling
    # the project's code, resources, stub loaders and EGG-INFO metadata.
    description = "create an \"egg\" distribution"
    user_options = [
        ('bdist-dir=', 'b',
            "temporary directory for creating the distribution"),
        ('plat-name=', 'p', "platform name to embed in generated filenames "
                            "(default: %s)" % get_build_platform()),
        ('exclude-source-files', None,
            "remove all .py files from the generated egg"),
        ('keep-temp', 'k',
            "keep the pseudo-installation tree around after " +
            "creating the distribution archive"),
        ('dist-dir=', 'd',
            "directory to put final built distributions in"),
        ('skip-build', None,
            "skip rebuilding everything (for testing/debugging)"),
    ]
    boolean_options = [
        'keep-temp', 'skip-build', 'exclude-source-files'
    ]

    def initialize_options(self):
        # All defaults are resolved later in finalize_options().
        self.bdist_dir = None
        self.plat_name = None
        self.keep_temp = 0
        self.dist_dir = None
        self.skip_build = 0
        self.egg_output = None
        self.exclude_source_files = None

    def finalize_options(self):
        # Derive unset options from the egg_info and bdist commands.
        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
        self.egg_info = ei_cmd.egg_info
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'egg')
        if self.plat_name is None:
            self.plat_name = get_build_platform()
        self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
        if self.egg_output is None:
            # Compute filename of the output egg
            basename = Distribution(
                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
                get_python_version(),
                self.distribution.has_ext_modules() and self.plat_name
            ).egg_name()
            self.egg_output = os.path.join(self.dist_dir, basename+'.egg')

    def do_install_data(self):
        # Hack for packages that install data to install's --install-lib
        self.get_finalized_command('install').install_lib = self.bdist_dir
        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
        old, self.distribution.data_files = self.distribution.data_files,[]
        for item in old:
            if isinstance(item,tuple) and len(item)==2:
                if os.path.isabs(item[0]):
                    realpath = os.path.realpath(item[0])
                    normalized = os.path.normcase(realpath)
                    if normalized==site_packages or normalized.startswith(
                        site_packages+os.sep
                    ):
                        # Rewrite absolute site-packages targets as
                        # paths relative to the egg build directory.
                        item = realpath[len(site_packages)+1:], item[1]
                    # XXX else: raise ???
            self.distribution.data_files.append(item)
        try:
            log.info("installing package data to %s" % self.bdist_dir)
            self.call_command('install_data', force=0, root=None)
        finally:
            # Restore the original data_files list no matter what.
            self.distribution.data_files = old

    def get_outputs(self):
        # The only output is the final .egg file path.
        return [self.egg_output]

    def call_command(self,cmdname,**kw):
        """Invoke reinitialized command `cmdname` with keyword args"""
        for dirname in INSTALL_DIRECTORY_ATTRS:
            kw.setdefault(dirname,self.bdist_dir)
        kw.setdefault('skip_build',self.skip_build)
        kw.setdefault('dry_run', self.dry_run)
        cmd = self.reinitialize_command(cmdname, **kw)
        self.run_command(cmdname)
        return cmd

    def run(self):
        """Build the egg: install code/data into a temp tree, generate
        stub loaders and EGG-INFO, then zip the tree up."""
        # Generate metadata first
        self.run_command("egg_info")
        # We run install_lib before install_data, because some data hacks
        # pull their data path from the install_lib command.
        log.info("installing library code to %s" % self.bdist_dir)
        instcmd = self.get_finalized_command('install')
        old_root = instcmd.root
        instcmd.root = None
        if self.distribution.has_c_libraries() and not self.skip_build:
            self.run_command('build_clib')
        cmd = self.call_command('install_lib', warn_dir=0)
        instcmd.root = old_root
        all_outputs, ext_outputs = self.get_ext_outputs()
        self.stubs = []
        to_compile = []
        for (p,ext_name) in enumerate(ext_outputs):
            # Each C extension gets a .py stub loader next to it.
            filename,ext = os.path.splitext(ext_name)
            pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
            self.stubs.append(pyfile)
            log.info("creating stub loader for %s" % ext_name)
            if not self.dry_run:
                write_stub(os.path.basename(ext_name), pyfile)
            to_compile.append(pyfile)
            ext_outputs[p] = ext_name.replace(os.sep,'/')
        if to_compile:
            cmd.byte_compile(to_compile)
        if self.distribution.data_files:
            self.do_install_data()
        # Make the EGG-INFO directory
        archive_root = self.bdist_dir
        egg_info = os.path.join(archive_root,'EGG-INFO')
        self.mkpath(egg_info)
        if self.distribution.scripts:
            script_dir = os.path.join(egg_info, 'scripts')
            log.info("installing scripts to %s" % script_dir)
            self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
        self.copy_metadata_to(egg_info)
        native_libs = os.path.join(egg_info, "native_libs.txt")
        if all_outputs:
            log.info("writing %s" % native_libs)
            if not self.dry_run:
                ensure_directory(native_libs)
                libs_file = open(native_libs, 'wt')
                libs_file.write('\n'.join(all_outputs))
                libs_file.write('\n')
                libs_file.close()
        elif os.path.isfile(native_libs):
            log.info("removing %s" % native_libs)
            if not self.dry_run:
                os.unlink(native_libs)
        write_safety_flag(
            os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
        )
        if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
            log.warn(
                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
                "Use the install_requires/extras_require setup() args instead."
            )
        if self.exclude_source_files:
            self.zap_pyfiles()
        # Make the archive
        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
                          dry_run=self.dry_run, mode=self.gen_header())
        if not self.keep_temp:
            remove_tree(self.bdist_dir, dry_run=self.dry_run)
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution,'dist_files',[]).append(
            ('bdist_egg',get_python_version(),self.egg_output))

    def zap_pyfiles(self):
        # Delete .py sources when --exclude-source-files was requested.
        log.info("Removing .py files from temporary directory")
        for base,dirs,files in walk_egg(self.bdist_dir):
            for name in files:
                if name.endswith('.py'):
                    path = os.path.join(base,name)
                    log.debug("Deleting %s", path)
                    os.unlink(path)

    def zip_safe(self):
        # Honor an explicit zip_safe flag; otherwise analyze the build tree.
        safe = getattr(self.distribution,'zip_safe',None)
        if safe is not None:
            return safe
        log.warn("zip_safe flag not set; analyzing archive contents...")
        return analyze_egg(self.bdist_dir, self.stubs)

    def gen_header(self):
        """Return the zipfile open mode: 'w' for a plain egg, or 'a' after
        writing a shell header that makes the egg directly executable
        (an "eggsecutable")."""
        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
        ep = epm.get('setuptools.installation',{}).get('eggsecutable')
        if ep is None:
            return 'w' # not an eggsecutable, do it the usual way.
        if not ep.attrs or ep.extras:
            raise DistutilsSetupError(
                "eggsecutable entry point (%r) cannot have 'extras' "
                "or refer to a module" % (ep,)
            )
        pyver = sys.version[:3]
        pkg = ep.module_name
        full = '.'.join(ep.attrs)
        base = ep.attrs[0]
        basename = os.path.basename(self.egg_output)
        header = (
            "#!/bin/sh\n"
            'if [ `basename $0` = "%(basename)s" ]\n'
            'then exec python%(pyver)s -c "'
            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
            '" "$@"\n'
            'else\n'
            '  echo $0 is not the correct name for this egg file.\n'
            '  echo Please rename it back to %(basename)s and try again.\n'
            '  exec false\n'
            'fi\n'
        ) % locals()
        if not self.dry_run:
            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
            f = open(self.egg_output, 'w')
            f.write(header)
            f.close()
        return 'a'

    def copy_metadata_to(self, target_dir):
        "Copy metadata (egg info) to the target_dir"
        # normalize the path (so that a forward-slash in egg_info will
        # match using startswith below)
        norm_egg_info = os.path.normpath(self.egg_info)
        prefix = os.path.join(norm_egg_info,'')
        for path in self.ei_cmd.filelist.files:
            if path.startswith(prefix):
                target = os.path.join(target_dir, path[len(prefix):])
                ensure_directory(target)
                self.copy_file(path, target)

    def get_ext_outputs(self):
        """Get a list of relative paths to C extensions in the output distro"""
        all_outputs = []
        ext_outputs = []
        paths = {self.bdist_dir:''}
        for base, dirs, files in os.walk(self.bdist_dir):
            for filename in files:
                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
                    all_outputs.append(paths[base]+filename)
            for filename in dirs:
                paths[os.path.join(base,filename)] = paths[base]+filename+'/'
        if self.distribution.has_ext_modules():
            build_cmd = self.get_finalized_command('build_ext')
            for ext in build_cmd.extensions:
                if isinstance(ext,Library):
                    continue
                fullname = build_cmd.get_ext_fullname(ext.name)
                filename = build_cmd.get_ext_filename(fullname)
                if not os.path.basename(filename).startswith('dl-'):
                    if os.path.exists(os.path.join(self.bdist_dir,filename)):
                        ext_outputs.append(filename)
        return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
    """Walk an unpacked egg's contents, skipping the metadata directory"""
    walker = os.walk(egg_dir)
    top, subdirs, filenames = next(walker)
    if 'EGG-INFO' in subdirs:
        # Pruning here keeps os.walk from ever descending into EGG-INFO.
        subdirs.remove('EGG-INFO')
    yield top, subdirs, filenames
    for entry in walker:
        yield entry
def analyze_egg(egg_dir, stubs):
    """Scan an unpacked egg and report whether it appears zip-safe."""
    # An explicit flag file in EGG-INFO short-circuits the analysis.
    for flag, flag_file in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', flag_file)):
            return flag
    if not can_scan():
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith('.py') or name.endswith('.pyw'):
                continue
            if name.endswith('.pyc') or name.endswith('.pyo'):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Create/remove the zip-safety marker file(s) in *egg_dir*.

    `safe` may be True, False, or None (meaning: remove all markers).
    """
    for flag, flag_file in safety_flags.items():
        marker = os.path.join(egg_dir, flag_file)
        if os.path.exists(marker):
            # Remove a marker that no longer matches the requested state.
            if safe is None or bool(safe) != flag:
                os.unlink(marker)
        elif safe is not None and bool(safe) == flag:
            with open(marker, 'wt') as marker_file:
                marker_file.write('\n')
# Marker-file names written into EGG-INFO to record zip-safety analysis.
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Loads the module's compiled bytecode and scans its symbols for
    references (``__file__``, ``__path__``, source-introspecting
    ``inspect`` functions, ``python -m`` patterns) that won't work when
    the egg is imported directly from a zip file.  Returns True when the
    module looks zip-safe.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    if sys.version_info < (3, 3):
        skip = 8   # skip magic & date
    else:
        skip = 12  # skip magic & date & file size
    # Use a context manager so the file is closed even if marshal.load fails.
    with open(filename, 'rb') as f:
        f.read(skip)
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        # BUG FIX: the original list was missing a comma after 'getfile',
        # so Python concatenated it with 'getsourcelines' into the bogus
        # name 'getfilegetsourcelines' and neither function was ever flagged.
        for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    if '__name__' in symbols and '__main__' in symbols and '.' not in module:
        if sys.version[:3] == "2.4":  # -m works w/zipfiles in 2.5
            log.warn("%s: top-level module may be 'python -m' script", module)
            safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    for name in code.co_names:
        yield name
    for const in code.co_consts:
        if isinstance(const, basestring):
            yield const
        elif isinstance(const, CodeType):
            # Recurse into nested code objects (functions, classes, lambdas).
            for symbol in iter_symbols(const):
                yield symbol
def can_scan():
    """Return True when bytecode scanning is supported on this platform."""
    # Jython and IronPython don't produce CPython-compatible bytecode files.
    supported = not sys.platform.startswith('java') and sys.platform != 'cli'
    if supported:
        # CPython, PyPy, etc.
        return True
    log.warn("Unable to analyze compiled code on this platform.")
    log.warn("Please ask the author to include a 'zip_safe'"
             " setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory (see bdist_egg.call_command).
INSTALL_DIRECTORY_ATTRS = [
    'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
                 mode='w'):
    """Create a zip file from all the files under 'base_dir'.  The output
    zip file will be named 'base_dir' + ".zip".  Uses either the "zipfile"
    Python module (if available) or the InfoZIP "zip" utility (if installed
    and found on the default search path).  If neither tool is available,
    raises DistutilsExecError.  Returns the name of the output zip file.
    """
    import zipfile

    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def add_files(archive, dirname, names):
        # Archive each regular file, keyed by its path relative to base_dir.
        for entry in names:
            full = os.path.normpath(os.path.join(dirname, entry))
            if os.path.isfile(full):
                relative = full[len(base_dir) + 1:]
                if not dry_run:
                    archive.write(full, relative)
                log.debug("adding '%s'" % relative)

    if compress is None:
        compress = (sys.version >= "2.4")  # avoid 2.3 zipimport bug when 64 bits
    compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
    if not dry_run:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in os.walk(base_dir):
            add_files(z, dirname, files)
        z.close()
    else:
        # Dry run: walk anyway so the same log output is produced.
        for dirname, dirs, files in os.walk(base_dir):
            add_files(None, dirname, files)
    return zip_filename
|
SWiT/ARTT
|
refs/heads/master
|
utils.py
|
1
|
import cv2, math
def dist(p0, p1):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = p0[0] - p1[0]
    dy = p0[1] - p1[1]
    return math.sqrt(dx * dx + dy * dy)
def findCenter(pts):
    """Return the integer centroid of a non-empty sequence of (x, y) points."""
    count = len(pts)
    total_x = sum(p[0] for p in pts)
    total_y = sum(p[1] for p in pts)
    return (int(total_x / count), int(total_y / count))
def findDiffs(pt0, pt1):
    """Return the componentwise difference (pt1 - pt0) as an (x, y) tuple."""
    return (pt1[0] - pt0[0], pt1[1] - pt0[1])
def drawBorder(img, symbol, color, thickness):
    """Draw a closed polygon through the points of `symbol` onto `img`."""
    count = len(symbol)
    for i in range(count):
        start = symbol[i]
        # Wrap around so the last point connects back to the first.
        end = symbol[(i + 1) % count]
        cv2.line(img, start, end, color, thickness)
    return
|
crisisking/udbraaains
|
refs/heads/master
|
brains/mapping/migrations/0001_initial.py
|
1
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration: create the ``mapping_location`` table."""

    def forwards(self, orm):
        # Adding model 'Location'
        db.create_table('mapping_location', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('x', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
            ('y', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=300)),
            ('building_type', self.gf('django.db.models.fields.CharField')(max_length=4)),
            ('barricade_level', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)),
            ('zombies_present', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('is_ruined', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_illuminated', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('has_tree', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('report_date', self.gf('django.db.models.fields.DateTimeField')(default=None, auto_now=True, blank=True)),
        ))
        db.send_create_signal('mapping', ['Location'])

    def backwards(self, orm):
        # Deleting model 'Location'
        db.delete_table('mapping_location')

    # Frozen ORM description used by South while running this migration.
    models = {
        'mapping.location': {
            'Meta': {'object_name': 'Location'},
            'barricade_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'building_type': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'has_tree': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_illuminated': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_ruined': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'report_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'auto_now': 'True', 'blank': 'True'}),
            'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'zombies_present': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['mapping']
|
cwoebker/relo
|
refs/heads/master
|
relo/local/util.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
import os
from collections import defaultdict
from relo.core import doctype
from relo.core.log import logger
# Sentinel key under which each directory node of a path tree stores the
# list of files it directly contains (see paths2tree / printTree).
FILE_Marker = '<files>'
##### Format #####
def paths2tree(paths):
    """Convert a flat list of '/'-separated paths into a nested tree.

    Each directory becomes a dict of its children; files collect under
    the FILE_Marker key of their containing directory node.
    """
    def insert(remainder, node):
        # Attach one path (or path tail) below `node`.
        pieces = remainder.split('/', 1)
        if len(pieces) == 1:
            # No '/' left: this piece names a file.
            node[FILE_Marker].append(pieces[0])
        else:
            directory, tail = pieces
            if directory not in node:
                node[directory] = defaultdict(dict, ((FILE_Marker, []),))
            insert(tail, node[directory])

    root = defaultdict(dict, ((FILE_Marker, []),))
    for path in paths:
        insert(path, root)
    return root
def tree2paths(tree):
    """Convert a tree (as built by paths2tree) back into a flat path list.

    This is the inverse of paths2tree: files under FILE_Marker are emitted
    as-is, and files in nested directory nodes are prefixed with their
    '/'-joined directory path.  The original body was an unimplemented
    stub ("NOT WORKING YET").
    """
    paths = []
    for key, value in tree.items():
        if key == FILE_Marker:
            # Files directly contained by this node.
            paths.extend(value)
        elif isinstance(value, dict):
            # Recurse into the subdirectory and prefix its results.
            paths.extend(key + '/' + sub for sub in tree2paths(value))
    return paths
def printTree(tree, indent=0):
    """Print the file tree structure with proper indentation."""
    for key, value in tree.items():
        if key == FILE_Marker:
            # Only print the file list when it is non-empty.
            if value:
                print(' ' * indent + str(value))
        else:
            print(' ' * indent + str(key))
            if isinstance(value, dict):
                printTree(value, indent + 1)
            else:
                print(' ' * (indent + 1) + str(value))
##### Listing #####
def countFiles(rootDir):
    """Count non-hidden, non-symlink files under rootDir (recursively)."""
    total = 0
    for dirpath, dirnames, filenames in os.walk(rootDir):
        for name in filenames:
            if name.startswith('.'):
                continue
            full = os.path.join(dirpath, name)
            if not os.path.islink(full):
                total += 1
    return total
def getTotalSize(rootDir, hidden):
    """Total size in bytes of files under rootDir (recursively).

    When `hidden` is falsy, hidden directories are pruned; hidden files
    are skipped only when `hidden` == 0; symlinks are never counted.
    """
    total = 0
    for dirpath, dirnames, filenames in os.walk(rootDir):
        if not hidden:
            # Prune hidden directories in place so os.walk skips them.
            dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for name in filenames:
            if name.startswith('.') and hidden == 0:
                continue
            target = os.path.join(dirpath, name)
            if not os.path.islink(target):
                total += os.path.getsize(target)
    return total
def listFiles(rootDir, hidden):
    """List regular files directly inside rootDir (non-recursive).

    Returns (total_size, paths).  Hidden files are skipped when
    `hidden` == 0; directories and symlinks are always skipped.
    """
    collected = []
    total = 0
    for entry in os.listdir(rootDir):
        if entry.startswith('.') and hidden == 0:
            continue
        full = os.path.join(rootDir, entry)
        if os.path.isdir(full) or os.path.islink(full):
            continue
        total += os.path.getsize(full)
        collected.append(full)
    logger.debug("Total Size: %d" % total)
    return total, collected
def recursiveListFiles(rootDir, hidden):
    """Recursively list files under rootDir.

    Returns (total_size, paths).  Hidden directories are pruned when
    `hidden` is falsy; hidden files are skipped when `hidden` == 0;
    symlinks are never included.
    """
    collected = []
    total = 0
    for dirpath, dirnames, filenames in os.walk(rootDir):
        if not hidden:
            # Prune hidden directories in place so os.walk skips them.
            dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for name in filenames:
            if name.startswith('.') and hidden == 0:
                continue
            full = os.path.join(dirpath, name)
            if os.path.islink(full):
                continue
            total += os.path.getsize(full)
            collected.append(full)
    return total, collected
##### Filters #####
def filterList(fileList):
    """Keep only paths whose extension is a supported document type."""
    return [path for path in fileList if getFileType(path) in doctype.__all__]
def filterDocType(fileList, doctype):
    """Keep only paths whose extension equals `doctype`."""
    return [path for path in fileList if getFileType(path) == doctype]
##### Information #####
def getFileType(itempath):
    """Return the file extension of `itempath` without the leading dot."""
    return os.path.splitext(itempath)[1].lstrip('.')
##### List Conversion #####
def paths2names(pathList):
    """Map a list of paths to their base file names."""
    return [os.path.basename(item) for item in pathList]
|
arschles/kubernetes-service-catalog
|
refs/heads/downstream
|
vendor/k8s.io/kubernetes/examples/cluster-dns/images/backend/server.py
|
504
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PORT_NUMBER = 8000
# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
    # Handler for the GET requests
    def do_GET(self):
        # Always answer 200 with a fixed greeting, regardless of the path.
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        self.wfile.write("Hello World!")
try:
    # Create a web server and define the handler to manage the incoming request.
    server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
    print 'Started httpserver on port ' , PORT_NUMBER
    # Block handling requests until interrupted.
    server.serve_forever()
except KeyboardInterrupt:
    print '^C received, shutting down the web server'
    server.socket.close()
|
hachreak/invenio-ext
|
refs/heads/master
|
invenio_ext/principal/__init__.py
|
7
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initialize and configure *Flask-Principal* extension."""
from flask import current_app
from flask_login import user_logged_in, user_logged_out
from flask_principal import Principal, Identity, AnonymousIdentity, \
identity_changed, Permission
from six import iteritems
from .wrappers import actions, Action
__all__ = ('setup_app', 'principals', 'permission_required', 'actions',
           'Action')

# Application-wide Flask-Principal extension instance, initialized by setup_app().
principals = Principal()
class AccAuthorizeActionPermission(Permission):
    """Wrapper for ``acc_authorize_action``."""

    def __init__(self, action, **kwargs):
        """Define action and arguments.

        :param action: name of the action to authorize.
        :param kwargs: extra arguments forwarded to ``acc_authorize_action``;
            callable values are evaluated at check time.
        """
        self.action = action
        self.params = kwargs
        # Name the class explicitly: ``super(self.__class__, self)`` recurses
        # infinitely as soon as this class is subclassed, because the
        # subclass's __init__ resolves self.__class__ to the subclass again.
        super(AccAuthorizeActionPermission, self).__init__()

    def allows(self, identity):
        """Check if given identity can perform defined action."""
        from invenio_access.engine import acc_authorize_action
        auth, message = acc_authorize_action(
            identity.id, self.action, **dict(
                (k, v() if callable(v) else v)
                for (k, v) in iteritems(self.params)))
        # acc_authorize_action returns 0 on success.
        if auth == 0:
            return True
        current_app.logger.info(message)
        return False
def permission_required(action, **kwargs):
    """Check if user can perform given action.

    Returns a permission requirement that raises HTTP 401 on failure.
    """
    permission = AccAuthorizeActionPermission(action, **kwargs)
    return permission.require(http_exception=401)
def setup_app(app):
    """Setup principal extension."""
    principals.init_app(app)

    @user_logged_in.connect_via(app)
    def _logged_in(sender, user):
        # Announce the identity of the user who just logged in.
        identity_changed.send(sender, identity=Identity(user.get_id()))

    @user_logged_out.connect_via(app)
    def _logged_out(sender, user):
        # Downgrade to an anonymous identity on logout.
        identity_changed.send(sender, identity=AnonymousIdentity())

    return app
|
EUNIX-TRIX/al-go-rithms
|
refs/heads/master
|
deep_learning/python/neuralnetwork.py
|
5
|
import numpy as np
import dill
class neural_network:
    """Small fully-connected feed-forward network with hand-rolled backprop.

    :param num_layers: total layer count, input and output layers included.
    :param num_nodes: list of node counts, one entry per layer.
    :param activation_function: per-layer activation names ("sigmoid",
        "softmax", "relu", "tanh"; any other value means identity).
    :param cost_function: "mean_squared" or "cross_entropy".
    """
    def __init__(self, num_layers, num_nodes, activation_function, cost_function):
        self.num_layers = num_layers
        self.num_nodes = num_nodes
        self.layers = []
        self.cost_function = cost_function
        for i in range(num_layers):
            if i != num_layers-1:
                layer_i = layer(num_nodes[i], num_nodes[i+1], activation_function[i])
            else:
                # The output layer has no outgoing weights.
                layer_i = layer(num_nodes[i], 0, activation_function[i])
            self.layers.append(layer_i)
    def train(self, batch_size, inputs, labels, num_epochs, learning_rate, filename):
        """Run mini-batch gradient descent; the session is pickled with dill.

        NOTE(review): the loop condition requires ``len(inputs)`` to be an
        exact multiple of ``batch_size``; otherwise it never terminates --
        confirm with callers.
        """
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        for j in range(num_epochs):
            i = 0
            print("== EPOCH: ", j, " ==")
            while i+batch_size != len(inputs):
                self.error = 0
                self.forward_pass(inputs[i:i+batch_size])
                self.calculate_error(labels[i:i+batch_size])
                self.back_pass(labels[i:i+batch_size])
                i += batch_size
                print("Error: ", self.error)
        dill.dump_session(filename)
    def forward_pass(self, inputs):
        """Propagate a batch of inputs through every layer."""
        self.layers[0].activations = inputs
        for i in range(self.num_layers-1):
            # (Re)sample the bias for the next layer, sized for this batch.
            self.layers[i].add_bias(self.batch_size, self.layers[i+1].num_nodes_in_layer)
            temp = np.add(np.matmul(self.layers[i].activations, self.layers[i].weights_for_layer), self.layers[i].bias_for_layer)
            if self.layers[i+1].activation_function == "sigmoid":
                self.layers[i+1].activations = self.sigmoid(temp)
            elif self.layers[i+1].activation_function == "softmax":
                self.layers[i+1].activations = self.softmax(temp)
            elif self.layers[i+1].activation_function == "relu":
                self.layers[i+1].activations = self.relu(temp)
            elif self.layers[i+1].activation_function == "tanh":
                self.layers[i+1].activations = self.tanh(temp)
            else:
                # Unknown name: fall through to a linear (identity) layer.
                self.layers[i+1].activations = temp
    def relu(self, layer):
        """Rectified linear unit; clamps negatives in place and returns *layer*."""
        layer[layer < 0] = 0
        return layer
    def softmax(self, layer):
        """Row-wise softmax over the last axis."""
        exp = np.exp(layer)
        return exp/np.sum(exp, axis=1, keepdims=True)
    def sigmoid(self, layer):
        """Logistic sigmoid, 1 / (1 + e^-x).

        BUG FIX: the previous implementation computed 1/(1+exp(x)), which is
        sigmoid(-x), inverting the activation for every non-zero input.
        """
        return np.divide(1, np.add(1, np.exp(np.negative(layer))))
    def tanh(self, layer):
        """Hyperbolic tangent activation."""
        return np.tanh(layer)
    def calculate_error(self, labels):
        """Set ``self.error`` from the configured cost function."""
        if len(labels[0]) != self.layers[self.num_layers-1].num_nodes_in_layer:
            print ("Error: Label is not of the same shape as output layer.")
            print("Label: ", len(labels), " : ", len(labels[0]))
            print("Out: ", len(self.layers[self.num_layers-1].activations), " : ", len(self.layers[self.num_layers-1].activations[0]))
            return
        if self.cost_function == "mean_squared":
            self.error = np.mean(np.divide(np.square(np.subtract(labels, self.layers[self.num_layers-1].activations)), 2))
        elif self.cost_function == "cross_entropy":
            self.error = np.negative(np.sum(np.multiply(labels, np.log(self.layers[self.num_layers-1].activations))))
    def back_pass(self, labels):
        """Backpropagate and update weights with plain gradient descent.

        NOTE(review): the delta formula hard-codes a sigmoid-style derivative
        y*(1-y) regardless of the layer's configured activation -- confirm
        this is intended for non-sigmoid nets.
        """
        targets = labels
        i = self.num_layers-1
        y = self.layers[i].activations
        deltaw = np.matmul(np.asarray(self.layers[i-1].activations).T, np.multiply(y, np.multiply(1-y, targets-y)))
        new_weights = self.layers[i-1].weights_for_layer - self.learning_rate * deltaw
        for i in range(i-1, 0, -1):
            y = self.layers[i].activations
            deltaw = np.matmul(np.asarray(self.layers[i-1].activations).T, np.multiply(y, np.multiply(1-y, np.sum(np.multiply(new_weights, self.layers[i].weights_for_layer),axis=1).T)))
            # Only commit a layer's new weights after the preceding layer's
            # gradient has been computed from the old ones.
            self.layers[i].weights_for_layer = new_weights
            new_weights = self.layers[i-1].weights_for_layer - self.learning_rate * deltaw
        self.layers[0].weights_for_layer = new_weights
    def predict(self, filename, input):
        """Restore the session from *filename* and one-hot the top output."""
        dill.load_session(filename)
        self.batch_size = 1
        self.forward_pass(input)
        a = self.layers[self.num_layers-1].activations
        a[np.where(a==np.max(a))] = 1
        a[np.where(a!=np.max(a))] = 0
        return a
    def check_accuracy(self, filename, inputs, labels):
        """Restore the session, forward the batch, and print label accuracy."""
        dill.load_session(filename)
        self.batch_size = len(inputs)
        self.forward_pass(inputs)
        a = self.layers[self.num_layers-1].activations
        num_classes = 10
        # NOTE(review): ``targets`` is computed but never used, and indexing
        # np.eye with raw activations looks dubious -- verify against callers.
        targets = np.array([a]).reshape(-1)
        a = np.asarray(a)
        one_hot_labels = np.eye(num_classes)[a.astype(int)]
        total=0
        correct=0
        for i in range(len(a)):
            total += 1
            if np.equal(one_hot_labels[i], labels[i]).all():
                correct += 1
        print("Accuracy: ", correct*100/total)
    def load_model(self, filename):
        """Restore a previously dumped training session from *filename*."""
        dill.load_session(filename)
class layer:
    """A single network layer: activations plus outgoing weights and bias.

    A layer with ``num_nodes_in_next_layer == 0`` is an output layer and
    carries no weights or bias.
    """
    def __init__(self, num_nodes_in_layer, num_nodes_in_next_layer, activation_function):
        self.num_nodes_in_layer = num_nodes_in_layer
        self.activation_function = activation_function
        self.activations = np.zeros([num_nodes_in_layer,1])
        if num_nodes_in_next_layer == 0:
            # Output layer: nothing flows out of it.
            self.weights_for_layer = None
            self.bias_for_layer = None
        else:
            self.weights_for_layer = np.random.normal(
                0, 0.001, size=(num_nodes_in_layer, num_nodes_in_next_layer))
    def add_bias(self, batch_size, num_nodes_in_next_layer):
        """Sample a fresh bias matrix sized for the current batch."""
        if num_nodes_in_next_layer == 0:
            return
        self.bias_for_layer = np.random.normal(
            0, 0.01, size=(batch_size, num_nodes_in_next_layer))
|
jpshort/odoo
|
refs/heads/8.0
|
openerp/report/print_fnc.py
|
458
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
# Mapping of helper names callable from report templates.
# NOTE: ``.decode('latin1')`` targets Python 2 strings (returns unicode).
functions = {
    'today': lambda x: time.strftime('%d/%m/%Y', time.localtime()).decode('latin1')
}
#
# TODO: call an object internal function too
#
def print_fnc(fnc, arg):
    """Dispatch *fnc* with *arg*; unknown names yield the empty string."""
    handler = functions.get(fnc)
    if handler is None:
        return ''
    return handler(arg)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
morpheby/levelup-by
|
refs/heads/master
|
cms/djangoapps/contentstore/features/problem-editor.py
|
2
|
# disable missing docstring
#pylint: disable=C0111
from lettuce import world, step
from nose.tools import assert_equal, assert_true # pylint: disable=E0611
from common import type_in_codemirror
# Human-readable labels of the capa problem settings exercised by these steps.
DISPLAY_NAME = "Display Name"
MAXIMUM_ATTEMPTS = "Maximum Attempts"
PROBLEM_WEIGHT = "Problem Weight"
RANDOMIZATION = 'Randomization'
SHOW_ANSWER = "Show Answer"
############### ACTIONS ####################
@step('I have created a Blank Common Problem$')
def i_created_blank_common_problem(step):
    # Instantiate a blank capa problem component from its template yaml.
    world.create_component_instance(
        step,
        '.large-problem-icon',
        'problem',
        '.xmodule_CapaModule',
        'blank_common.yaml'
    )
@step('I edit and select Settings$')
def i_edit_and_select_settings(step):
    world.edit_component_and_select_settings()
@step('I see five alphabetized settings and their expected values$')
def i_see_five_settings_with_values(step):
    # Each entry is [label, displayed value, explicitly_set]; the list order
    # asserts the alphabetical ordering of the settings UI.
    world.verify_all_setting_entries(
        [
            [DISPLAY_NAME, "Blank Common Problem", True],
            [MAXIMUM_ATTEMPTS, "", False],
            [PROBLEM_WEIGHT, "", False],
            [RANDOMIZATION, "Never", False],
            [SHOW_ANSWER, "Finished", False]
        ])
@step('I can modify the display name')
def i_can_modify_the_display_name(step):
    # Verifying that the display name can be a string containing a floating point value
    # (to confirm that we don't throw an error because it is of the wrong type).
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.css_fill('.wrapper-comp-setting .setting-input', '3.4', index=index)
    if world.is_firefox():
        # Firefox needs an explicit event to register the typed value.
        world.trigger_event('.wrapper-comp-setting .setting-input', index=index)
    verify_modified_display_name()
@step('my display name change is persisted on save')
def my_display_name_change_is_persisted_on_save(step):
    world.save_component_and_reopen(step)
    verify_modified_display_name()
@step('I can specify special characters in the display name')
def i_can_modify_the_display_name_with_special_chars(step):
    # Quotes and ampersand exercise HTML-escaping of the stored value.
    index = world.get_setting_entry_index(DISPLAY_NAME)
    world.css_fill('.wrapper-comp-setting .setting-input', "updated ' \" &", index=index)
    if world.is_firefox():
        world.trigger_event('.wrapper-comp-setting .setting-input', index=index)
    verify_modified_display_name_with_special_chars()
@step('my special characters and persisted on save')
def special_chars_persisted_on_save(step):
    world.save_component_and_reopen(step)
    verify_modified_display_name_with_special_chars()
@step('I can revert the display name to unset')
def can_revert_display_name_to_unset(step):
    world.revert_setting_entry(DISPLAY_NAME)
    verify_unset_display_name()
@step('my display name is unset on save')
def my_display_name_is_persisted_on_save(step):
    world.save_component_and_reopen(step)
    verify_unset_display_name()
@step('I can select Per Student for Randomization')
def i_can_select_per_student_for_randomization(step):
    world.browser.select(RANDOMIZATION, "Per Student")
    verify_modified_randomization()
@step('my change to randomization is persisted')
def my_change_to_randomization_is_persisted(step):
    world.save_component_and_reopen(step)
    verify_modified_randomization()
@step('I can revert to the default value for randomization')
def i_can_revert_to_default_for_randomization(step):
    world.revert_setting_entry(RANDOMIZATION)
    world.save_component_and_reopen(step)
    # After revert, value falls back to "Never" and explicitly_set is False.
    world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Never", False)
@step('I can set the weight to "(.*)"?')
def i_can_set_weight(step, weight):
    set_weight(weight)
    verify_modified_weight()
@step('my change to weight is persisted')
def my_change_to_weight_is_persisted(step):
    world.save_component_and_reopen(step)
    verify_modified_weight()
@step('I can revert to the default value of unset for weight')
def i_can_revert_to_default_for_unset_weight(step):
    world.revert_setting_entry(PROBLEM_WEIGHT)
    world.save_component_and_reopen(step)
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the weight to "(.*)", it remains unset')
def set_the_weight_to_abc(step, bad_weight):
    # Invalid weights (non-numeric) must be rejected client-side.
    set_weight(bad_weight)
    # We show the clear button immediately on type, hence the "True" here.
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", True)
    world.save_component_and_reopen(step)
    # But no change was actually ever sent to the model, so on reopen, explicitly_set is False
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the max attempts to "(.*)", it will persist as a valid integer$')
def set_the_max_attempts(step, max_attempts_set):
    # on firefox with selenium, the behaviour is different. eg 2.34 displays as 2.34 and is persisted as 2
    index = world.get_setting_entry_index(MAXIMUM_ATTEMPTS)
    world.css_fill('.wrapper-comp-setting .setting-input', max_attempts_set, index=index)
    if world.is_firefox():
        world.trigger_event('.wrapper-comp-setting .setting-input', index=index)
    world.save_component_and_reopen(step)
    # Whatever was typed, the persisted value must round-trip as a
    # non-negative integer.
    value = int(world.css_value('input.setting-input', index=index))
    assert value >= 0
@step('Edit High Level Source is not visible')
def edit_high_level_source_not_visible(step):
    verify_high_level_source_links(step, False)
@step('Edit High Level Source is visible')
def edit_high_level_source_links_visible(step):
    verify_high_level_source_links(step, True)
@step('If I press Cancel my changes are not persisted')
def cancel_does_not_save_changes(step):
    world.cancel_component(step)
    # Re-run earlier steps to assert the settings are back to their defaults.
    step.given("I edit and select Settings")
    step.given("I see five alphabetized settings and their expected values")
@step('I have created a LaTeX Problem')
def create_latex_problem(step):
    world.click_new_component_button(step, '.large-problem-icon')
    def animation_done(_driver):
        # The component-picker slide-down must finish before clicking.
        return world.browser.evaluate_script("$('div.new-component').css('display')") == 'none'
    world.wait_for(animation_done)
    # Go to advanced tab.
    world.css_click('#ui-id-2')
    world.click_component_from_menu("problem", "latex_problem.yaml", '.xmodule_CapaModule')
@step('I edit and compile the High Level Source')
def edit_latex_source(step):
    open_high_level_source()
    type_in_codemirror(1, "hi")
    world.css_click('.hls-compile')
@step('my change to the High Level Source is persisted')
def high_level_source_persisted(step):
    def verify_text(driver):
        css_sel = '.problem div>span'
        return world.css_text(css_sel) == 'hi'
    # Compilation is asynchronous; poll until the rendered text appears.
    world.wait_for(verify_text)
@step('I view the High Level Source I see my changes')
def high_level_source_in_editor(step):
    open_high_level_source()
    assert_equal('hi', world.css_value('.source-edit-box'))
def verify_high_level_source_links(step, visible):
    # Checks both entry points to the LaTeX workflow: the compiler launch
    # button inside the editor and the upload button on the unit page.
    if visible:
        assert_true(world.is_css_present('.launch-latex-compiler'),
                    msg="Expected to find the latex button but it is not present.")
    else:
        assert_true(world.is_css_not_present('.launch-latex-compiler'),
                    msg="Expected not to find the latex button but it is present.")
    world.cancel_component(step)
    if visible:
        assert_true(world.is_css_present('.upload-button'),
                    msg="Expected to find the upload button but it is not present.")
    else:
        assert_true(world.is_css_not_present('.upload-button'),
                    msg="Expected not to find the upload button but it is present.")
def verify_modified_weight():
    world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "3.5", True)
def verify_modified_randomization():
    world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Per Student", True)
def verify_modified_display_name():
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, '3.4', True)
def verify_modified_display_name_with_special_chars():
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, "updated ' \" &", True)
def verify_unset_display_name():
    # NOTE(review): the unset default reads 'Blank Advanced Problem' even
    # though creation used "Blank Common Problem" -- presumably the platform
    # default display name; confirm against the xmodule definition.
    world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, 'Blank Advanced Problem', False)
def set_weight(weight):
    index = world.get_setting_entry_index(PROBLEM_WEIGHT)
    world.css_fill('.wrapper-comp-setting .setting-input', weight, index=index)
    if world.is_firefox():
        # Blur then focus elsewhere so Firefox commits the typed value.
        world.trigger_event('.wrapper-comp-setting .setting-input', index=index, event='blur')
        world.trigger_event('a.save-button', event='focus')
def open_high_level_source():
    world.css_click('a.edit-button')
    world.css_click('.launch-latex-compiler > a')
|
andfoy/margffoy-tuay-server
|
refs/heads/master
|
env/lib/python2.7/site-packages/reportlab-3.2.0-py2.7-linux-x86_64.egg/reportlab/lib/enums.py
|
35
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/enums.py
__version__=''' $Id$ '''
__doc__="""
Container for constants. Hardly used!
"""
# Paragraph horizontal-alignment constants (e.g. ParagraphStyle.alignment).
TA_LEFT = 0
TA_CENTER = 1
TA_RIGHT = 2
# NOTE(review): the value 3 is skipped -- presumably reserved/historical;
# confirm before renumbering.
TA_JUSTIFY = 4
|
luvit/gyp
|
refs/heads/luvit-dev
|
test/additional-targets/src/dir1/emit.py
|
337
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys

# Write a fixed greeting into the file named by the first command-line
# argument.  Binary mode keeps the newline byte exact on every platform;
# the context manager guarantees the file is closed even if the write fails.
with open(sys.argv[1], 'wb') as f:
    f.write('Hello from emit.py\n')
|
jlspyaozhongkai/Uter
|
refs/heads/master
|
third_party_build/Python-2.7.9/lib/python2.7/xml/dom/pulldom.py
|
322
|
import xml.sax
import xml.sax.handler
import types
# Python 2 builds compiled without unicode support lack types.UnicodeType.
try:
    _StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
    _StringTypes = [types.StringType]
# Event-type tokens yielded by DOMEventStream as (event, node) pairs.
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
    """SAX ContentHandler that converts SAX callbacks into a singly linked
    list of ``(event, node)`` pairs consumed by ``DOMEventStream``.

    Events are chained through one-element-plus-tail cells: each cell is
    ``[(event, node), next_cell_or_None]``.  ``firstEvent`` is a dummy head;
    ``lastEvent`` always points at the tail cell.  DOM nodes are created
    lazily once the document element is seen (see ``buildDocument``).
    """
    _locator = None
    document = None
    def __init__(self, documentFactory=None):
        from xml.dom import XML_NAMESPACE
        self.documentFactory = documentFactory
        # Dummy head cell of the event list; lastEvent tracks the tail.
        self.firstEvent = [None, None]
        self.lastEvent = self.firstEvent
        self.elementStack = []
        self.push = self.elementStack.append
        try:
            self.pop = self.elementStack.pop
        except AttributeError:
            # use class' pop instead
            pass
        self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # Events seen before the document node exists (comments/PIs before
        # the root element); replayed in buildDocument.
        self.pending_events = []
    def pop(self):
        # Fallback used only if binding elementStack.pop failed in __init__.
        result = self.elementStack[-1]
        del self.elementStack[-1]
        return result
    def setDocumentLocator(self, locator):
        self._locator = locator
    def startPrefixMapping(self, prefix, uri):
        # Collect xmlns declarations so startElementNS can re-emit them as
        # attributes on the DOM element.
        if not hasattr(self, '_xmlns_attrs'):
            self._xmlns_attrs = []
        self._xmlns_attrs.append((prefix or 'xmlns', uri))
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix or None
    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts.pop()
    def startElementNS(self, name, tagName , attrs):
        # Retrieve xml namespace declaration attributes.
        xmlns_uri = 'http://www.w3.org/2000/xmlns/'
        xmlns_attrs = getattr(self, '_xmlns_attrs', None)
        if xmlns_attrs is not None:
            for aname, value in xmlns_attrs:
                attrs._attrs[(xmlns_uri, aname)] = value
            self._xmlns_attrs = []
        uri, localname = name
        if uri:
            # When using namespaces, the reader may or may not
            # provide us with the original name. If not, create
            # *a* valid tagName from the current context.
            if tagName is None:
                prefix = self._current_context[uri]
                if prefix:
                    tagName = prefix + ":" + localname
                else:
                    tagName = localname
            if self.document:
                node = self.document.createElementNS(uri, tagName)
            else:
                node = self.buildDocument(uri, tagName)
        else:
            # When the tagname is not prefixed, it just appears as
            # localname
            if self.document:
                node = self.document.createElement(localname)
            else:
                node = self.buildDocument(None, localname)
        for aname,value in attrs.items():
            a_uri, a_localname = aname
            if a_uri == xmlns_uri:
                # Namespace declarations keep their literal qname.
                if a_localname == 'xmlns':
                    qname = a_localname
                else:
                    qname = 'xmlns:' + a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            elif a_uri:
                prefix = self._current_context[a_uri]
                if prefix:
                    qname = prefix + ":" + a_localname
                else:
                    qname = a_localname
                attr = self.document.createAttributeNS(a_uri, qname)
                node.setAttributeNodeNS(attr)
            else:
                attr = self.document.createAttribute(a_localname)
                node.setAttributeNode(attr)
            attr.value = value
        # Append the START_ELEMENT event and keep the node on the stack
        # until the matching end event.
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElementNS(self, name, tagName):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def startElement(self, name, attrs):
        # Non-namespace variant of startElementNS.
        if self.document:
            node = self.document.createElement(name)
        else:
            node = self.buildDocument(None, name)
        for aname,value in attrs.items():
            attr = self.document.createAttribute(aname)
            attr.value = value
            node.setAttributeNode(attr)
        self.lastEvent[1] = [(START_ELEMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
    def endElement(self, name):
        self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
        self.lastEvent = self.lastEvent[1]
    def comment(self, s):
        if self.document:
            node = self.document.createComment(s)
            self.lastEvent[1] = [(COMMENT, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document node yet: queue the raw text until buildDocument.
            event = [(COMMENT, s), None]
            self.pending_events.append(event)
    def processingInstruction(self, target, data):
        if self.document:
            node = self.document.createProcessingInstruction(target, data)
            self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
            self.lastEvent = self.lastEvent[1]
        else:
            # No document node yet: queue until buildDocument.
            event = [(PROCESSING_INSTRUCTION, target, data), None]
            self.pending_events.append(event)
    def ignorableWhitespace(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
        self.lastEvent = self.lastEvent[1]
    def characters(self, chars):
        node = self.document.createTextNode(chars)
        self.lastEvent[1] = [(CHARACTERS, node), None]
        self.lastEvent = self.lastEvent[1]
    def startDocument(self):
        if self.documentFactory is None:
            import xml.dom.minidom
            self.documentFactory = xml.dom.minidom.Document.implementation
    def buildDocument(self, uri, tagname):
        # Can't do that in startDocument, since we need the tagname
        # XXX: obtain DocumentType
        node = self.documentFactory.createDocument(uri, tagname, None)
        self.document = node
        self.lastEvent[1] = [(START_DOCUMENT, node), None]
        self.lastEvent = self.lastEvent[1]
        self.push(node)
        # Put everything we have seen so far into the document
        for e in self.pending_events:
            if e[0][0] == PROCESSING_INSTRUCTION:
                _,target,data = e[0]
                n = self.document.createProcessingInstruction(target, data)
                e[0] = (PROCESSING_INSTRUCTION, n)
            elif e[0][0] == COMMENT:
                n = self.document.createComment(e[0][1])
                e[0] = (COMMENT, n)
            else:
                raise AssertionError("Unknown pending event ",e[0][0])
            self.lastEvent[1] = e
            self.lastEvent = e
        self.pending_events = None
        # Return the document element just created for it.
        return node.firstChild
    def endDocument(self):
        self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
        self.pop()
    def clear(self):
        "clear(): Explicitly release parsing structures"
        self.document = None
class ErrorHandler:
    """Minimal SAX error handler: print warnings, re-raise everything else."""
    def warning(self, exception):
        print exception
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class DOMEventStream:
    """Iterator over (event, node) pairs produced by a PullDOM handler.

    If the parser supports the IncrementalParser interface (``feed``), data
    is pulled from *stream* in *bufsize* chunks on demand; otherwise the
    whole stream is parsed up front (``_slurp``) and events are replayed.
    """
    def __init__(self, stream, parser, bufsize):
        self.stream = stream
        self.parser = parser
        self.bufsize = bufsize
        if not hasattr(self.parser, 'feed'):
            # Non-incremental parser: fall back to parse-everything-first.
            self.getEvent = self._slurp
        self.reset()
    def reset(self):
        self.pulldom = PullDOM()
        # This content handler relies on namespace support
        self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
        self.parser.setContentHandler(self.pulldom)
    def __getitem__(self, pos):
        # Legacy sequence-protocol iteration support.
        rc = self.getEvent()
        if rc:
            return rc
        raise IndexError
    def next(self):
        rc = self.getEvent()
        if rc:
            return rc
        raise StopIteration
    def __iter__(self):
        return self
    def expandNode(self, node):
        """Consume events until *node*'s subtree is fully built into it."""
        event = self.getEvent()
        parents = [node]
        while event:
            token, cur_node = event
            if cur_node is node:
                return
            if token != END_ELEMENT:
                parents[-1].appendChild(cur_node)
            if token == START_ELEMENT:
                parents.append(cur_node)
            elif token == END_ELEMENT:
                del parents[-1]
            event = self.getEvent()
    def getEvent(self):
        # use IncrementalParser interface, so we get the desired
        # pull effect
        if not self.pulldom.firstEvent[1]:
            self.pulldom.lastEvent = self.pulldom.firstEvent
        while not self.pulldom.firstEvent[1]:
            buf = self.stream.read(self.bufsize)
            if not buf:
                self.parser.close()
                return None
            self.parser.feed(buf)
        # Pop the head event off the linked list.
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def _slurp(self):
        """ Fallback replacement for getEvent() using the
        standard SAX2 interface, which means we slurp the
        SAX events into memory (no performance gain, but
        we are compatible to all SAX parsers).
        """
        self.parser.parse(self.stream)
        self.getEvent = self._emit
        return self._emit()
    def _emit(self):
        """ Fallback replacement for getEvent() that emits
        the events that _slurp() read previously.
        """
        rc = self.pulldom.firstEvent[1][0]
        self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
        return rc
    def clear(self):
        """clear(): Explicitly release parsing objects"""
        self.pulldom.clear()
        del self.pulldom
        self.parser = None
        self.stream = None
class SAX2DOM(PullDOM):
    """PullDOM variant that also attaches every node to its parent, so a
    complete DOM tree is built as a side effect of the event stream."""
    def startElementNS(self, name, tagName , attrs):
        PullDOM.startElementNS(self, name, tagName, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)
    def startElement(self, name, attrs):
        PullDOM.startElement(self, name, attrs)
        curNode = self.elementStack[-1]
        parentNode = self.elementStack[-2]
        parentNode.appendChild(curNode)
    def processingInstruction(self, target, data):
        PullDOM.processingInstruction(self, target, data)
        # The base class appended the event; its node is at lastEvent[0][1].
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
    def ignorableWhitespace(self, chars):
        PullDOM.ignorableWhitespace(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
    def characters(self, chars):
        PullDOM.characters(self, chars)
        node = self.lastEvent[0][1]
        parentNode = self.elementStack[-1]
        parentNode.appendChild(node)
# Default read-chunk size; slightly under 16 KiB.
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
    """Return a DOMEventStream over *stream_or_string*.

    Strings are treated as filenames and opened; anything else is assumed
    to be a file-like object.  A fresh SAX parser is created if none given.
    """
    if bufsize is None:
        bufsize = default_bufsize
    if type(stream_or_string) in _StringTypes:
        stream = open(stream_or_string)
    else:
        stream = stream_or_string
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
    """Return a DOMEventStream over an in-memory XML *string*."""
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    # The whole document is in memory, so one buffer-sized read suffices.
    bufsize = len(string)
    buf = StringIO(string)
    if not parser:
        parser = xml.sax.make_parser()
    return DOMEventStream(buf, parser, bufsize)
|
NeCTAR-RC/neutron
|
refs/heads/master
|
neutron/plugins/vmware/dbexts/nsx_models.py
|
26
|
# Copyright 2015 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NSX data models.
This module defines data models used by the VMware NSX plugin family.
"""
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db import model_base
from neutron.db import models_v2
class TzNetworkBinding(model_base.BASEV2):
    """Represents a binding of a virtual network with a transport zone.

    This model class associates a Neutron network with a transport zone;
    optionally a vlan ID might be used if the binding type is 'bridge'
    """
    __tablename__ = 'tz_network_bindings'
    # TODO(arosen) - it might be worth while refactoring the how this data
    # is stored later so every column does not need to be a primary key.
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    # 'flat', 'vlan', stt' or 'gre'
    binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
                                     name='tz_network_bindings_binding_type'),
                             nullable=False, primary_key=True)
    phy_uuid = sa.Column(sa.String(36), primary_key=True, default='')
    vlan_id = sa.Column(sa.Integer, primary_key=True,
                        autoincrement=False, default=0)
    def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
        self.network_id = network_id
        self.binding_type = binding_type
        self.phy_uuid = phy_uuid
        self.vlan_id = vlan_id
    def __repr__(self):
        return "<NetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
                                                  self.binding_type,
                                                  self.phy_uuid,
                                                  self.vlan_id)
class NeutronNsxNetworkMapping(model_base.BASEV2):
    """Maps neutron network identifiers to NSX identifiers.

    Because of chained logical switches more than one mapping might exist
    for a single Neutron network.
    """
    __tablename__ = 'neutron_nsx_network_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete='CASCADE'),
                           primary_key=True)
    # Part of the composite primary key: several NSX switches may back one
    # Neutron network.
    nsx_id = sa.Column(sa.String(36), primary_key=True)
class NeutronNsxSecurityGroupMapping(model_base.BASEV2):
    """Backend mappings for Neutron Security Group identifiers.

    This class maps a neutron security group identifier to the corresponding
    NSX security profile identifier.
    """
    __tablename__ = 'neutron_nsx_security_group_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('securitygroups.id',
                                         ondelete="CASCADE"),
                           primary_key=True)
    nsx_id = sa.Column(sa.String(36), primary_key=True)
class NeutronNsxPortMapping(model_base.BASEV2):
    """Represents the mapping between neutron and nsx port uuids."""
    __tablename__ = 'neutron_nsx_port_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('ports.id', ondelete="CASCADE"),
                           primary_key=True)
    # Logical switch hosting the NSX port; nullable for legacy rows.
    nsx_switch_id = sa.Column(sa.String(36))
    nsx_port_id = sa.Column(sa.String(36), nullable=False)
    def __init__(self, neutron_id, nsx_switch_id, nsx_port_id):
        self.neutron_id = neutron_id
        self.nsx_switch_id = nsx_switch_id
        self.nsx_port_id = nsx_port_id
class NeutronNsxRouterMapping(model_base.BASEV2):
    """Maps neutron router identifiers to NSX identifiers."""
    __tablename__ = 'neutron_nsx_router_mappings'
    neutron_id = sa.Column(sa.String(36),
                           sa.ForeignKey('routers.id', ondelete='CASCADE'),
                           primary_key=True)
    nsx_id = sa.Column(sa.String(36))
class MultiProviderNetworks(model_base.BASEV2):
    """Networks provisioned through multiprovider extension."""
    __tablename__ = 'multi_provider_networks'
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    def __init__(self, network_id):
        self.network_id = network_id
class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
    """Defines a connection between a network gateway and a network."""
    # We use port_id as the primary key as one can connect a gateway
    # to a network in multiple ways (and we cannot use the same port form
    # more than a single gateway)
    network_gateway_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('networkgateways.id',
                                                 ondelete='CASCADE'))
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete='CASCADE'))
    segmentation_type = sa.Column(
        sa.Enum('flat', 'vlan',
                name='networkconnections_segmentation_type'))
    segmentation_id = sa.Column(sa.Integer)
    # A gateway cannot carry the same segment twice.
    __table_args__ = (sa.UniqueConstraint(network_gateway_id,
                                          segmentation_type,
                                          segmentation_id),
                      model_base.BASEV2.__table_args__)
    # Also, storing port id comes back useful when disconnecting a network
    # from a gateway
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete='CASCADE'),
                        primary_key=True)
class NetworkGatewayDeviceReference(model_base.BASEV2):
    # Association of a gateway with one interface of a gateway device.
    id = sa.Column(sa.String(36), primary_key=True)
    network_gateway_id = sa.Column(sa.String(36),
                                   sa.ForeignKey('networkgateways.id',
                                                 ondelete='CASCADE'),
                                   primary_key=True)
    interface_name = sa.Column(sa.String(64), primary_key=True)
class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
                           models_v2.HasTenant):
    # Identifier of the device on the NSX backend.
    nsx_id = sa.Column(sa.String(36))
    # Optional name for the gateway device
    name = sa.Column(sa.String(255))
    # Transport connector type. Not using enum as range of
    # connector types might vary with backend version
    connector_type = sa.Column(sa.String(10))
    # Transport connector IP Address
    connector_ip = sa.Column(sa.String(64))
    # operational status
    status = sa.Column(sa.String(16))
class NetworkGateway(model_base.BASEV2, models_v2.HasId,
                     models_v2.HasTenant):
    """Defines the data model for a network gateway."""
    name = sa.Column(sa.String(255))
    # Tenant id is nullable for this resource
    tenant_id = sa.Column(sa.String(36))
    default = sa.Column(sa.Boolean())
    devices = orm.relationship(NetworkGatewayDeviceReference,
                               backref='networkgateways',
                               cascade='all,delete')
    network_connections = orm.relationship(NetworkConnection, lazy='joined')
class MacLearningState(model_base.BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False)
# Add a relationship to the Port model using the backref attribute.
# This will instruct SQLAlchemy to eagerly load this association.
port = orm.relationship(
models_v2.Port,
backref=orm.backref("mac_learning_state", lazy='joined',
uselist=False, cascade='delete'))
class LsnPort(models_v2.model_base.BASEV2):
    """Port attached to a Logical Service Node, keyed by its LSN port id."""
    __tablename__ = 'lsn_port'
    lsn_port_id = sa.Column(sa.String(36), primary_key=True)
    lsn_id = sa.Column(sa.String(36),
                       sa.ForeignKey('lsn.lsn_id', ondelete="CASCADE"),
                       nullable=False)
    # Subnet served by this LSN port; one LSN port per subnet.
    sub_id = sa.Column(sa.String(36), nullable=False, unique=True)
    mac_addr = sa.Column(sa.String(32), nullable=False, unique=True)
    def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id):
        # type: (str, str, str, str) -> None
        """Store identifiers; note subnet_id/mac_address map to the shorter
        sub_id/mac_addr column names."""
        self.lsn_port_id = lsn_port_id
        self.lsn_id = lsn_id
        self.sub_id = subnet_id
        self.mac_addr = mac_address
class Lsn(models_v2.model_base.BASEV2):
    """Logical Service Node, associated with a single network."""
    __tablename__ = 'lsn'
    lsn_id = sa.Column(sa.String(36), primary_key=True)
    net_id = sa.Column(sa.String(36), nullable=False)
    def __init__(self, net_id, lsn_id):
        # type: (str, str) -> None
        """Bind the LSN identifier to the network it serves."""
        self.net_id = net_id
        self.lsn_id = lsn_id
class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """QoS queue definition with bandwidth bounds and marking policy."""
    name = sa.Column(sa.String(255))
    # Whether this queue is the default one for the tenant.
    default = sa.Column(sa.Boolean, default=False, server_default=sql.false())
    # Minimum bandwidth is required; maximum is optional (unbounded if NULL).
    min = sa.Column(sa.Integer, nullable=False)
    max = sa.Column(sa.Integer, nullable=True)
    qos_marking = sa.Column(sa.Enum('untrusted', 'trusted',
                                    name='qosqueues_qos_marking'))
    # DSCP value applied when marking traffic.
    dscp = sa.Column(sa.Integer)
class PortQueueMapping(model_base.BASEV2):
    """Associates a port with the QoS queue applied to it."""
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey("ports.id", ondelete="CASCADE"),
                        primary_key=True)
    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"),
                         primary_key=True)
    # Add a relationship to the Port model adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("qos_queue", uselist=False,
                            cascade='delete', lazy='joined'))
class NetworkQueueMapping(model_base.BASEV2):
    """Associates a network with the QoS queue applied to it."""
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("networks.id", ondelete="CASCADE"),
                           primary_key=True)
    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id",
                                                      ondelete="CASCADE"))
    # Add a relationship to the Network model adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("qos_queue", uselist=False,
                            cascade='delete', lazy='joined'))
|
alexmandujano/django
|
refs/heads/master
|
tests/utils_tests/test_archive.py
|
102
|
import os
import shutil
import tempfile
from django.utils import unittest
from django.utils.archive import Archive, extract
from django.utils._os import upath
TEST_DIR = os.path.join(os.path.dirname(upath(__file__)), 'archives')
class ArchiveTester(object):
    """Mixin with extraction tests shared by every archive-format TestCase.

    Subclasses set ``archive`` to a file name that exists inside TEST_DIR.
    """
    archive = None
    def setUp(self):
        """
        Create temporary directory for testing extraction.
        """
        self.old_cwd = os.getcwd()
        self.tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tmpdir)
        self.archive_path = os.path.join(TEST_DIR, self.archive)
        # Always start off in TEST_DIR.
        os.chdir(TEST_DIR)
    def tearDown(self):
        # Restore the working directory changed by setUp/tests.
        os.chdir(self.old_cwd)
    def test_extract_method(self):
        """Archive.extract(to_path) unpacks into the given directory."""
        with Archive(self.archive) as archive:
            archive.extract(self.tmpdir)
        self.check_files(self.tmpdir)
    def test_extract_method_no_to_path(self):
        """Archive.extract() with no argument unpacks into the cwd."""
        os.chdir(self.tmpdir)
        with Archive(self.archive_path) as archive:
            archive.extract()
        self.check_files(self.tmpdir)
    def test_extract_function(self):
        """Module-level extract(path, to_path) helper."""
        extract(self.archive_path, self.tmpdir)
        self.check_files(self.tmpdir)
    def test_extract_function_no_to_path(self):
        """Module-level extract(path) defaults to the cwd."""
        os.chdir(self.tmpdir)
        extract(self.archive_path)
        self.check_files(self.tmpdir)
    def check_files(self, tmpdir):
        """Assert the known fixture layout (1, 2, foo/1, foo/2, foo/bar/*)
        was fully extracted."""
        self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, '1')))
        self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, '2')))
        self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', '1')))
        self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', '2')))
        self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', 'bar', '1')))
        self.assertTrue(os.path.isfile(os.path.join(self.tmpdir, 'foo', 'bar', '2')))
class TestZip(ArchiveTester, unittest.TestCase):
    # Exercise the shared extraction tests against a .zip fixture.
    archive = 'foobar.zip'
class TestTar(ArchiveTester, unittest.TestCase):
    # Exercise the shared extraction tests against an uncompressed .tar.
    archive = 'foobar.tar'
class TestGzipTar(ArchiveTester, unittest.TestCase):
    # Exercise the shared extraction tests against a gzip-compressed tar.
    archive = 'foobar.tar.gz'
class TestBzip2Tar(ArchiveTester, unittest.TestCase):
    # Exercise the shared extraction tests against a bzip2-compressed tar.
    archive = 'foobar.tar.bz2'
|
ramondelafuente/ansible
|
refs/heads/devel
|
test/units/template/test_templar.py
|
59
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible import constants as C
from ansible.errors import *
from ansible.plugins import filter_loader, lookup_loader, module_loader
from ansible.plugins.strategy import SharedPluginLoaderObj
from ansible.template import Templar
from units.mock.loader import DictDataLoader
class TestTemplar(unittest.TestCase):
    """Unit tests for ansible.template.Templar covering simple templating,
    backslash escaping, and Jinja2 extension configuration."""
    def setUp(self):
        """Build a Templar over a fake loader and a fixed variable set."""
        fake_loader = DictDataLoader({
            "/path/to/my_file.txt": "foo\n",
        })
        # NOTE(review): shared_loader is never used below — looks like dead
        # code or a leftover from an earlier Templar signature; confirm.
        shared_loader = SharedPluginLoaderObj()
        variables = dict(
            foo="bar",
            bam="{{foo}}",
            num=1,
            var_true=True,
            var_false=False,
            var_dict=dict(a="b"),
            bad_dict="{a='b'",
            var_list=[1],
            recursive="{{recursive}}",
        )
        self.templar = Templar(loader=fake_loader, variables=variables)
    def tearDown(self):
        pass
    def test_templar_simple(self):
        """Basic templating, type preservation, and error cases."""
        templar = self.templar
        # test some basic templating
        self.assertEqual(templar.template("{{foo}}"), "bar")
        self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
        self.assertEqual(templar.template("foo", convert_bare=True), "bar")
        self.assertEqual(templar.template("{{bam}}"), "bar")
        self.assertEqual(templar.template("{{num}}"), 1)
        self.assertEqual(templar.template("{{var_true}}"), True)
        self.assertEqual(templar.template("{{var_false}}"), False)
        self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
        self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
        self.assertEqual(templar.template("{{var_list}}"), [1])
        self.assertEqual(templar.template(1, convert_bare=True), 1)
        # force errors
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
        self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
        self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
        # test with fail_on_undefined=False
        self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
        # test set_available_variables()
        templar.set_available_variables(variables=dict(foo="bam"))
        self.assertEqual(templar.template("{{foo}}"), "bam")
        # variables must be a dict() for set_available_variables()
        self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
    def test_templar_escape_backslashes(self):
        """Backslash handling with escape_backslashes on and off."""
        # Rule of thumb: If escape backslashes is True you should end up with
        # the same number of backslashes as when you started.
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
    def test_template_jinja2_extensions(self):
        """DEFAULT_JINJA2_EXTENSIONS is parsed into a list of extensions;
        the original constant is restored afterwards."""
        fake_loader = DictDataLoader({})
        templar = Templar(loader=fake_loader)
        old_exts = C.DEFAULT_JINJA2_EXTENSIONS
        try:
            C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
            self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
        finally:
            C.DEFAULT_JINJA2_EXTENSIONS = old_exts
|
macedir/Software-Development
|
refs/heads/master
|
Python/shortest dec to hex.py
|
1
|
# Interactive loop: prompt for a decimal integer and print it in hexadecimal.
# Runs until interrupted (no explicit exit path).
while True:
    try:
        print(hex(int(input('What do you want to convert into hexadecimal? '))))
        print('')
    # int() raises ValueError on non-numeric text; TypeError is kept
    # defensively although input() always returns a string.
    except (ValueError, TypeError):
        print('Invalid input')
        print('')
|
olegpshenichniy/Booktype
|
refs/heads/master
|
lib/booktype/apps/reader/views.py
|
6
|
# This file is part of Booktype.
# Copyright (c) 2014 Helmy Giacoman <helmy.giacoman@sourcefabric.org>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import json
from django.core.exceptions import PermissionDenied
from django.views import static
from django.http import Http404
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import DetailView, DeleteView, UpdateView
from braces.views import LoginRequiredMixin, JSONResponseMixin
from booktype.apps.core import views
from booktype.utils import misc, security
from booktype.utils.misc import get_available_themes
from booktype.utils.book import remove_book
from booktype.apps.core.views import BasePageView, NeverCacheMixin
from booktype.apps.themes.models import BookTheme
from booki.editor.models import Book, BookHistory, BookToc, Chapter
from .forms import EditBookInfoForm
logger = logging.getLogger('booktype')
class BaseReaderView(object):
    """
    Base Reader View Class with the common attributes
    """
    model = Book
    slug_field = 'url_title'
    slug_url_kwarg = 'bookid'
    context_object_name = 'book'
    # Set when the requested book (or sub-object) does not exist; switches
    # the template and response status in the methods below.
    not_found = False
    def get(self, request, *args, **kwargs):
        """Serve the view, converting Http404 into a rendered 404 page."""
        try:
            return super(BaseReaderView, self).get(request, *args, **kwargs)
        except Http404:
            self.not_found = True
            context = dict(
                not_found_object=_("Book"),
                object_name=self.kwargs['bookid']
            )
            return self.render_to_response(context)
    def get_template_names(self):
        # Use the shared "does not exist" template when lookup failed.
        if self.not_found:
            return "reader/errors/_does_not_exist.html"
        return super(BaseReaderView, self).get_template_names()
    def render_to_response(self, context, **response_kwargs):
        """Render normally, or with 404 status and error titles when the
        object was not found."""
        if self.not_found:
            response_kwargs.setdefault('status', 404)
            context.update({
                'request': self.request,
                'page_title': _("%(object)s not found!") % {
                    'object': context['not_found_object']
                },
                'title': _("Error 404")
            })
        return super(BaseReaderView, self).render_to_response(
            context, **response_kwargs)
class PublishedBookView(BaseReaderView, BasePageView, DetailView):
    """Redirect requests for a published book to its info page.

    TODO: implement real functionality once a book can be marked as
    published; for now any resolvable book redirects to its info page.
    """
    template_name = "reader/book_published.html"
    def render_to_response(self, context, **response_kwargs):
        """Redirect to the info page when the book exists, otherwise render
        this view normally.

        Fixes two defects in the original: a bare ``except:`` that swallowed
        every exception (including system-exiting ones), and a fall-through
        that returned ``None`` — not an HttpResponse — when ``get_object()``
        succeeded but yielded a falsy book.
        """
        try:
            book = self.get_object()
        except Http404:
            book = None
        if book:
            return redirect('reader:infopage', bookid=book.url_title)
        # Always return a real response object.
        return super(PublishedBookView, self).render_to_response(
            context, **response_kwargs)
class InfoPageView(views.SecurityMixin, BaseReaderView, BasePageView, DetailView):
    """Book details page: admins, collaborators, recent history, group."""
    SECURITY_BRIDGE = security.BookSecurity
    template_name = "reader/book_info_page.html"
    page_title = _("Book Details Page")
    title = _("Book Details")
    def check_permissions(self, request, *args, **kwargs):
        """Deny access unless the user may view (hidden) book info."""
        can_view_book_info = self.security.has_perm("reader.can_view_book_info")
        can_view_hidden_book_info = self.security.has_perm("reader.can_view_hidden_book_info")
        if self.security.book.hidden and not can_view_hidden_book_info:
            raise PermissionDenied
        elif not can_view_book_info and not can_view_hidden_book_info:
            raise PermissionDenied
    def get_context_data(self, **kwargs):
        """Collect admins, collaborators, and the last 20 history entries
        for the book's current version."""
        book = self.object
        book_version = book.get_version()
        # NOTE(review): kind=2 appears to select "edit" history entries —
        # confirm against the BookHistory kind constants.
        book_collaborators_ids = BookHistory.objects.filter(
            version=book_version, kind=2).values_list('user', flat=True)
        context = super(InfoPageView, self).get_context_data(**kwargs)
        context['is_admin'] = self.request.user.is_superuser
        context['book_admins'] = book.bookipermission_set.filter(permission=1)
        context['book_collaborators'] = User.objects.filter(
            id__in=book_collaborators_ids)
        context['book_history'] = BookHistory.objects.filter(
            version=book_version).order_by('-modified')[:20]
        context['book_group'] = book.group
        context['is_book_admin'] = self.security.is_admin()
        return context
class PermissionsView(BaseReaderView, BasePageView, JSONResponseMixin, DetailView):
    """
    Returns the list of permissions a user has on one book
    """
    def render_to_response(self, context, **response_kwargs):
        """Return a JSON payload with the user's admin flag and the
        permission list for this book."""
        book = self.object
        book_security = security.get_security_for_book(self.request.user, book)
        book_permissions = security.get_user_permissions(self.request.user, book)
        return self.render_json_response({
            'admin': book_security.is_admin(),
            'permissions': book_permissions,
        })
class SingleNextMixin(object):
    """
    Just adds a next attribute to be used as redirect value
    after post action
    """
    def dispatch(self, request, *args, **kwargs):
        # Read 'next' from the querydict matching the HTTP method
        # (request.GET for GET, request.POST for POST, ...).
        _request_data = getattr(request, request.method)
        self.next = _request_data.get('next', None)
        return super(SingleNextMixin, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        # Expose the redirect target to templates.
        context = super(SingleNextMixin, self).get_context_data(**kwargs)
        context['next'] = self.next
        return context
class EditBookInfoView(SingleNextMixin, LoginRequiredMixin,
                       BaseReaderView, UpdateView):
    """Edit a book's metadata and optionally replace its cover image."""
    template_name = "reader/book_info_edit.html"
    context_object_name = 'book'
    def get_form(self, form_class=EditBookInfoForm):
        """Build the form, handing the current user through to it."""
        return form_class(user=self.request.user, **self.get_form_kwargs())
    def form_valid(self, form):
        """Save the book; if a cover file was uploaded, persist it as the
        book cover. Warns (instead of failing) when the cover cannot be
        processed."""
        self.object = form.save()
        all_ok = True
        # Idiomatic membership test (was: ``in form.files.keys()``).
        if 'book_cover' in form.files:
            try:
                fh, fname = misc.save_uploaded_as_file(form.files['book_cover'])
                self.object.set_cover(fname)
                os.unlink(fname)
                self.object.save()
            # Modernized from the Python-2-only ``except Exception, e:``
            # syntax; ``as`` works on Python 2.6+ and Python 3.
            except Exception as e:
                logger.exception(e)
                all_ok = False
        if all_ok:
            messages.success(self.request, _('Successfully changed book info.'))
        else:
            messages.warning(self.request, _('Could not upload cover image.'))
        self.template_name = "reader/book_info_edit_redirect.html"
        return self.render_to_response(context=self.get_context_data())
class DeleteBookView(SingleNextMixin, LoginRequiredMixin,
                     BaseReaderView, DeleteView):
    """Delete a book after the user re-types its exact title to confirm."""
    template_name = "reader/book_delete.html"
    def post(self, *args, **kwargs):
        """Remove the book when the user holds the delete permission and the
        submitted title matches; otherwise render the error template."""
        request = self.request
        book = self.object = self.get_object()
        title = request.POST.get("title", "")
        book_security = security.get_security_for_book(request.user, book)
        # Removed the unused ``book_permissions`` lookup the original
        # performed here — its result was never read.
        # Assume failure; switched to the success template below.
        self.template_name = "reader/book_delete_error.html"
        if (book_security.has_perm('edit.delete_book') and
                title.strip() == book.title.strip()):
            remove_book(book)
            self.template_name = "reader/book_delete_redirect.html"
            messages.success(request, _('Book successfully deleted.'))
        return self.render_to_response(context=self.get_context_data())
class DraftChapterView(views.SecurityMixin, BaseReaderView, BasePageView, DetailView):
    """Render the draft of one chapter (or the first chapter in the TOC)."""
    SECURITY_BRIDGE = security.BookSecurity
    template_name = "reader/book_draft_page.html"
    page_title = _("Chapter Draft")
    title = ""
    def check_permissions(self, request, *args, **kwargs):
        """Deny access unless the user may view (hidden) drafts."""
        can_view_draft = self.security.has_perm("reader.can_view_draft")
        can_view_hidden_draft = self.security.has_perm("reader.can_view_hidden_draft")
        if self.security.book.hidden and not can_view_hidden_draft:
            raise PermissionDenied
        elif not can_view_draft and not can_view_hidden_draft:
            raise PermissionDenied
    def get_context_data(self, **kwargs):
        """Pick the requested chapter — or, if none was requested, the first
        chapter found in the TOC — plus the TOC and version info."""
        book = self.object
        content = None
        book_version = book.get_version(self.kwargs.get('version', None))
        context = super(DraftChapterView, self).get_context_data(**kwargs)
        if 'chapter' in self.kwargs:
            try:
                content = get_object_or_404(Chapter, version=book_version,
                                            url_title=self.kwargs['chapter'])
            except Http404:
                # Reuse the BaseReaderView not-found machinery for chapters.
                self.not_found = True
                context = dict(
                    not_found_object=_("Chapter"),
                    object_name=self.kwargs['chapter']
                )
                return context
        toc_items = BookToc.objects.filter(
            version=book_version).order_by("-weight")
        # Default to the first real chapter in TOC order.
        for chapter in toc_items:
            if not content and chapter.is_chapter():
                content = chapter.chapter
                break
        context['content'] = content
        context['toc_items'] = toc_items
        context['book_version'] = book_version.get_version()
        return context
class FullView(views.SecurityMixin, BaseReaderView, BasePageView, DetailView):
    """Render the whole book on one page, applying the book's theme."""
    SECURITY_BRIDGE = security.BookSecurity
    template_name = "reader/book_full_view.html"
    page_title = _("Book full view")
    title = ""
    def check_permissions(self, request, *args, **kwargs):
        """Deny access unless the user may view the (hidden) full page."""
        can_view_full_page = self.security.has_perm("reader.can_view_full_page")
        can_view_hidden_full_page = self.security.has_perm("reader.can_view_hidden_full_page")
        if self.security.book.hidden and not can_view_hidden_full_page:
            raise PermissionDenied
        elif not can_view_full_page and not can_view_hidden_full_page:
            raise PermissionDenied
    def get_context_data(self, **kwargs):
        """Build TOC plus theme context, creating or repairing the book's
        BookTheme record so it always references an available theme."""
        book = self.object
        context = super(FullView, self).get_context_data(**kwargs)
        book_version = book.get_version(self.kwargs.get('version', None))
        toc_items = BookToc.objects.filter(
            version=book_version).order_by("-weight")
        available_themes = get_available_themes()
        theme_active = available_themes[0]
        try:
            theme = BookTheme.objects.get(book=book)
            # override default one if it's available
            if theme.active in available_themes:
                theme_active = theme.active
            else:
                theme.active = theme_active
                theme.save()
        except BookTheme.DoesNotExist:
            theme = BookTheme(book=book, active=theme_active)
            theme.save()
        context['book_version'] = book_version.get_version()
        context['toc_items'] = toc_items
        # NOTE(review): uses the ``book.booktheme`` reverse relation rather
        # than the local ``theme`` object saved above — likely equivalent
        # after the save, but confirm there is no stale-relation issue.
        context['theme'] = book.booktheme
        return context
class BookCoverView(NeverCacheMixin, BaseReaderView, DetailView):
    """
    Simple DetailView inherit clase to serve the book cover image
    """
    # Cover images are read-only; only GET is allowed.
    http_method_names = [u'get']
    def render_to_response(self, context, **response_kwargs):
        """
        Override render_to_response to serve the book cover static image
        """
        return static.serve(
            self.request, self.object.cover.name, settings.MEDIA_ROOT)
|
kneeks/is210-week-03-warmup
|
refs/heads/master
|
task_05.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Splinter would be proud."""
# Theme-song sentence used as the split fixture.
TEENAGE_MUTANT_NINJAS = ('Michaelangelo. Leonardo. Rafael. Donatello. Heroes '
                         'in a half shell.')
# Splitting on '. ' yields the four turtle names plus the trailing
# 'Heroes in a half shell.' sentence (which keeps its final period).
TURTLE_POWER = TEENAGE_MUTANT_NINJAS.split('. ')
|
bjodah/mpmath
|
refs/heads/master
|
mpmath/tests/test_summation.py
|
15
|
from mpmath import *
def test_sumem():
    """Euler-Maclaurin summation over finite ranges at 15 digits."""
    mp.dps = 15
    assert sumem(lambda k: 1/k**2.5, [50, 100]).ae(0.0012524505324784962)
    assert sumem(lambda k: k**4 + 3*k + 1, [10, 100]).ae(2050333103)
def test_nsum():
    """nsum over finite and infinite ranges against known closed forms."""
    mp.dps = 15
    assert nsum(lambda x: x**2, [1, 3]) == 14
    assert nsum(lambda k: 1/factorial(k), [0, inf]).ae(e)
    assert nsum(lambda k: (-1)**(k+1) / k, [1, inf]).ae(log(2))
    assert nsum(lambda k: (-1)**(k+1) / k**2, [1, inf]).ae(pi**2 / 12)
    assert nsum(lambda k: (-1)**k / log(k), [2, inf]).ae(0.9242998972229388)
    assert nsum(lambda k: 1/k**2, [1, inf]).ae(pi**2 / 6)
    assert nsum(lambda k: 2**k/fac(k), [0, inf]).ae(exp(2))
    # method='e' forces Euler-Maclaurin based extrapolation.
    assert nsum(lambda k: 1/k**2, [4, inf], method='e').ae(0.2838229557371153)
def test_nprod():
    """Infinite and finite products (method='r' uses Richardson)."""
    mp.dps = 15
    assert nprod(lambda k: exp(1/k**2), [1,inf], method='r').ae(exp(pi**2/6))
    assert nprod(lambda x: x**2, [1, 3]) == 36
def test_fsum():
    """fsum: exact summation, absolute/squared options, complex values,
    infinities, NaN propagation, and interval arithmetic."""
    mp.dps = 15
    assert fsum([]) == 0
    assert fsum([-4]) == -4
    assert fsum([2,3]) == 5
    # Tiny terms must not be lost regardless of order.
    assert fsum([1e-100,1]) == 1
    assert fsum([1,1e-100]) == 1
    assert fsum([1e100,1]) == 1e100
    assert fsum([1,1e100]) == 1e100
    assert fsum([1e-100,0]) == 1e-100
    assert fsum([1e-100,1e100,1e-100]) == 1e100
    assert fsum([2,1+1j,1]) == 4+1j
    assert fsum([2,inf,3]) == inf
    # absolute=1 sums |x|; squared=1 sums x**2.
    assert fsum([2,-1], absolute=1) == 3
    assert fsum([2,-1], squared=1) == 5
    assert fsum([1,1+j], squared=1) == 1+2j
    assert fsum([1,3+4j], absolute=1) == 6
    assert fsum([1,2+3j], absolute=1, squared=1) == 14
    assert isnan(fsum([inf,-inf]))
    assert fsum([inf,-inf], absolute=1) == inf
    assert fsum([inf,-inf], squared=1) == inf
    assert fsum([inf,-inf], absolute=1, squared=1) == inf
    # Interval variant preserves interval endpoints.
    assert iv.fsum([1,mpi(2,3)]) == mpi(3,4)
def test_fprod():
    """fprod: empty product is 1; simple finite product."""
    mp.dps = 15
    assert fprod([]) == 1
    assert fprod([2,3]) == 6
|
tuxos/Django-facebook
|
refs/heads/master
|
docs/docs_env/Lib/encodings/bz2_codec.py
|
501
|
""" Python 'bz2_codec' Codec - bz2 compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Adapted by Raymond Hettinger from zlib_codec.py which was written
by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import bz2 # this codec needs the optional bz2 module !
### Codec APIs
def bz2_encode(input, errors='strict'):
    """Compress *input* with bz2.

    Returns a ``(output, length_consumed)`` tuple, per the codec encode
    protocol. Only 'strict' error handling is supported by this codec.
    """
    assert errors == 'strict'
    return (bz2.compress(input), len(input))
def bz2_decode(input, errors='strict'):
    """Decompress the bz2 stream *input*.

    Returns a ``(output, length_consumed)`` tuple, per the codec decode
    protocol; the whole input buffer is consumed. Only 'strict' error
    handling is supported by this codec.
    """
    assert errors == 'strict'
    return (bz2.decompress(input), len(input))
class Codec(codecs.Codec):
    """Stateless codec delegating to the module-level bz2 helpers."""
    def encode(self, input, errors='strict'):
        return bz2_encode(input, errors)
    def decode(self, input, errors='strict'):
        return bz2_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Feed chunks through one BZ2Compressor; flush when *final* is true."""

    def __init__(self, errors='strict'):
        # This codec only supports strict error handling.
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = bz2.BZ2Compressor()

    def encode(self, input, final=False):
        """Compress one chunk; append the compressor's tail on the last one."""
        data = self.compressobj.compress(input)
        if not final:
            return data
        return data + self.compressobj.flush()

    def reset(self):
        # A flushed compressor cannot be reused; start a fresh one.
        self.compressobj = bz2.BZ2Compressor()
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Feed bz2 chunks through one BZ2Decompressor."""

    def __init__(self, errors='strict'):
        # This codec only supports strict error handling.
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = bz2.BZ2Decompressor()

    def decode(self, input, final=False):
        """Decompress one chunk; data past end-of-stream yields ''."""
        try:
            decoded = self.decompressobj.decompress(input)
        except EOFError:
            return ''
        return decoded

    def reset(self):
        # Start over with a fresh decompressor for a new stream.
        self.decompressobj = bz2.BZ2Decompressor()
class StreamWriter(Codec,codecs.StreamWriter):
    # Behavior comes entirely from Codec.encode + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Behavior comes entirely from Codec.decode + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the encodings registry looks up."""
    return codecs.CodecInfo(
        name="bz2",
        encode=bz2_encode,
        decode=bz2_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
|
taiyuanfang/gyp
|
refs/heads/master
|
test/win/gyptest-link-base-address.py
|
137
|
#!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the base address setting is extracted properly.
"""
import TestGyp
import re
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('base-address.gyp', chdir=CHDIR)
  test.build('base-address.gyp', test.ALL, chdir=CHDIR)

  def GetHeaders(exe):
    """Return dumpbin /headers output for a built binary."""
    full_path = test.built_file_path(exe, chdir=CHDIR)
    return test.run_dumpbin('/headers', full_path)

  # Extract the image base address from the headers output.  Raw string so
  # '\s' is a regex escape rather than a deprecated string escape.
  image_base_reg_ex = re.compile(r'.*\s+([0-9]+) image base.*', re.DOTALL)

  def CheckImageBase(binary, expected):
    """Fail the test unless *binary*'s image base equals *expected*.

    Replaces four copies of the same match-and-compare boilerplate.
    """
    match = image_base_reg_ex.match(GetHeaders(binary))
    if not match or not match.group(1):
      test.fail_test()
    if match.group(1) != expected:
      test.fail_test()

  # Explicit base addresses set in base-address.gyp.
  CheckImageBase('test_base_specified_exe.exe', '420000')
  CheckImageBase('test_base_specified_dll.dll', '10420000')
  # Linker defaults: 0x400000 for executables, 0x10000000 for DLLs.
  CheckImageBase('test_base_default_exe.exe', '400000')
  CheckImageBase('test_base_default_dll.dll', '10000000')

  test.pass_test()
|
jiangzhuo/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_file_eintr.py
|
122
|
# Written to test interrupted system calls interfering with our many buffered
# IO implementations. http://bugs.python.org/issue12268
#
# It was suggested that this code could be merged into test_io and the tests
# made to work using the same method as the existing signal tests in test_io.
# I was unable to get single process tests using alarm or setitimer that way
# to reproduce the EINTR problems. This process based test suite reproduces
# the problems prior to the issue12268 patch reliably on Linux and OSX.
# - gregory.p.smith
import os
import select
import signal
import subprocess
import sys
from test.support import run_unittest
import time
import unittest
# Test import all of the things we're about to try testing up front.
from _io import FileIO
@unittest.skipUnless(os.name == 'posix', 'tests requires a posix system.')
class TestFileIOSignalInterrupt(unittest.TestCase):
    """EINTR tests for FileIO using a child reader process.

    Each test spawns a subprocess that blocks in a read method, sends it
    SIGINT while it is blocked, and checks that no data is lost and the
    Python-level signal handler ran. Subclasses swap in other IO layers
    via _generate_infile_setup_code().
    """
    def setUp(self):
        # Created lazily by _test_reading; tearDown cleans it up.
        self._process = None
    def tearDown(self):
        # Kill a straggling child so a failed test cannot leak processes.
        if self._process and self._process.poll() is None:
            try:
                self._process.kill()
            except OSError:
                pass
    def _generate_infile_setup_code(self):
        """Returns the infile = ... line of code for the reader process.
        subclasseses should override this to test different IO objects.
        """
        return ('import _io ;'
                'infile = _io.FileIO(sys.stdin.fileno(), "rb")')
    def fail_with_process_info(self, why, stdout=b'', stderr=b'',
                               communicate=True):
        """A common way to cleanup and fail with useful debug output.
        Kills the process if it is still running, collects remaining output
        and fails the test with an error message including the output.
        Args:
            why: Text to go after "Error from IO process" in the message.
            stdout, stderr: standard output and error from the process so
                    far to include in the error message.
            communicate: bool, when True we call communicate() on the process
                    after killing it to gather additional output.
        """
        if self._process.poll() is None:
            time.sleep(0.1)  # give it time to finish printing the error.
            try:
                self._process.terminate()  # Ensure it dies.
            except OSError:
                pass
        if communicate:
            stdout_end, stderr_end = self._process.communicate()
            stdout += stdout_end
            stderr += stderr_end
        self.fail('Error from IO process %s:\nSTDOUT:\n%sSTDERR:\n%s\n' %
                  (why, stdout.decode(), stderr.decode()))
    def _test_reading(self, data_to_write, read_and_verify_code):
        """Generic buffered read method test harness to validate EINTR behavior.
        Also validates that Python signal handlers are run during the read.
        Args:
            data_to_write: String to write to the child process for reading
                before sending it a signal, confirming the signal was handled,
                writing a final newline and closing the infile pipe.
            read_and_verify_code: Single "line" of code to read from a file
                object named 'infile' and validate the result.  This will be
                executed as part of a python subprocess fed data_to_write.
        """
        infile_setup_code = self._generate_infile_setup_code()
        # Total pipe IO in this function is smaller than the minimum posix OS
        # pipe buffer size of 512 bytes.  No writer should block.
        assert len(data_to_write) < 512, 'data_to_write must fit in pipe buf.'
        # Start a subprocess to call our read method while handling a signal.
        # The child installs a SIGINT handler that prints "$" to stderr, then
        # announces readiness before executing the read under test.
        self._process = subprocess.Popen(
                [sys.executable, '-u', '-c',
                 'import signal, sys ;'
                 'signal.signal(signal.SIGINT, '
                 'lambda s, f: sys.stderr.write("$\\n")) ;'
                 + infile_setup_code + ' ;' +
                 'sys.stderr.write("Worm Sign!\\n") ;'
                 + read_and_verify_code + ' ;' +
                 'infile.close()'
                ],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
        # Wait for the signal handler to be installed.
        worm_sign = self._process.stderr.read(len(b'Worm Sign!\n'))
        if worm_sign != b'Worm Sign!\n':  # See also, Dune by Frank Herbert.
            self.fail_with_process_info('while awaiting a sign',
                                        stderr=worm_sign)
        self._process.stdin.write(data_to_write)
        signals_sent = 0
        rlist = []
        # We don't know when the read_and_verify_code in our child is actually
        # executing within the read system call we want to interrupt.  This
        # loop waits for a bit before sending the first signal to increase
        # the likelihood of that.  Implementations without correct EINTR
        # and signal handling usually fail this test.
        while not rlist:
            rlist, _, _ = select.select([self._process.stderr], (), (), 0.05)
            self._process.send_signal(signal.SIGINT)
            signals_sent += 1
            if signals_sent > 200:
                self._process.kill()
                self.fail('reader process failed to handle our signals.')
        # This assumes anything unexpected that writes to stderr will also
        # write a newline.  That is true of the traceback printing code.
        signal_line = self._process.stderr.readline()
        if signal_line != b'$\n':
            self.fail_with_process_info('while awaiting signal',
                                        stderr=signal_line)
        # We append a newline to our input so that a readline call can
        # end on its own before the EOF is seen and so that we're testing
        # the read call that was interrupted by a signal before the end of
        # the data stream has been reached.
        stdout, stderr = self._process.communicate(input=b'\n')
        if self._process.returncode:
            self.fail_with_process_info(
                    'exited rc=%d' % self._process.returncode,
                    stdout, stderr, communicate=False)
        # PASS!
    # String format for the read_and_verify_code used by read methods.
    _READING_CODE_TEMPLATE = (
            'got = infile.{read_method_name}() ;'
            'expected = {expected!r} ;'
            'assert got == expected, ('
            '"{read_method_name} returned wrong data.\\n"'
            '"got data %r\\nexpected %r" % (got, expected))'
            )
    def test_readline(self):
        """readline() must handle signals and not lose data."""
        self._test_reading(
                data_to_write=b'hello, world!',
                read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                        read_method_name='readline',
                        expected=b'hello, world!\n'))
    def test_readlines(self):
        """readlines() must handle signals and not lose data."""
        self._test_reading(
                data_to_write=b'hello\nworld!',
                read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                        read_method_name='readlines',
                        expected=[b'hello\n', b'world!\n']))
    def test_readall(self):
        """readall() must handle signals and not lose data."""
        self._test_reading(
                data_to_write=b'hello\nworld!',
                read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                        read_method_name='readall',
                        expected=b'hello\nworld!\n'))
        # read() is the same thing as readall().
        self._test_reading(
                data_to_write=b'hello\nworld!',
                read_and_verify_code=self._READING_CODE_TEMPLATE.format(
                        read_method_name='read',
                        expected=b'hello\nworld!\n'))
class TestBufferedIOSignalInterrupt(TestFileIOSignalInterrupt):
    """Run the signal-interruption tests against an io.BufferedReader."""

    def _generate_infile_setup_code(self):
        """Returns the infile = ... line of code to make a BufferedReader."""
        setup_code = ('infile = open(sys.stdin.fileno(), "rb") ;'
                      'import _io ;assert isinstance(infile, _io.BufferedReader)')
        return setup_code

    def test_readall(self):
        """BufferedReader.read() must handle signals and not lose data."""
        verify_code = self._READING_CODE_TEMPLATE.format(
            read_method_name='read',
            expected=b'hello\nworld!\n')
        self._test_reading(data_to_write=b'hello\nworld!',
                           read_and_verify_code=verify_code)
class TestTextIOSignalInterrupt(TestFileIOSignalInterrupt):
    """Run the signal-interruption tests against an io.TextIOWrapper."""

    def _generate_infile_setup_code(self):
        """Returns the infile = ... line of code to make a TextIOWrapper."""
        setup_code = ('infile = open(sys.stdin.fileno(), "rt", newline=None) ;'
                      'import _io ;assert isinstance(infile, _io.TextIOWrapper)')
        return setup_code

    def test_readline(self):
        """readline() must handle signals and not lose data."""
        verify_code = self._READING_CODE_TEMPLATE.format(
            read_method_name='readline',
            expected='hello, world!\n')
        self._test_reading(data_to_write=b'hello, world!',
                           read_and_verify_code=verify_code)

    def test_readlines(self):
        """readlines() must handle signals and not lose data."""
        # Universal newlines: the \r\n written is read back as \n.
        verify_code = self._READING_CODE_TEMPLATE.format(
            read_method_name='readlines',
            expected=['hello\n', 'world!\n'])
        self._test_reading(data_to_write=b'hello\r\nworld!',
                           read_and_verify_code=verify_code)

    def test_readall(self):
        """read() must handle signals and not lose data."""
        verify_code = self._READING_CODE_TEMPLATE.format(
            read_method_name='read',
            expected="hello\nworld!\n")
        self._test_reading(data_to_write=b'hello\nworld!',
                           read_and_verify_code=verify_code)
def test_main():
    """Collect every TestCase subclass defined at module level and run it."""
    cases = [obj for obj in globals().values()
             if isinstance(obj, type) and issubclass(obj, unittest.TestCase)]
    run_unittest(*cases)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_main()
|
leiferikb/bitpop
|
refs/heads/master
|
src/third_party/mesa/src/scons/gallium.py
|
12
|
"""gallium
Frontend-tool for Gallium3D architecture.
"""
#
# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import distutils.version
import os
import os.path
import re
import subprocess
import platform as _platform
import SCons.Action
import SCons.Builder
import SCons.Scanner
def symlink(target, source, env):
    """SCons action: (re)create *target* as a relative symlink to *source*.

    target and source are one-element SCons node lists; env is unused but
    required by the builder-action signature.
    """
    link_path = str(target[0])
    real_path = str(source[0])
    # islink() also catches dangling links, which os.path.exists() misses.
    if os.path.islink(link_path) or os.path.exists(link_path):
        os.remove(link_path)
    os.symlink(os.path.basename(real_path), link_path)
def install(env, source, subdir):
    """Schedule *source* for installation under <build_dir>/<subdir>."""
    root = env.Dir('#.').srcnode().abspath
    dest = os.path.join(root, env['build_dir'], subdir)
    return env.Install(dest, source)
def install_program(env, source):
    """Install an executable into the build tree's bin directory."""
    return install(env, source, 'bin')
def install_shared_library(env, sources, version = ()):
    """Install shared libraries, creating the chain of version symlinks.

    On Windows (.dll suffix) the DLLs go to bin/ and the import libraries
    to lib/.  Elsewhere each library is installed as name.X.Y.Z and a
    chain of symlinks name.X.Y -> name.X.Y.Z, name.X -> name.X.Y,
    name -> name.X is created.  Returns the list of target nodes.
    """
    targets = []
    install_dir = os.path.join(env.Dir('#.').srcnode().abspath, env['build_dir'])
    # Normalize version components to strings so '.'.join works below.
    version = tuple(map(str, version))
    if env['SHLIBSUFFIX'] == '.dll':
        dlls = env.FindIxes(sources, 'SHLIBPREFIX', 'SHLIBSUFFIX')
        targets += install(env, dlls, 'bin')
        libs = env.FindIxes(sources, 'LIBPREFIX', 'LIBSUFFIX')
        targets += install(env, libs, 'lib')
    else:
        for source in sources:
            target_dir = os.path.join(install_dir, 'lib')
            target_name = '.'.join((str(source),) + version)
            last = env.InstallAs(os.path.join(target_dir, target_name), source)
            targets += last
            # Drop one version component per iteration; each shorter name is
            # symlinked (via the symlink action) onto the previous target.
            while len(version):
                version = version[:-1]
                target_name = '.'.join((str(source),) + version)
                action = SCons.Action.Action(symlink, " Symlinking $TARGET ...")
                last = env.Command(os.path.join(target_dir, target_name), last, action)
                targets += last
    return targets
def createInstallMethods(env):
    """Expose the install helpers as methods on the SCons environment."""
    env.AddMethod(install_program, 'InstallProgram')
    env.AddMethod(install_shared_library, 'InstallSharedLibrary')
def num_jobs():
    """Return the number of parallel build jobs (CPU count) to use.

    Tries, in order: multiprocessing.cpu_count(), the Windows
    NUMBER_OF_PROCESSORS environment variable, POSIX sysconf, and the
    BSD sysctl utility; falls back to 1 if nothing works.
    """
    try:
        # Portable and available since Python 2.6; preferred first choice.
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass
    try:
        return int(os.environ['NUMBER_OF_PROCESSORS'])
    except (ValueError, KeyError):
        pass
    try:
        return os.sysconf('SC_NPROCESSORS_ONLN')
    except (ValueError, OSError, AttributeError):
        pass
    try:
        # os.popen2 is deprecated (and absent on Python 3): guard
        # AttributeError/OSError too, not just a bad int conversion.
        return int(os.popen2("sysctl -n hw.ncpu")[1].read())
    except (ValueError, OSError, AttributeError):
        pass
    return 1
def generate(env):
    """Common environment generation code

    Configures the SCons construction environment for a Gallium build:
    toolchain selection, CC/CFLAGS/... environment-variable overrides,
    cross-compile detection, build-directory layout, preprocessor
    defines, compiler/assembler/linker flags, default libraries, OpenMP,
    and the custom tools plus pkg-config checks.

    NOTE: written for Python 2 (dict.has_key, print statements).
    """
    # Tell tools which machine to compile for
    env['TARGET_ARCH'] = env['machine']
    env['MSVS_ARCH'] = env['machine']
    # Toolchain
    platform = env['platform']
    env.Tool(env['toolchain'])
    # Allow override compiler and specify additional flags from environment
    if os.environ.has_key('CC'):
        env['CC'] = os.environ['CC']
        # Update CCVERSION to match
        pipe = SCons.Action._subproc(env, [env['CC'], '--version'],
                                     stdin = 'devnull',
                                     stderr = 'devnull',
                                     stdout = subprocess.PIPE)
        if pipe.wait() == 0:
            line = pipe.stdout.readline()
            match = re.search(r'[0-9]+(\.[0-9]+)+', line)
            if match:
                env['CCVERSION'] = match.group(0)
    if os.environ.has_key('CFLAGS'):
        env['CCFLAGS'] += SCons.Util.CLVar(os.environ['CFLAGS'])
    if os.environ.has_key('CXX'):
        env['CXX'] = os.environ['CXX']
    if os.environ.has_key('CXXFLAGS'):
        env['CXXFLAGS'] += SCons.Util.CLVar(os.environ['CXXFLAGS'])
    if os.environ.has_key('LDFLAGS'):
        env['LINKFLAGS'] += SCons.Util.CLVar(os.environ['LDFLAGS'])
    # Compiler-family flags consulted throughout the rest of the setup.
    env['gcc'] = 'gcc' in os.path.basename(env['CC']).split('-')
    env['msvc'] = env['CC'] == 'cl'
    env['suncc'] = env['platform'] == 'sunos' and os.path.basename(env['CC']) == 'cc'
    env['clang'] = env['CC'] == 'clang'
    env['icc'] = 'icc' == os.path.basename(env['CC'])
    if env['msvc'] and env['toolchain'] == 'default' and env['machine'] == 'x86_64':
        # MSVC x64 support is broken in earlier versions of scons
        env.EnsurePythonVersion(2, 0)
    # shortcuts
    machine = env['machine']
    platform = env['platform']
    x86 = env['machine'] == 'x86'
    ppc = env['machine'] == 'ppc'
    gcc = env['gcc']
    msvc = env['msvc']
    suncc = env['suncc']
    icc = env['icc']
    # Determine whether we are cross compiling; in particular, whether we need
    # to compile code generators with a different compiler as the target code.
    host_platform = _platform.system().lower()
    if host_platform.startswith('cygwin'):
        host_platform = 'cygwin'
    # On 64-bit Windows a 32-bit Python reports the emulated architecture;
    # PROCESSOR_ARCHITEW6432 holds the real one, so prefer it.
    host_machine = os.environ.get('PROCESSOR_ARCHITEW6432', os.environ.get('PROCESSOR_ARCHITECTURE', _platform.machine()))
    host_machine = {
        'x86': 'x86',
        'i386': 'x86',
        'i486': 'x86',
        'i586': 'x86',
        'i686': 'x86',
        'ppc' : 'ppc',
        'AMD64': 'x86_64',
        'x86_64': 'x86_64',
    }.get(host_machine, 'generic')
    env['crosscompile'] = platform != host_platform
    if machine == 'x86_64' and host_machine != 'x86_64':
        env['crosscompile'] = True
    env['hostonly'] = False
    # Backwards compatability with the debug= profile= options
    if env['build'] == 'debug':
        if not env['debug']:
            print 'scons: warning: debug option is deprecated and will be removed eventually; use instead'
            print
            print ' scons build=release'
            print
            env['build'] = 'release'
        if env['profile']:
            print 'scons: warning: profile option is deprecated and will be removed eventually; use instead'
            print
            print ' scons build=profile'
            print
            env['build'] = 'profile'
    # Dead branch kept as a reminder of the planned removal of the old
    # debug/profile variables; only the else clause ever runs.
    if False:
        # Enforce SConscripts to use the new build variable
        env.popitem('debug')
        env.popitem('profile')
    else:
        # Backwards portability with older sconscripts
        if env['build'] in ('debug', 'checked'):
            env['debug'] = True
            env['profile'] = False
        if env['build'] == 'profile':
            env['debug'] = False
            env['profile'] = True
        if env['build'] == 'release':
            env['debug'] = False
            env['profile'] = False
    # Put build output in a separate dir, which depends on the current
    # configuration. See also http://www.scons.org/wiki/AdvancedBuildExample
    build_topdir = 'build'
    build_subdir = env['platform']
    if env['embedded']:
        build_subdir = 'embedded-' + build_subdir
    if env['machine'] != 'generic':
        build_subdir += '-' + env['machine']
    if env['build'] != 'release':
        build_subdir += '-' + env['build']
    build_dir = os.path.join(build_topdir, build_subdir)
    # Place the .sconsign file in the build dir too, to avoid issues with
    # different scons versions building the same source file
    env['build_dir'] = build_dir
    env.SConsignFile(os.path.join(build_dir, '.sconsign'))
    if 'SCONS_CACHE_DIR' in os.environ:
        print 'scons: Using build cache in %s.' % (os.environ['SCONS_CACHE_DIR'],)
        env.CacheDir(os.environ['SCONS_CACHE_DIR'])
    env['CONFIGUREDIR'] = os.path.join(build_dir, 'conf')
    env['CONFIGURELOG'] = os.path.join(os.path.abspath(build_dir), 'config.log')
    # Parallel build
    if env.GetOption('num_jobs') <= 1:
        env.SetOption('num_jobs', num_jobs())
    # Cheap out-of-date checks: timestamp first, MD5 only when it changed.
    env.Decider('MD5-timestamp')
    env.SetOption('max_drift', 60)
    # C preprocessor options
    cppdefines = []
    if env['build'] in ('debug', 'checked'):
        cppdefines += ['DEBUG']
    else:
        cppdefines += ['NDEBUG']
    if env['build'] == 'profile':
        cppdefines += ['PROFILE']
    if env['platform'] in ('posix', 'linux', 'freebsd', 'darwin'):
        cppdefines += [
            '_POSIX_SOURCE',
            ('_POSIX_C_SOURCE', '199309L'),
            '_SVID_SOURCE',
            '_BSD_SOURCE',
            '_GNU_SOURCE',
            'HAVE_PTHREAD',
            'HAVE_POSIX_MEMALIGN',
        ]
        if env['platform'] == 'darwin':
            cppdefines += [
                '_DARWIN_C_SOURCE',
                'GLX_USE_APPLEGL',
                'GLX_DIRECT_RENDERING',
            ]
        else:
            cppdefines += [
                'GLX_DIRECT_RENDERING',
                'GLX_INDIRECT_RENDERING',
            ]
        if env['platform'] in ('linux', 'freebsd'):
            cppdefines += ['HAVE_ALIAS']
        else:
            cppdefines += ['GLX_ALIAS_UNSUPPORTED']
    if platform == 'windows':
        cppdefines += [
            'WIN32',
            '_WINDOWS',
            #'_UNICODE',
            #'UNICODE',
            # http://msdn.microsoft.com/en-us/library/aa383745.aspx
            ('_WIN32_WINNT', '0x0601'),
            ('WINVER', '0x0601'),
        ]
        if gcc:
            cppdefines += [('__MSVCRT_VERSION__', '0x0700')]
        if msvc:
            cppdefines += [
                'VC_EXTRALEAN',
                '_USE_MATH_DEFINES',
                '_CRT_SECURE_NO_WARNINGS',
                '_CRT_SECURE_NO_DEPRECATE',
                '_SCL_SECURE_NO_WARNINGS',
                '_SCL_SECURE_NO_DEPRECATE',
            ]
        if env['build'] in ('debug', 'checked'):
            cppdefines += ['_DEBUG']
    if platform == 'windows':
        cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_USER']
    if platform == 'haiku':
        cppdefines += ['BEOS_THREADS']
    if env['embedded']:
        cppdefines += ['PIPE_SUBSYSTEM_EMBEDDED']
    if env['texture_float']:
        print 'warning: Floating-point textures enabled.'
        print 'warning: Please consult docs/patents.txt with your lawyer before building Mesa.'
        cppdefines += ['TEXTURE_FLOAT_ENABLED']
    env.Append(CPPDEFINES = cppdefines)
    # C compiler options
    cflags = [] # C
    cxxflags = [] # C++
    ccflags = [] # C & C++
    if gcc:
        ccversion = env['CCVERSION']
        if env['build'] == 'debug':
            ccflags += ['-O0']
        elif ccversion.startswith('4.2.'):
            # gcc 4.2.x optimizer is broken
            print "warning: gcc 4.2.x optimizer is broken -- disabling optimizations"
            ccflags += ['-O0']
        else:
            ccflags += ['-O3']
        # gcc's builtin memcmp is slower than glibc's
        # http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43052
        ccflags += ['-fno-builtin-memcmp']
        # Work around aliasing bugs - developers should comment this out
        ccflags += ['-fno-strict-aliasing']
        ccflags += ['-g']
        if env['build'] in ('checked', 'profile'):
            # See http://code.google.com/p/jrfonseca/wiki/Gprof2Dot#Which_options_should_I_pass_to_gcc_when_compiling_for_profiling?
            ccflags += [
                '-fno-omit-frame-pointer',
                '-fno-optimize-sibling-calls',
            ]
        if env['machine'] == 'x86':
            ccflags += [
                '-m32',
                #'-march=pentium4',
            ]
            if distutils.version.LooseVersion(ccversion) >= distutils.version.LooseVersion('4.2') \
               and (platform != 'windows' or env['build'] == 'debug' or True) \
               and platform != 'haiku':
                # NOTE: We need to ensure stack is realigned given that we
                # produce shared objects, and have no control over the stack
                # alignment policy of the application. Therefore we need
                # -mstackrealign ore -mincoming-stack-boundary=2.
                #
                # XXX: -O and -mstackrealign causes stack corruption on MinGW
                #
                # XXX: We could have SSE without -mstackrealign if we always used
                # __attribute__((force_align_arg_pointer)), but that's not
                # always the case.
                ccflags += [
                    '-mstackrealign', # ensure stack is aligned
                    '-mmmx', '-msse', '-msse2', # enable SIMD intrinsics
                    #'-mfpmath=sse',
                ]
            if platform in ['windows', 'darwin']:
                # Workaround http://gcc.gnu.org/bugzilla/show_bug.cgi?id=37216
                ccflags += ['-fno-common']
            if platform in ['haiku']:
                # Make optimizations compatible with Pentium or higher on Haiku
                ccflags += [
                    '-mstackrealign', # ensure stack is aligned
                    '-march=i586', # Haiku target is Pentium
                    '-mtune=i686', # use i686 where we can
                    '-mmmx' # use mmx math where we can
                ]
        if env['machine'] == 'x86_64':
            ccflags += ['-m64']
            if platform == 'darwin':
                ccflags += ['-fno-common']
        if env['platform'] not in ('windows', 'haiku'):
            ccflags += ['-fvisibility=hidden']
        # See also:
        # - http://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html
        ccflags += [
            '-Wall',
            '-Wno-long-long',
            '-fmessage-length=0', # be nice to Eclipse
        ]
        cflags += [
            '-Wmissing-prototypes',
            '-std=gnu99',
        ]
        if distutils.version.LooseVersion(ccversion) >= distutils.version.LooseVersion('4.2'):
            ccflags += [
                '-Wpointer-arith',
            ]
            cflags += [
                '-Wdeclaration-after-statement',
            ]
    if icc:
        cflags += [
            '-std=gnu99',
        ]
    if msvc:
        # See also:
        # - http://msdn.microsoft.com/en-us/library/19z1t1wy.aspx
        # - cl /?
        if env['build'] == 'debug':
            ccflags += [
                '/Od', # disable optimizations
                '/Oi', # enable intrinsic functions
                '/Oy-', # disable frame pointer omission
            ]
        else:
            ccflags += [
                '/O2', # optimize for speed
            ]
        if env['build'] == 'release':
            ccflags += [
                '/GL', # enable whole program optimization
            ]
        else:
            ccflags += [
                '/GL-', # disable whole program optimization
            ]
        ccflags += [
            '/W3', # warning level
            #'/Wp64', # enable 64 bit porting warnings
            '/wd4996', # disable deprecated POSIX name warnings
        ]
        if env['machine'] == 'x86':
            ccflags += [
                #'/arch:SSE2', # use the SSE2 instructions
            ]
        if platform == 'windows':
            ccflags += [
                # TODO
            ]
        # Automatic pdb generation
        # See http://scons.tigris.org/issues/show_bug.cgi?id=1656
        env.EnsureSConsVersion(0, 98, 0)
        env['PDB'] = '${TARGET.base}.pdb'
    env.Append(CCFLAGS = ccflags)
    env.Append(CFLAGS = cflags)
    env.Append(CXXFLAGS = cxxflags)
    if env['platform'] == 'windows' and msvc:
        # Choose the appropriate MSVC CRT
        # http://msdn.microsoft.com/en-us/library/2kzt1wy3.aspx
        if env['build'] in ('debug', 'checked'):
            env.Append(CCFLAGS = ['/MTd'])
            env.Append(SHCCFLAGS = ['/LDd'])
        else:
            env.Append(CCFLAGS = ['/MT'])
            env.Append(SHCCFLAGS = ['/LD'])
    # Assembler options
    if gcc:
        if env['machine'] == 'x86':
            env.Append(ASFLAGS = ['-m32'])
        if env['machine'] == 'x86_64':
            env.Append(ASFLAGS = ['-m64'])
    # Linker options
    linkflags = []
    shlinkflags = []
    if gcc:
        if env['machine'] == 'x86':
            linkflags += ['-m32']
        if env['machine'] == 'x86_64':
            linkflags += ['-m64']
        # NOTE(review): ('darwin') is a plain string, not a 1-tuple, so the
        # membership tests below are substring tests.  They happen to behave
        # correctly for the platform names used here, but ('darwin',) was
        # almost certainly intended — confirm before changing.
        if env['platform'] not in ('darwin'):
            shlinkflags += [
                '-Wl,-Bsymbolic',
            ]
        # Handle circular dependencies in the libraries
        if env['platform'] in ('darwin'):
            pass
        else:
            env['_LIBFLAGS'] = '-Wl,--start-group ' + env['_LIBFLAGS'] + ' -Wl,--end-group'
        if env['platform'] == 'windows':
            # Avoid depending on gcc runtime DLLs
            linkflags += ['-static-libgcc']
            if 'w64' in env['CC'].split('-'):
                linkflags += ['-static-libstdc++']
            # Handle the @xx symbol munging of DLL exports
            shlinkflags += ['-Wl,--enable-stdcall-fixup']
            #shlinkflags += ['-Wl,--kill-at']
    if msvc:
        if env['build'] == 'release':
            # enable Link-time Code Generation
            linkflags += ['/LTCG']
            env.Append(ARFLAGS = ['/LTCG'])
    if platform == 'windows' and msvc:
        # See also:
        # - http://msdn2.microsoft.com/en-us/library/y0zzbyt4.aspx
        linkflags += [
            '/fixed:no',
            '/incremental:no',
        ]
    env.Append(LINKFLAGS = linkflags)
    env.Append(SHLINKFLAGS = shlinkflags)
    # We have C++ in several libraries, so always link with the C++ compiler
    if env['gcc'] or env['clang']:
        env['LINK'] = env['CXX']
    # Default libs
    libs = []
    if env['platform'] in ('darwin', 'freebsd', 'linux', 'posix', 'sunos'):
        libs += ['m', 'pthread', 'dl']
    env.Append(LIBS = libs)
    # OpenMP
    if env['openmp']:
        if env['msvc']:
            env.Append(CCFLAGS = ['/openmp'])
            # When building openmp release VS2008 link.exe crashes with LNK1103 error.
            # Workaround: overwrite PDB flags with empty value as it isn't required anyways
            if env['build'] == 'release':
                env['PDB'] = ''
        if env['gcc']:
            env.Append(CCFLAGS = ['-fopenmp'])
            env.Append(LIBS = ['gomp'])
    # Load tools
    env.Tool('lex')
    env.Tool('yacc')
    if env['llvm']:
        env.Tool('llvm')
    # Custom builders and methods
    env.Tool('custom')
    createInstallMethods(env)
    # pkg-config dependency checks; these appear to set boolean env flags
    # such as env['x11'] and env['drm'] (consumed for env['dri'] below).
    env.PkgCheckModules('X11', ['x11', 'xext', 'xdamage', 'xfixes'])
    env.PkgCheckModules('XCB', ['x11-xcb', 'xcb-glx >= 1.8.1'])
    env.PkgCheckModules('XF86VIDMODE', ['xxf86vm'])
    env.PkgCheckModules('DRM', ['libdrm >= 2.4.24'])
    env.PkgCheckModules('DRM_INTEL', ['libdrm_intel >= 2.4.30'])
    env.PkgCheckModules('DRM_RADEON', ['libdrm_radeon >= 2.4.31'])
    env.PkgCheckModules('XORG', ['xorg-server >= 1.6.0'])
    env.PkgCheckModules('KMS', ['libkms >= 2.4.24'])
    env.PkgCheckModules('UDEV', ['libudev > 150'])
    env['dri'] = env['x11'] and env['drm']
    # for debugging
    #print env.Dump()
def exists(env):
    """SCons tool-existence hook: this tool is always available."""
    return 1
|
ramaganapathy1/AMuDA-Ir-back-end
|
refs/heads/master
|
vEnv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/response.py
|
515
|
from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
from ..exceptions import HeaderParsingError
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    # Probe the official file-like interface first, then the wrapped-fp
    # convention used by containers such as HTTPResponse, which drop
    # their inner file object (fp = None) on exhaustion.
    for probe in (lambda o: o.closed, lambda o: o.fp is None):
        try:
            return probe(obj)
        except AttributeError:
            continue

    raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers):
    """
    Asserts whether all headers have been successfully parsed.
    Extracts encountered errors from the result of parsing headers.

    Only works on Python 3.

    :param headers: Headers to verify.
    :type headers: `httplib.HTTPMessage`.

    :raises urllib3.exceptions.HeaderParsingError:
        If parsing errors are found.
    """
    # This will fail silently if we pass in the wrong kind of parameter.
    # To make debugging easier add an explicit check.
    if not isinstance(headers, httplib.HTTPMessage):
        raise TypeError('expected httplib.Message, got {0}.'.format(
            type(headers)))

    defects = getattr(headers, 'defects', None)
    get_payload = getattr(headers, 'get_payload', None)

    # Platform-specific: Python 3 exposes unconsumed header data via
    # get_payload(); anything there means parsing stopped early.
    unparsed_data = get_payload() if get_payload else None

    if defects or unparsed_data:
        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response):
    """
    Checks whether the request of a response has been a HEAD-request.
    Handles the quirks of AppEngine.

    :param conn:
    :type conn: :class:`httplib.HTTPResponse`
    """
    # FIXME: Can we do this somehow without accessing private httplib _method?
    method = response._method
    # Platform-specific: AppEngine reports the method as a numeric code,
    # where 3 stands for HEAD.
    if isinstance(method, int):
        return method == 3
    return 'HEAD' == method.upper()
|
pothosware/gnuradio
|
refs/heads/master
|
gr-digital/python/digital/cpm.py
|
41
|
#
# CPM modulation and demodulation.
#
#
# Copyright 2005-2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# See gnuradio-examples/python/digital for examples
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from math import pi
import numpy
import digital_swig
import modulation_utils
# default values (used in __init__ and add_options)
_def_samples_per_symbol = 2
_def_bits_per_symbol = 1
# modulation index h = h_numerator / h_denominator
_def_h_numerator = 1
_def_h_denominator = 2
_def_cpm_type = 0 # 0=CPFSK, 1=GMSK, 2=RC, 3=GENERAL
# bandwidth-time product for the GMSK Gaussian pulse
_def_bt = 0.35
_def_symbols_per_pulse = 1
# placeholder taps for cpm_type 3 (GENERAL); caller supplies real ones
_def_generic_taps = numpy.empty(1)
_def_verbose = False
_def_log = False
# /////////////////////////////////////////////////////////////////////////////
# CPM modulator
# /////////////////////////////////////////////////////////////////////////////
class cpm_mod(gr.hier_block2):
    """
    Hierarchical block for Continuous Phase modulation.
    The input is a byte stream (unsigned char) representing packed
    bits and the output is the complex modulated signal at baseband.
    See Proakis for definition of generic CPM signals:
    s(t)=exp(j phi(t))
    phi(t)= 2 pi h int_0^t f(t') dt'
    f(t)=sum_k a_k g(t-kT)
    (normalizing assumption: int_0^infty g(t) dt = 1/2)
    Args:
        samples_per_symbol: samples per baud >= 2 (integer)
        bits_per_symbol: bits per symbol (integer)
        h_numerator: numerator of modulation index (integer)
        h_denominator: denominator of modulation index (numerator and denominator must be relative primes) (integer)
        cpm_type: supported types are: 0=CPFSK, 1=GMSK, 2=RC, 3=GENERAL (integer)
        bt: bandwidth symbol time product for GMSK (float)
        symbols_per_pulse: shaping pulse duration in symbols (integer)
        generic_taps: define a generic CPM pulse shape (sum = samples_per_symbol/2) (list/array of floats)
        verbose: Print information about modulator? (boolean)
        debug: Print modulation data to files? (boolean)
    """
    def __init__(self,
                 samples_per_symbol=_def_samples_per_symbol,
                 bits_per_symbol=_def_bits_per_symbol,
                 h_numerator=_def_h_numerator,
                 h_denominator=_def_h_denominator,
                 cpm_type=_def_cpm_type,
                 bt=_def_bt,
                 symbols_per_pulse=_def_symbols_per_pulse,
                 generic_taps=_def_generic_taps,
                 verbose=_def_verbose,
                 log=_def_log):
        # One byte stream in, one complex baseband stream out.
        gr.hier_block2.__init__(self, "cpm_mod",
                                gr.io_signature(1, 1, gr.sizeof_char), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
        self._samples_per_symbol = samples_per_symbol
        self._bits_per_symbol = bits_per_symbol
        self._h_numerator = h_numerator
        self._h_denominator = h_denominator
        self._cpm_type = cpm_type
        self._bt=bt
        # GMSK fixes the pulse length at 4 symbols; the other types take it
        # from the symbols_per_pulse argument.
        if cpm_type == 0 or cpm_type == 2 or cpm_type == 3: # CPFSK, RC, Generic
            self._symbols_per_pulse = symbols_per_pulse
        elif cpm_type == 1: # GMSK
            self._symbols_per_pulse = 4
        else:
            raise TypeError, ("cpm_type must be an integer in {0,1,2,3}, is %r" % (cpm_type,))
        self._generic_taps=numpy.array(generic_taps)
        if samples_per_symbol < 2:
            raise TypeError, ("samples_per_symbol must be >= 2, is %r" % (samples_per_symbol,))
        # Symmetric M-ary alphabet: {-(M-1), ..., -1, 1, ..., M-1} in steps of 2.
        self.nsymbols = 2**bits_per_symbol
        self.sym_alphabet = numpy.arange(-(self.nsymbols-1),self.nsymbols,2).tolist()
        self.ntaps = int(self._symbols_per_pulse * samples_per_symbol)
        # FM sensitivity: 2*pi*h / samples_per_symbol (h = num/den).
        sensitivity = 2 * pi * h_numerator / h_denominator / samples_per_symbol
        # Unpack Bytes into bits_per_symbol groups
        self.B2s = blocks.packed_to_unpacked_bb(bits_per_symbol,gr.GR_MSB_FIRST)
        # Turn it into symmetric PAM data.
        self.pam = digital_swig.chunks_to_symbols_bf(self.sym_alphabet,1)
        # Generate pulse (sum of taps = samples_per_symbol/2)
        if cpm_type == 0: # CPFSK
            self.taps= (1.0/self._symbols_per_pulse/2,) * self.ntaps
        elif cpm_type == 1: # GMSK
            gaussian_taps = filter.firdes.gaussian(
                1.0/2, # gain
                samples_per_symbol, # symbol_rate
                bt, # bandwidth * symbol time
                self.ntaps # number of taps
                )
            sqwave = (1,) * samples_per_symbol # rectangular window
            # Gaussian pulse convolved with a rectangular window -> GMSK pulse.
            self.taps = numpy.convolve(numpy.array(gaussian_taps),numpy.array(sqwave))
        elif cpm_type == 2: # Raised Cosine
            # generalize it for arbitrary roll-off factor
            self.taps = (1-numpy.cos(2*pi*numpy.arange(0,self.ntaps)/samples_per_symbol/self._symbols_per_pulse))/(2*self._symbols_per_pulse)
        elif cpm_type == 3: # Generic CPM
            self.taps = generic_taps
        else:
            raise TypeError, ("cpm_type must be an integer in {0,1,2,3}, is %r" % (cpm_type,))
        # Pulse-shape the PAM symbols at the chosen oversampling rate.
        self.filter = filter.pfb.arb_resampler_fff(samples_per_symbol, self.taps)
        # FM modulation
        self.fmmod = analog.frequency_modulator_fc(sensitivity)
        if verbose:
            self._print_verbage()
        if log:
            self._setup_logging()
        # Connect
        self.connect(self, self.B2s, self.pam, self.filter, self.fmmod, self)
    # Simple read-only accessors for the constructor parameters.
    def samples_per_symbol(self):
        return self._samples_per_symbol
    def bits_per_symbol(self):
        return self._bits_per_symbol
    def h_numerator(self):
        return self._h_numerator
    def h_denominator(self):
        return self._h_denominator
    def cpm_type(self):
        return self._cpm_type
    def bt(self):
        return self._bt
    def symbols_per_pulse(self):
        return self._symbols_per_pulse
    def _print_verbage(self):
        # Dump the modulator configuration to stdout (Python 2 prints).
        print "Samples per symbol = %d" % self._samples_per_symbol
        print "Bits per symbol = %d" % self._bits_per_symbol
        print "h = " , self._h_numerator , " / " , self._h_denominator
        print "Symbol alphabet = " , self.sym_alphabet
        print "Symbols per pulse = %d" % self._symbols_per_pulse
        print "taps = " , self.taps
        print "CPM type = %d" % self._cpm_type
        if self._cpm_type == 1:
            print "Gaussian filter BT = %.2f" % self._bt
    def _setup_logging(self):
        # Tap every pipeline stage into a file sink for offline inspection.
        print "Modulation logging turned on."
        self.connect(self.B2s,
                     blocks.file_sink(gr.sizeof_float, "symbols.dat"))
        self.connect(self.pam,
                     blocks.file_sink(gr.sizeof_float, "pam.dat"))
        self.connect(self.filter,
                     blocks.file_sink(gr.sizeof_float, "filter.dat"))
        self.connect(self.fmmod,
                     blocks.file_sink(gr.sizeof_gr_complex, "fmmod.dat"))
    def add_options(parser):
        """
        Adds CPM modulation-specific options to the standard parser
        """
        parser.add_option("", "--bt", type="float", default=_def_bt,
                          help="set bandwidth-time product [default=%default] (GMSK)")
    # Legacy staticmethod assignment (pre-decorator style kept throughout file).
    add_options=staticmethod(add_options)
    def extract_kwargs_from_options(options):
        """
        Given command line options, create dictionary suitable for passing to __init__
        """
        return modulation_utils.extract_kwargs_from_options(cpm_mod.__init__,
                                                            ('self',), options)
    extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)
# /////////////////////////////////////////////////////////////////////////////
# CPM demodulator
# /////////////////////////////////////////////////////////////////////////////
#
# Not yet implemented
#
#
# Add these to the mod/demod registry
#
# Registers the modulator under the name 'cpm' so generic tooling can
# instantiate it by name; the demodulator is not yet implemented.
modulation_utils.add_type_1_mod('cpm', cpm_mod)
#modulation_utils.add_type_1_demod('cpm', cpm_demod)
|
holmes/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/Yield33.after.py
|
79
|
def f(xs):
    # Extract-method refactoring fixture (expected "after" output): the
    # loop was extracted into bar(), and `found` is threaded back to the
    # caller through the generator's return value via `yield from`.
    found = False
    found = yield from bar(found, xs)
    print(found)
def bar(found_new, xs_new):
    # Extracted generator: yields each element and returns (as the
    # StopIteration value consumed by `yield from`) whether the loop body ran.
    for x in xs_new:
        yield x
        found_new = True
    return found_new
|
rzambre/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/py/py/_code/_assertionold.py
|
218
|
import py
import sys, inspect
from compiler import parse, ast, pycodegen
from py._code.assertion import BuiltinAssertionError, _format_explanation
# Exceptions that the bare `except:` handlers below must never swallow
# (re-raised explicitly before each bare except; see py.builtin._sysex).
passthroughex = py.builtin._sysex
class Failure:
    """Snapshot of the active exception plus the parse-tree node whose
    evaluation raised it (exc/value/tb come from sys.exc_info())."""

    def __init__(self, node):
        exc_type, exc_value, exc_tb = sys.exc_info()
        self.exc = exc_type
        self.value = exc_value
        self.tb = exc_tb
        self.node = node
class View(object):
    """View base class.
    If C is a subclass of View, then C(x) creates a proxy object around
    the object x. The actual class of the proxy is not C in general,
    but a *subclass* of C determined by the rules below. To avoid confusion
    we call view class the class of the proxy (a subclass of C, so of View)
    and object class the class of x.
    Attributes and methods not found in the proxy are automatically read on x.
    Other operations like setting attributes are performed on the proxy, as
    determined by its view class. The object x is available from the proxy
    as its __obj__ attribute.
    The view class selection is determined by the __view__ tuples and the
    optional __viewkey__ method. By default, the selected view class is the
    most specific subclass of C whose __view__ mentions the class of x.
    If no such subclass is found, the search proceeds with the parent
    object classes. For example, C(True) will first look for a subclass
    of C with __view__ = (..., bool, ...) and only if it doesn't find any
    look for one with __view__ = (..., int, ...), and then ..., object,...
    If everything fails the class C itself is considered to be the default.
    Alternatively, the view class selection can be driven by another aspect
    of the object x, instead of the class of x, by overriding __viewkey__.
    See last example at the end of this module.
    """
    # Cache of view-key -> selected subclass.
    # NOTE(review): nothing ever stores into _viewcache, so the KeyError
    # branch in __new__ always runs — confirm whether caching was intended.
    _viewcache = {}
    __view__ = ()
    def __new__(rootclass, obj, *args, **kwds):
        # `rootclass` is the class the caller invoked (C in C(x)); the
        # instance's __class__ is then swapped to the selected view subclass.
        self = object.__new__(rootclass)
        self.__obj__ = obj
        self.__rootclass__ = rootclass
        key = self.__viewkey__()
        try:
            self.__class__ = self._viewcache[key]
        except KeyError:
            self.__class__ = self._selectsubclass(key)
        return self
    def __getattr__(self, attr):
        # attributes not found in the normal hierarchy rooted on View
        # are looked up in the object's real class
        return getattr(self.__obj__, attr)
    def __viewkey__(self):
        # Default selection key: the wrapped object's class (overridable).
        return self.__obj__.__class__
    def __matchkey__(self, key, subclasses):
        # Walk the key's MRO and return the subclasses whose __view__
        # mentions the first class that matches anything.
        if inspect.isclass(key):
            keys = inspect.getmro(key)
        else:
            keys = [key]
        for key in keys:
            result = [C for C in subclasses if key in C.__view__]
            if result:
                return result
        return []
    def _selectsubclass(self, key):
        subclasses = list(enumsubclasses(self.__rootclass__))
        for C in subclasses:
            # Allow __view__ to be written as a bare class; normalize to tuple.
            if not isinstance(C.__view__, tuple):
                C.__view__ = (C.__view__,)
        choices = self.__matchkey__(key, subclasses)
        if not choices:
            return self.__rootclass__
        elif len(choices) == 1:
            return choices[0]
        else:
            # combine the multiple choices
            return type('?', tuple(choices), {})
    def __repr__(self):
        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
def enumsubclasses(cls):
    """Yield every subclass of *cls* depth-first, finishing with *cls* itself
    (so the most specific classes come before their bases)."""
    for child in cls.__subclasses__():
        for descendant in enumsubclasses(child):
            yield descendant
    yield cls
class Interpretable(View):
    """A parse tree node with a few extra methods."""
    # Human-readable rendering of the evaluated expression; filled by eval().
    explanation = None
    def is_builtin(self, frame):
        # Overridden by Name; by default a node is not a builtin reference.
        return False
    def eval(self, frame):
        # fall-back for unknown expression nodes
        try:
            # Compile the wrapped compiler-ast node as an expression and
            # evaluate it inside the caller's frame.
            expr = ast.Expression(self.__obj__)
            expr.filename = '<eval>'
            self.__obj__.filename = '<eval>'
            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
            result = frame.eval(co)
        except passthroughex:
            raise
        except:
            # Wrap any evaluation error so the caller can attach the node.
            raise Failure(self)
        self.result = result
        self.explanation = self.explanation or frame.repr(self.result)
    def run(self, frame):
        # fall-back for unknown statement nodes
        try:
            expr = ast.Module(None, ast.Stmt([self.__obj__]))
            expr.filename = '<run>'
            co = pycodegen.ModuleCodeGenerator(expr).getCode()
            frame.exec_(co)
        except passthroughex:
            raise
        except:
            raise Failure(self)
    def nice_explanation(self):
        # Delegate final formatting of the explanation string.
        return _format_explanation(self.explanation)
class Name(Interpretable):
    """View for ast.Name nodes: classifies where the name is bound."""
    __view__ = ast.Name
    def is_local(self, frame):
        # Chained comparison: true iff self.name is in locals() AND
        # locals() is not globals() (i.e. we are not at module scope).
        source = '%r in locals() is not globals()' % self.name
        try:
            return frame.is_true(frame.eval(source))
        except passthroughex:
            raise
        except:
            # Any failure to evaluate means "can't prove it's local".
            return False
    def is_global(self, frame):
        source = '%r in globals()' % self.name
        try:
            return frame.is_true(frame.eval(source))
        except passthroughex:
            raise
        except:
            return False
    def is_builtin(self, frame):
        # Neither local nor global -> must resolve via builtins (if at all).
        source = '%r not in locals() and %r not in globals()' % (
            self.name, self.name)
        try:
            return frame.is_true(frame.eval(source))
        except passthroughex:
            raise
        except:
            return False
    def eval(self, frame):
        super(Name, self).eval(frame)
        # Non-local names explain as just the identifier, not their value.
        if not self.is_local(frame):
            self.explanation = self.name
class Compare(Interpretable):
    """View for ast.Compare: evaluates (possibly chained) comparisons
    pairwise, mirroring Python's short-circuit semantics."""
    __view__ = ast.Compare
    def eval(self, frame):
        expr = Interpretable(self.expr)
        expr.eval(frame)
        for operation, expr2 in self.ops:
            # self.result only exists after the first pair was evaluated.
            if hasattr(self, 'result'):
                # shortcutting in chained expressions
                if not frame.is_true(self.result):
                    break
            expr2 = Interpretable(expr2)
            expr2.eval(frame)
            self.explanation = "%s %s %s" % (
                expr.explanation, operation, expr2.explanation)
            # Evaluate "left <op> right" in the target frame with the
            # already-computed operand values injected by name.
            source = "__exprinfo_left %s __exprinfo_right" % operation
            try:
                self.result = frame.eval(source,
                                         __exprinfo_left=expr.result,
                                         __exprinfo_right=expr2.result)
            except passthroughex:
                raise
            except:
                raise Failure(self)
            # Chained comparison: the right operand becomes the next left.
            expr = expr2
class And(Interpretable):
    """Short-circuiting ``and``: evaluation stops at the first false operand."""
    __view__ = ast.And

    def eval(self, frame):
        pieces = []
        for raw_node in self.nodes:
            operand = Interpretable(raw_node)
            operand.eval(frame)
            pieces.append(operand.explanation)
            self.result = operand.result
            if not frame.is_true(operand.result):
                break
        self.explanation = '(' + ' and '.join(pieces) + ')'
class Or(Interpretable):
    """Short-circuiting ``or``: evaluation stops at the first true operand."""
    __view__ = ast.Or

    def eval(self, frame):
        pieces = []
        for raw_node in self.nodes:
            operand = Interpretable(raw_node)
            operand.eval(frame)
            pieces.append(operand.explanation)
            self.result = operand.result
            if frame.is_true(operand.result):
                break
        self.explanation = '(' + ' or '.join(pieces) + ')'
# == Unary operations ==
# One Interpretable subclass is generated per unary AST node type.  The
# ``astpattern`` template is bound as a default argument so that each
# generated class keeps its own pattern despite the shared loop variable.
keepalive = []
for astclass, astpattern in {
    ast.Not : 'not __exprinfo_expr',
    ast.Invert : '(~__exprinfo_expr)',
    }.items():

    class UnaryArith(Interpretable):
        __view__ = astclass

        def eval(self, frame, astpattern=astpattern):
            expr = Interpretable(self.expr)
            expr.eval(frame)
            self.explanation = astpattern.replace('__exprinfo_expr',
                                                  expr.explanation)
            try:
                self.result = frame.eval(astpattern,
                                         __exprinfo_expr=expr.result)
            except passthroughex:
                raise
            except:
                raise Failure(self)

    # Keep a reference so the generated class is not garbage collected.
    keepalive.append(UnaryArith)
# == Binary operations ==
# Same generation trick as the unary operations above: one class per AST
# node type, with the pattern captured via a default argument.
for astclass, astpattern in {
    ast.Add : '(__exprinfo_left + __exprinfo_right)',
    ast.Sub : '(__exprinfo_left - __exprinfo_right)',
    ast.Mul : '(__exprinfo_left * __exprinfo_right)',
    ast.Div : '(__exprinfo_left / __exprinfo_right)',
    ast.Mod : '(__exprinfo_left % __exprinfo_right)',
    ast.Power : '(__exprinfo_left ** __exprinfo_right)',
    }.items():

    class BinaryArith(Interpretable):
        __view__ = astclass

        def eval(self, frame, astpattern=astpattern):
            left = Interpretable(self.left)
            left.eval(frame)
            right = Interpretable(self.right)
            right.eval(frame)
            self.explanation = (astpattern
                                .replace('__exprinfo_left', left .explanation)
                                .replace('__exprinfo_right', right.explanation))
            try:
                self.result = frame.eval(astpattern,
                                         __exprinfo_left=left.result,
                                         __exprinfo_right=right.result)
            except passthroughex:
                raise
            except:
                raise Failure(self)

    # Keep a reference so the generated class is not garbage collected.
    keepalive.append(BinaryArith)
class CallFunc(Interpretable):
    """A function call; rebuilds the call with every argument pre-evaluated."""
    __view__ = ast.CallFunc

    def is_bool(self, frame):
        # True when the call's result is a bool (then the bare explanation
        # is enough and the '{... = ...}' expansion is skipped).
        source = 'isinstance(__exprinfo_value, bool)'
        try:
            return frame.is_true(frame.eval(source,
                                            __exprinfo_value=self.result))
        except passthroughex:
            raise
        except:
            return False

    def eval(self, frame):
        node = Interpretable(self.node)
        node.eval(frame)
        explanations = []
        # Each evaluated sub-expression is bound to a synthetic name and the
        # call is re-issued as '__exprinfo_fn(__exprinfo_1, ...)'.
        vars = {'__exprinfo_fn': node.result}
        source = '__exprinfo_fn('
        for a in self.args:
            if isinstance(a, ast.Keyword):
                keyword = a.name
                a = a.expr
            else:
                keyword = None
            a = Interpretable(a)
            a.eval(frame)
            argname = '__exprinfo_%d' % len(vars)
            vars[argname] = a.result
            if keyword is None:
                source += argname + ','
                explanations.append(a.explanation)
            else:
                source += '%s=%s,' % (keyword, argname)
                explanations.append('%s=%s' % (keyword, a.explanation))
        if self.star_args:
            star_args = Interpretable(self.star_args)
            star_args.eval(frame)
            argname = '__exprinfo_star'
            vars[argname] = star_args.result
            source += '*' + argname + ','
            explanations.append('*' + star_args.explanation)
        if self.dstar_args:
            dstar_args = Interpretable(self.dstar_args)
            dstar_args.eval(frame)
            argname = '__exprinfo_kwds'
            vars[argname] = dstar_args.result
            source += '**' + argname + ','
            explanations.append('**' + dstar_args.explanation)
        self.explanation = "%s(%s)" % (
            node.explanation, ', '.join(explanations))
        # Drop the trailing comma before closing the synthetic call.
        if source.endswith(','):
            source = source[:-1]
        source += ')'
        try:
            self.result = frame.eval(source, **vars)
        except passthroughex:
            raise
        except:
            raise Failure(self)
        if not node.is_builtin(frame) or not self.is_bool(frame):
            # Non-builtin or non-bool results get the '{repr = call}' expansion.
            r = frame.repr(self.result)
            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
class Getattr(Interpretable):
    """An attribute access ``expr.attrname``."""
    __view__ = ast.Getattr

    def eval(self, frame):
        expr = Interpretable(self.expr)
        expr.eval(frame)
        source = '__exprinfo_expr.%s' % self.attrname
        try:
            self.result = frame.eval(source, __exprinfo_expr=expr.result)
        except passthroughex:
            raise
        except:
            raise Failure(self)
        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
        # if the attribute comes from the instance, its value is interesting
        source = ('hasattr(__exprinfo_expr, "__dict__") and '
                  '%r in __exprinfo_expr.__dict__' % self.attrname)
        try:
            from_instance = frame.is_true(
                frame.eval(source, __exprinfo_expr=expr.result))
        except passthroughex:
            raise
        except:
            # When in doubt, assume instance data and show the value.
            from_instance = True
        if from_instance:
            r = frame.repr(self.result)
            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
# == Re-interpretation of full statements ==
class Assert(Interpretable):
    """An ``assert`` statement; raises Failure with a built-up explanation."""
    __view__ = ast.Assert

    def run(self, frame):
        test = Interpretable(self.test)
        test.eval(frame)
        # simplify 'assert False where False = ...'
        if (test.explanation.startswith('False\n{False = ') and
            test.explanation.endswith('\n}')):
            test.explanation = test.explanation[15:-2]
        # print the result as 'assert <explanation>'
        self.result = test.result
        self.explanation = 'assert ' + test.explanation
        if not frame.is_true(test.result):
            # Raise-and-catch so Failure carries a live AssertionError context.
            try:
                raise BuiltinAssertionError
            except passthroughex:
                raise
            except:
                raise Failure(self)
class Assign(Interpretable):
    """An assignment; evaluates the RHS, then replays the target binding."""
    __view__ = ast.Assign

    def run(self, frame):
        expr = Interpretable(self.expr)
        expr.eval(frame)
        self.result = expr.result
        self.explanation = '... = ' + expr.explanation
        # fall-back-run the rest of the assignment: bind the computed value
        # to a synthetic name and exec the original targets against it.
        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
        mod = ast.Module(None, ast.Stmt([ass]))
        mod.filename = '<run>'
        co = pycodegen.ModuleCodeGenerator(mod).getCode()
        try:
            frame.exec_(co, __exprinfo_expr=expr.result)
        except passthroughex:
            raise
        except:
            raise Failure(self)
class Discard(Interpretable):
    """An expression statement: evaluated only for its value and explanation."""
    __view__ = ast.Discard

    def run(self, frame):
        value = Interpretable(self.expr)
        value.eval(frame)
        self.result = value.result
        self.explanation = value.explanation
class Stmt(Interpretable):
    """A statement list: runs every child statement in order."""
    __view__ = ast.Stmt

    def run(self, frame):
        for child in self.nodes:
            Interpretable(child).run(frame)
def report_failure(e):
    """Print a one-line report for Failure *e* on stdout."""
    detail = e.node.nice_explanation()
    suffix = ", in: " + detail if detail else ""
    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, suffix))
def check(s, frame=None):
    """Evaluate source *s* in *frame*, reporting failure or falseness."""
    if frame is None:
        # Default to the caller's frame.
        frame = sys._getframe(1)
        frame = py.code.Frame(frame)
    expr = parse(s, 'eval')
    assert isinstance(expr, ast.Expression)
    node = Interpretable(expr.node)
    try:
        node.eval(frame)
    except passthroughex:
        raise
    except Failure:
        e = sys.exc_info()[1]
        report_failure(e)
    else:
        # Evaluation succeeded but the expression is false: explain why.
        if not frame.is_true(node.result):
            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
###########################################################
# API / Entry points
# #########################################################
def interpret(source, frame, should_fail=False):
    """Re-run *source* inside *frame*; return a failure message or None."""
    module = Interpretable(parse(source, 'exec').node)
    #print "got module", module
    if isinstance(frame, py.std.types.FrameType):
        frame = py.code.Frame(frame)
    try:
        module.run(frame)
    except Failure:
        # The re-run failed as expected: build the detailed explanation.
        e = sys.exc_info()[1]
        return getfailure(e)
    except passthroughex:
        raise
    except:
        # Unexpected internal error: show it but keep going.
        import traceback
        traceback.print_exc()
    if should_fail:
        # The assertion passed on re-run although it originally failed
        # (typically due to side effects in the asserted expression).
        return ("(assertion failed, but when it was re-run for "
                "printing intermediate values, it did not fail. Suggestions: "
                "compute assert expression before the assert or use --nomagic)")
    else:
        return None
def getmsg(excinfo):
    """Return the re-interpreted failure message for an assertion excinfo."""
    if isinstance(excinfo, tuple):
        excinfo = py.code.ExceptionInfo(excinfo)
    #frame, line = gettbline(tb)
    #frame = py.code.Frame(frame)
    #return interpret(line, frame)
    # Re-interpret the failing statement at the innermost traceback entry.
    tb = excinfo.traceback[-1]
    source = str(tb.statement).strip()
    x = interpret(source, tb.frame, should_fail=True)
    if not isinstance(x, str):
        raise TypeError("interpret returned non-string %r" % (x,))
    return x
def getfailure(e):
    """Format Failure *e* as '<ExcName>: <explanation>' (assert prefix trimmed)."""
    explanation = e.node.nice_explanation()
    if str(e.value):
        # Attach the exception value to the first line of the explanation.
        first, sep, rest = explanation.partition('\n')
        explanation = "%s << %s%s%s" % (first, e.value, sep, rest)
    text = "%s: %s" % (e.exc.__name__, explanation)
    if text.startswith('AssertionError: assert '):
        # Drop the redundant 'AssertionError: ' prefix (16 characters).
        text = text[16:]
    return text
def run(s, frame=None):
    """Exec source *s* in *frame* (default: the caller's), reporting Failures."""
    if frame is None:
        frame = sys._getframe(1)
        frame = py.code.Frame(frame)
    module = Interpretable(parse(s, 'exec').node)
    try:
        module.run(frame)
    except Failure:
        e = sys.exc_info()[1]
        report_failure(e)
if __name__ == '__main__':
    # example: each check()/run() below deliberately exercises a passing or
    # failing assertion so the re-interpretation output can be inspected.
    def f():
        return 5
    def g():
        return 3
    def h(x):
        return 'never'
    check("f() * g() == 5")
    check("not f()")
    check("not (f() and g() or 0)")
    check("f() == g()")
    i = 4
    check("i == f()")
    check("len(f()) == 0")
    check("isinstance(2+3+4, float)")
    run("x = i")
    check("x == 5")
    run("assert not f(), 'oops'")
    run("a, b, c = 1, 2")
    run("a, b, c = f()")
    check("max([f(),g()]) == 4")
    check("'hello'[g()] == 'h'")
    run("'guk%d' % h(f())")
|
Metronus/metronus
|
refs/heads/master
|
Metronus-Project/metronus_app/apps.py
|
1
|
from django.apps import AppConfig
class MetronusAppConfig(AppConfig):
    """Django application configuration for the metronus_app package."""
    name = 'metronus_app'
|
zhaoxianpeng/s3c2440
|
refs/heads/master
|
linux-3.10.71/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
    # Arithmetic mean; note this is integer (floor) division under Python 2.
    return total / n

def nsecs(secs, nsecs):
    # Combine a (secs, nsecs) pair into a single nanosecond count.
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    # Whole-second part of a nanosecond count.
    return nsecs / NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    # Sub-second remainder of a nanosecond count.
    return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
    """Format a nanosecond count as a 'seconds.nanoseconds' string."""
    # BUG FIX: the original statement ended with a stray trailing comma,
    # which made the function return a 1-tuple instead of a string; callers
    # using '%s' % nsecs_str(...) only worked because a 1-tuple fills one
    # format slot.  Also renamed the local so it no longer shadows str().
    result = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
    return result
def add_stats(dict, key, value):
    """Fold *value* into the (min, max, running-avg, count) tuple at dict[key].

    The running average halves the weight of the history on every update;
    it is not a true arithmetic mean.
    """
    # 'has_key' is Python 2-only; 'in' behaves identically and is portable.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        # Renamed locals: the originals shadowed builtins min/max and the
        # module-level avg() helper.
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Clear the terminal: home the cursor (ESC[H) and erase the display (ESC[2J)."""
    print("\x1b[H" + "\x1b[2J")
# Optional syscall-name support via the audit-libs-python bindings.
audit_package_warned = False

try:
    import audit
    # Map uname() machine names to audit's machine constants.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is only present in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit is optional: warn once and let syscall_name() fall back to
    # printing raw syscall numbers.  (Python 2 print statement preserved.)
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Map a syscall number to its name, falling back to the number as a string.

    The fallback covers both an unknown id and the case where the optional
    audit module (and hence machine_id) was never imported, which raises
    NameError here.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return str(id)
def strerror(nr):
    """Return the errno symbol (e.g. 'ENOENT') for *nr*, or a fallback string.

    Accepts negative error numbers (kernel return-value convention).
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare 'except:': only an unknown errno is expected.
        return "Unknown %d errno" % nr
|
osvalr/odoo
|
refs/heads/8.0
|
addons/account_bank_statement_extensions/res_partner_bank.py
|
381
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class res_partner_bank(osv.osv):
    _inherit = 'res.partner.bank'

    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        """Look up bank accounts by account number for name_search widgets.

        When a search string is given, it is matched against acc_number with
        *operator*; otherwise a plain search over *args* is performed.
        """
        if not args:
            args = []
        if name:
            # CONSISTENCY FIX: this branch previously dropped the context
            # that the else-branch (and name_get below) passes through.
            ids = self.search(cr, user, [('acc_number', operator, name)] + args,
                              context=context, limit=limit)
        else:
            ids = self.search(cr, user, args, context=context, limit=limit)
        return self.name_get(cr, user, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
stonegithubs/micropython
|
refs/heads/master
|
tests/basics/for2.py
|
118
|
# Check that a for-loop's control variable keeps its value after the loop:
# untouched when the range is empty, last iterated value otherwise.
i = 'init'
for i in range(0):
    pass
print(i) # should not have been modified
for i in range(10):
    pass
print(i) # should be last successful value of loop
|
randynobx/ansible
|
refs/heads/devel
|
test/units/module_utils/test_postgresql.py
|
63
|
import json
import sys
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import builtins
from ansible.module_utils._text import to_native
from units.mock.procenv import swap_stdin_and_argv
import pprint
realimport = builtins.__import__
class TestPostgres(unittest.TestCase):
    """Tests for ansible.module_utils.postgres' psycopg2 import handling."""

    def clear_modules(self, mods):
        # Drop cached modules so the next import re-executes their top-level
        # code under the patched __import__.
        for mod in mods:
            if mod in sys.modules:
                del sys.modules[mod]

    @patch.object(builtins, '__import__')
    def test_postgres_pg2_missing_ensure_libs(self, mock_import):
        # psycopg2 absent: HAS_PSYCOPG2 must be False and ensure_libs must
        # raise LibraryError mentioning the missing package.
        def _mock_import(name, *args, **kwargs):
            if name == 'psycopg2':
                raise ImportError
            return realimport(name, *args, **kwargs)

        self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.postgres')
        self.assertFalse(mod.module_utils.postgres.HAS_PSYCOPG2)
        with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
            mod.module_utils.postgres.ensure_libs(sslrootcert=None)
        self.assertIn('psycopg2 is not installed', to_native(context.exception))

    @patch.object(builtins, '__import__')
    def test_postgres_pg2_found_ensure_libs(self, mock_import):
        # psycopg2 present (mocked): ensure_libs succeeds and returns False.
        def _mock_import(name, *args, **kwargs):
            if 'psycopg2' in name:
                return MagicMock()
            return realimport(name, *args, **kwargs)

        self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.postgres')
        self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)
        ensure_ret = mod.module_utils.postgres.ensure_libs(sslrootcert=None)
        self.assertFalse(ensure_ret)
        pprint.pprint(ensure_ret)

    @patch.object(builtins, '__import__')
    def test_postgres_pg2_found_ensure_libs_old_version(self, mock_import):
        # psycopg2 too old for sslrootcert: ensure_libs must raise.
        def _mock_import(name, *args, **kwargs):
            if 'psycopg2' in name:
                m = MagicMock()
                m.__version__ = '2.4.1'
                return m
            return realimport(name, *args, **kwargs)

        self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.postgres')
        self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)
        with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
            mod.module_utils.postgres.ensure_libs(sslrootcert='yes')
        self.assertIn('psycopg2 must be at least 2.4.3 in order to use', to_native(context.exception))
|
ahmedRguei/job
|
refs/heads/master
|
typo3conf/ext/extension_builder/Resources/Public/jsDomainModeling/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py
|
1812
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
  """Add a shard number to the end of a target.

  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  # Shards reuse the generic suffix helper with the stringified index.
  return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linkers limits.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  # Gather the targets to shard, and how many pieces.
  targets_to_shard = {}
  for t in target_dicts:
    shards = int(target_dicts[t].get('msvs_shard', 0))
    if shards:
      targets_to_shard[t] = shards
  # Shard target_list.
  new_target_list = []
  for t in target_list:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        new_target_list.append(_ShardName(t, i))
    else:
      new_target_list.append(t)
  # Shard target_dict.
  new_target_dicts = {}
  for t in target_dicts:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        name = _ShardName(t, i)
        new_target_dicts[name] = copy.copy(target_dicts[t])
        new_target_dicts[name]['target_name'] = _ShardName(
            new_target_dicts[name]['target_name'], i)
        # Distribute the sources round-robin: shard i takes every
        # shards-th source starting at offset i.
        sources = new_target_dicts[name].get('sources', [])
        new_sources = []
        for pos in range(i, len(sources), targets_to_shard[t]):
          new_sources.append(sources[pos])
        new_target_dicts[name]['sources'] = new_sources
    else:
      new_target_dicts[t] = target_dicts[t]
  # Shard dependencies: a dependency on a sharded target becomes a
  # dependency on every one of its shards.
  for t in new_target_dicts:
    for deptype in ('dependencies', 'dependencies_original'):
      dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
      new_dependencies = []
      for d in dependencies:
        if d in targets_to_shard:
          for i in range(targets_to_shard[d]):
            new_dependencies.append(_ShardName(d, i))
        else:
          new_dependencies.append(d)
      new_target_dicts[t][deptype] = new_dependencies
  return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
  """Returns the path to the PDB file that will be generated by a given
  configuration.

  The lookup proceeds as follows:
  - Look for an explicit path in the VCLinkerTool configuration block.
  - Look for an 'msvs_large_pdb_path' variable.
  - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
    specified.
  - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.

  Arguments:
    target_dict: The target dictionary to be searched.
    config_name: The name of the configuration of interest.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    The path of the corresponding PDB file.
  """
  config = target_dict['configurations'][config_name]
  msvs = config.setdefault('msvs_settings', {})

  # 1) An explicit linker setting wins.
  linker = msvs.get('VCLinkerTool', {})
  pdb_path = linker.get('ProgramDatabaseFile')
  if pdb_path:
    return pdb_path

  # 2) A per-target override variable.
  variables = target_dict.get('variables', {})
  pdb_path = variables.get('msvs_large_pdb_path', None)
  if pdb_path:
    return pdb_path

  # 3)/4) Derive from product/target name plus the type's extension.
  pdb_base = target_dict.get('product_name', target_dict['target_name'])
  pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
  pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base

  return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
  """Insert a shim target that forces the linker to use 4KB pagesize PDBs.

  This is a workaround for targets with PDBs greater than 1GB in size, the
  limit for the 1KB pagesize PDBs created by the linker by default.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    Tuple of the shimmed version of the inputs.
  """
  # Determine which targets need shimming.
  targets_to_shim = []
  for t in target_dicts:
    target_dict = target_dicts[t]
    # We only want to shim targets that have msvs_large_pdb enabled.
    if not int(target_dict.get('msvs_large_pdb', 0)):
      continue
    # This is intended for executable, shared_library and loadable_module
    # targets where every configuration is set up to produce a PDB output.
    # If any of these conditions is not true then the shim logic will fail
    # below.
    targets_to_shim.append(t)

  large_pdb_shim_cc = _GetLargePdbShimCcPath()

  for t in targets_to_shim:
    target_dict = target_dicts[t]
    target_name = target_dict.get('target_name')

    base_dict = _DeepCopySomeKeys(target_dict,
          ['configurations', 'default_configuration', 'toolset'])

    # This is the dict for copying the source file (part of the GYP tree)
    # to the intermediate directory of the project. This is necessary because
    # we can't always build a relative path to the shim source file (on Windows
    # GYP and the project may be on different drives), and Ninja hates absolute
    # paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYPs tree).
    copy_suffix = 'large_pdb_copy'
    copy_target_name = target_name + '_' + copy_suffix
    full_copy_target_name = _SuffixName(t, copy_suffix)
    shim_cc_basename = os.path.basename(large_pdb_shim_cc)
    shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
    shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
    copy_dict = copy.deepcopy(base_dict)
    copy_dict['target_name'] = copy_target_name
    copy_dict['type'] = 'none'
    copy_dict['sources'] = [ large_pdb_shim_cc ]
    copy_dict['copies'] = [{
      'destination': shim_cc_dir,
      'files': [ large_pdb_shim_cc ]
    }]

    # This is the dict for the PDB generating shim target. It depends on the
    # copy target.
    shim_suffix = 'large_pdb_shim'
    shim_target_name = target_name + '_' + shim_suffix
    full_shim_target_name = _SuffixName(t, shim_suffix)
    shim_dict = copy.deepcopy(base_dict)
    shim_dict['target_name'] = shim_target_name
    shim_dict['type'] = 'static_library'
    shim_dict['sources'] = [ shim_cc_path ]
    shim_dict['dependencies'] = [ full_copy_target_name ]

    # Set up the shim to output its PDB to the same location as the final linker
    # target.
    # NOTE(review): .iteritems() is Python 2-only; this module targets the
    # Python 2 gyp runtime.
    for config_name, config in shim_dict.get('configurations').iteritems():
      pdb_path = _GetPdbPath(target_dict, config_name, vars)

      # A few keys that we don't want to propagate.
      for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
        config.pop(key, None)

      msvs = config.setdefault('msvs_settings', {})

      # Update the compiler directives in the shim target.
      compiler = msvs.setdefault('VCCLCompilerTool', {})
      compiler['DebugInformationFormat'] = '3'
      compiler['ProgramDataBaseFileName'] = pdb_path

      # Set the explicit PDB path in the appropriate configuration of the
      # original target.
      config = target_dict['configurations'][config_name]
      msvs = config.setdefault('msvs_settings', {})
      linker = msvs.setdefault('VCLinkerTool', {})
      linker['GenerateDebugInformation'] = 'true'
      linker['ProgramDatabaseFile'] = pdb_path

    # Add the new targets. They must go to the beginning of the list so that
    # the dependency generation works as expected in ninja.
    target_list.insert(0, full_copy_target_name)
    target_list.insert(0, full_shim_target_name)
    target_dicts[full_copy_target_name] = copy_dict
    target_dicts[full_shim_target_name] = shim_dict

    # Update the original target to depend on the shim target.
    target_dict.setdefault('dependencies', []).append(full_shim_target_name)

  return (target_list, target_dicts)
|
joopert/home-assistant
|
refs/heads/dev
|
homeassistant/components/blackbird/__init__.py
|
36
|
"""The blackbird component."""
|
Maccimo/intellij-community
|
refs/heads/master
|
python/testData/completion/initParams.after.py
|
83
|
class C:
def __init__(self, auno=True): pass
c = C(auno=)
|
soonhokong/dReal-osx
|
refs/heads/master
|
benchmarks/network/battery/battery-double-p.py
|
11
|
from gen import *
##########
# shared #
##########
flow_var[0] = """
(declare-fun tau () Real)
"""
flow_dec[0] = """
(define-ode flow_1 ((= d/dt[tau] 1)))
"""
state_dec[0] = """
(declare-fun time_{0} () Real)
(declare-fun tau_{0}_0 () Real)
(declare-fun tau_{0}_t () Real)
"""
state_val[0] = """
(assert (<= 0 time_{0})) (assert (<= time_{0} 20))
(assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 50))
(assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 50))
(assert (or
(and (= mode1U_{0} true) (= mode1S_{0} true) (= mode2U_{0} true) (= mode2S_{0} true))
(and (= mode1U_{0} true) (= mode1S_{0} false) (= mode2U_{0} false) (= mode2S_{0} true))
(and (= mode1U_{0} true) (= mode1S_{0} false) (= mode2U_{0} false) (= mode2S_{0} false))
(and (= mode1U_{0} false) (= mode1S_{0} true) (= mode2U_{0} true) (= mode2S_{0} false))
(and (= mode1U_{0} false) (= mode1S_{0} false) (= mode2U_{0} true) (= mode2S_{0} false))
(and (= mode1U_{0} false) (= mode1S_{0} false) (= mode2U_{0} false) (= mode2S_{0} false))))
"""
cont_cond[0] = ["""
(assert (and (= [d1_{0}_t g1_{0}_t g2_{0}_t d2_{0}_t tau_{0}_t]
(pintegral 0. time_{0}
[d1_{0}_0 g1_{0}_0 g2_{0}_0 d2_{0}_0 tau_{0}_0]
[holder_{1} holder_{2} holder_{3}]))
(connect holder_{3} flow_1)))"""]
jump_cond[0] = ["""
(assert (= tau_{1}_0 tau_{0}_t))"""]
#############
# battery 1 #
#############
flow_var[1] = """
(declare-fun d1 () Real)
(declare-fun g1 () Real)
"""
flow_dec[1] = """
(define-ode flow_2 ((= d/dt[d1] (- (/ 0.5 0.166) (* 0.122 d1))) (= d/dt[g1] -0.5)))
(define-ode flow_3 ((= d/dt[d1] (- (/ 1 0.166) (* 0.122 d1))) (= d/dt[g1] -1)))
(define-ode flow_4 ((= d/dt[d1] (- 0 (* 0.122 d1))) (= d/dt[g1] 0)))
(define-ode flow_5 ((= d/dt[d1] 0) (= d/dt[g1] 0)))
"""
state_dec[1] = """
(declare-fun mode1U_{0} () Bool)
(declare-fun mode1S_{0} () Bool)
(declare-fun d1_{0}_0 () Real)
(declare-fun d1_{0}_t () Real)
(declare-fun g1_{0}_0 () Real)
(declare-fun g1_{0}_t () Real)
"""
state_val[1] = """
(assert (<= -10 d1_{0}_0)) (assert (<= d1_{0}_0 10))
(assert (<= -10 d1_{0}_t)) (assert (<= d1_{0}_t 10))
(assert (<= -10 g1_{0}_0)) (assert (<= g1_{0}_0 10))
(assert (<= -10 g1_{0}_t)) (assert (<= g1_{0}_t 10))
"""
cont_cond[1] = ["""
(assert (or (and (= mode1U_{0} true) (= mode1S_{0} true) (connect holder_{1} flow_2))
(and (= mode1U_{0} true) (= mode1S_{0} false) (connect holder_{1} flow_3))
(and (= mode1U_{0} false) (= mode1S_{0} true) (connect holder_{1} flow_4))
(and (= mode1U_{0} false) (= mode1S_{0} false) (connect holder_{1} flow_5))))
(assert (not (and (connect holder_{1} flow_2) (connect holder_{1} flow_3))))
(assert (not (and (connect holder_{1} flow_2) (connect holder_{1} flow_4))))
(assert (not (and (connect holder_{1} flow_2) (connect holder_{1} flow_5))))
(assert (not (and (connect holder_{1} flow_3) (connect holder_{1} flow_4))))
(assert (not (and (connect holder_{1} flow_3) (connect holder_{1} flow_5))))
(assert (not (and (connect holder_{1} flow_4) (connect holder_{1} flow_5))))"""]
jump_cond[1] = ["""
(assert (and (= d1_{1}_0 d1_{0}_t) (= g1_{1}_0 g1_{0}_t)))
(assert (or (and (<= g1_0_t (* (- 1 0.166) d1_0_t))
(= mode1U_{1} false) (= mode1S_{1} false))
(and (> g1_0_t (* (- 1 0.166) d1_0_t))
(not (and (= mode1U_{0} false) (= mode1S_{0} false)))
(not (and (= mode1U_{1} false) (= mode1S_{1} false))))))"""]
#############
# battery 2 #
#############
flow_var[2] = """
(declare-fun d2 () Real)
(declare-fun g2 () Real)
"""
flow_dec[2] = """
(define-ode flow_6 ((= d/dt[d2] (- (/ 0.5 0.166) (* 0.122 d2))) (= d/dt[g2] -0.5)))
(define-ode flow_7 ((= d/dt[d2] (- (/ 1 0.166) (* 0.122 d2))) (= d/dt[g2] -1)))
(define-ode flow_8 ((= d/dt[d2] (- 0 (* 0.122 d2))) (= d/dt[g2] 0)))
(define-ode flow_9 ((= d/dt[d2] 0) (= d/dt[g2] 0)))
"""
state_dec[2] = """
(declare-fun mode2U_{0} () Bool)
(declare-fun mode2S_{0} () Bool)
(declare-fun d2_{0}_0 () Real)
(declare-fun d2_{0}_t () Real)
(declare-fun g2_{0}_0 () Real)
(declare-fun g2_{0}_t () Real)
"""
state_val[2] = """
(assert (<= -10 d2_{0}_0)) (assert (<= d2_{0}_0 10))
(assert (<= -10 d2_{0}_t)) (assert (<= d2_{0}_t 10))
(assert (<= -10 g2_{0}_0)) (assert (<= g2_{0}_0 10))
(assert (<= -10 g2_{0}_t)) (assert (<= g2_{0}_t 10))
"""
cont_cond[2] = ["""
(assert (or (and (= mode2U_{0} true) (= mode2S_{0} true) (connect holder_{2} flow_6))
(and (= mode2U_{0} true) (= mode2S_{0} false) (connect holder_{2} flow_7))
(and (= mode2U_{0} false) (= mode2S_{0} true) (connect holder_{2} flow_8))
(and (= mode2U_{0} false) (= mode2S_{0} false) (connect holder_{2} flow_9))))
(assert (not (and (connect holder_{2} flow_6) (connect holder_{2} flow_7))))
(assert (not (and (connect holder_{2} flow_6) (connect holder_{2} flow_8))))
(assert (not (and (connect holder_{2} flow_6) (connect holder_{2} flow_9))))
(assert (not (and (connect holder_{2} flow_7) (connect holder_{2} flow_8))))
(assert (not (and (connect holder_{2} flow_7) (connect holder_{2} flow_9))))
(assert (not (and (connect holder_{2} flow_8) (connect holder_{2} flow_9))))"""]
jump_cond[2] = ["""
(assert (and (= d2_{1}_0 d2_{0}_t) (= g2_{1}_0 g2_{0}_t)))
(assert (or (and (<= g2_0_t (* (- 1 0.166) d2_0_t))
(= mode2U_{1} false) (= mode2S_{1} false))
(and (> g2_0_t (* (- 1 0.166) d2_0_t))
(not (and (= mode2U_{0} false) (= mode2S_{0} false)))
(not (and (= mode2U_{1} false) (= mode2S_{1} false))))))"""]
#############
# Init/Goal #
#############
init_cond = """
(assert (= tau_{0}_0 0))
(assert (and (= mode1U_{0} true) (= mode1S_{0} true)))
(assert (and (= g1_{0}_0 8.5) (= d1_{0}_0 0)))
(assert (and (= mode2U_{0} true) (= mode2S_{0} true)))
(assert (and (= g2_{0}_0 7.5) (= d2_{0}_0 0)))
"""
goal_cond = """
(assert (and (>= tau_{0}_t 10)
(not (and (= mode1U_{0} false) (= mode1S_{0} false)
(= mode2U_{0} false) (= mode2S_{0} false)))))
"""
import sys

try:
    # Unrolling bound taken from the command line.
    bound = int(sys.argv[1])
except (IndexError, ValueError):
    # Narrowed from a bare 'except:': only a missing or non-integer argument
    # should print usage (a bare except also swallowed SystemExit etc.).
    print("Usage:", sys.argv[0], "<Bound>")
else:
    # Modes 0 (shared clock), 1 and 2 (batteries); 3 ODE holders.
    generate(bound, 1, [0,1,2], 3, init_cond, goal_cond)
|
ivanbaldo/yowsup
|
refs/heads/master
|
yowsup/layers/axolotl/protocolentities/receipt_outgoing_retry.py
|
35
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
from yowsup.layers.axolotl.protocolentities.iq_keys_get_result import ResultGetKeysIqProtocolEntity
class RetryOutgoingReceiptProtocolEntity(OutgoingReceiptProtocolEntity):
    '''
    <receipt type="retry" to="xxxxxxxxxxx@s.whatsapp.net" id="1415389947-12" t="1432833777">
        <retry count="1" t="1432833266" id="1415389947-12" v="1">
        </retry>
        <registration>
            HEX:xxxxxxxxx
        </registration>
    </receipt>
    '''
    def __init__(self, _id, to, t, v = "1", count = "1",regData = ""):
        """Build a retry receipt for message *_id* addressed to *to*."""
        super(RetryOutgoingReceiptProtocolEntity, self).__init__(_id,to)
        self.setRetryData(t,v,count,regData)

    def setRetryData(self, t,v,count,regData):
        # Normalize the numeric attributes; regData is the raw registration
        # payload bytes.
        self.t = int(t)
        self.v = int(v)
        self.count = int(count)
        self.regData = regData

    def setRegData(self,regData):
        '''
        In axolotl layer:
            regData = self.store.getLocalRegistrationId()
        '''
        self.regData = ResultGetKeysIqProtocolEntity._intToBytes(regData)

    def toProtocolTreeNode(self):
        """Serialize to a <receipt type="retry"> node with retry/registration children."""
        node = super(RetryOutgoingReceiptProtocolEntity, self).toProtocolTreeNode()
        node.setAttribute("type", "retry")
        retry = ProtocolTreeNode("retry", {"count": str(self.count),"t":str(self.t),"id":self.getId(),"v":str(self.v)})
        node.addChild(retry)
        registration = ProtocolTreeNode("registration",data=self.regData)
        node.addChild(registration)
        return node

    def __str__(self):
        out = super(RetryOutgoingReceiptProtocolEntity, self).__str__()
        return out

    @staticmethod
    def fromProtocolTreeNode(node):
        entity = OutgoingReceiptProtocolEntity.fromProtocolTreeNode(node)
        entity.__class__ = RetryOutgoingReceiptProtocolEntity
        retryNode = node.getChild("retry")
        entity.setRetryData(retryNode["t"], retryNode["v"], retryNode["count"], node.getChild("registration").data)
        # BUG FIX: the original never returned the entity, so every caller
        # received None instead of the parsed receipt.
        return entity

    @staticmethod
    def fromMesageNode(MessageNodeToBeRetried):
        # NOTE(review): the method name keeps the historical 'Mesage' typo
        # because external callers reference it by this exact name.
        return RetryOutgoingReceiptProtocolEntity(
            MessageNodeToBeRetried.getAttributeValue("id"),
            MessageNodeToBeRetried.getAttributeValue("from"),
            MessageNodeToBeRetried.getAttributeValue("t"),
            MessageNodeToBeRetried.getChild("enc").getAttributeValue("v")
        )
|
erikr/django
|
refs/heads/master
|
tests/i18n/urls.py
|
128
|
from __future__ import unicode_literals
from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.translation import ugettext_lazy as _
# Minimal URLconf for Django's i18n tests; both routes are wrapped in
# i18n_patterns(), so they resolve under a language prefix (e.g. /en/simple/).
urlpatterns = i18n_patterns(
    # Trivial view: exists only so a URL resolves inside a language prefix.
    url(r'^simple/$', lambda r: HttpResponse()),
    # Streaming response containing lazy translations; exercises resolution
    # of lazy strings when the response body is iterated.
    url(r'^streaming/$', lambda r: StreamingHttpResponse([_("Yes"), "/", _("No")])),
)
|
open-synergy/stock-logistics-workflow
|
refs/heads/8.0
|
product_serial/company.py
|
27
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Product serial module for OpenERP
# Copyright (C) 2010-2011 Anevia. All Rights Reserved
# Copyright (C) 2013 Akretion
# @author: Sebastien Beau <sebastien.beau@akretion.com>
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class company(orm.Model):
    """Extend res.company with configuration flags for the product_serial
    module (automatic picking-line split and invoice-line grouping)."""
    _inherit = 'res.company'
    _columns = {
        # Enables automatic splitting of move lines on pickings.
        'autosplit_is_active': fields.boolean(
            'Active auto split',
            help="Active the automatic split of move lines on the pickings."),
        # Controls whether identical invoice lines are merged when invoicing
        # a picking (one line per move line when disabled).
        'is_group_invoice_line': fields.boolean(
            'Group invoice lines',
            help="If active, OpenERP will group the identical invoice lines "
            "when generating an invoice from a picking. If inactive, each "
            "move line will generate one invoice line."),
    }
    # Both behaviours are enabled by default for new companies.
    _defaults = {
        'autosplit_is_active': True,
        'is_group_invoice_line': True,
    }
|
henrytao-me/openerp.positionq
|
refs/heads/master
|
openerp/addons/mail/mail_vote.py
|
439
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
class mail_vote(osv.Model):
    ''' Mail vote feature allow users to like and unlike messages attached
        to a document. This allows for example to build a ranking-based
        displaying of messages, for FAQ. '''
    _name = 'mail.vote'
    _description = 'Mail Vote'
    # One row per (message, user) vote; both ends cascade on delete so votes
    # disappear with their message or user.
    _columns = {
        'message_id': fields.many2one('mail.message', 'Message', select=1,
            ondelete='cascade', required=True),
        'user_id': fields.many2one('res.users', 'User', select=1,
            ondelete='cascade', required=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
odooindia/odoo
|
refs/heads/master
|
addons/l10n_si/__openerp__.py
|
430
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright: (C) 2012 - Mentis d.o.o., Dravograd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP module manifest for the Slovenian accounting localization.
{
    "name" : "Slovenian - Accounting",
    "version" : "1.2",
    "author" : "Mentis d.o.o.",
    "website" : "http://www.mentis.si",
    "category" : "Localization/Account Charts",
    # BUG FIX: "description" appeared twice in this dict; the first value
    # (" ") was dead because the later duplicate key overwrote it. Only the
    # effective value is kept.
    "description" : "Kontni načrt za gospodarske družbe",
    "depends" : ["account", "base_iban", "base_vat", "account_chart", "account_cancel"],
    # Data files are loaded in dependency order: account types before
    # templates, chart before taxes, fiscal positions last.
    "data" : [
        "data/account.account.type.csv",
        "data/account.account.template.csv",
        "data/account.tax.code.template.csv",
        "data/account.chart.template.csv",
        "data/account.tax.template.csv",
        "data/account.fiscal.position.template.csv",
        "data/account.fiscal.position.account.template.csv",
        "data/account.fiscal.position.tax.template.csv",
        "l10n_si_wizard.xml"
    ],
    'auto_install': False,
    "installable": True,
}
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/raw/GLES2/QCOM/extended_get.py
|
8
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
# Canonical extension name used when binding these entry points.
_EXTENSION_NAME = 'GLES2_QCOM_extended_get'
def _f( function ):
    # Wrap an autogenerated stub so it is resolved against the GLES2 platform
    # implementation at call time, with the standard GL error checker attached.
    return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_QCOM_extended_get',error_checker=_errors._error_checker)
# Enumerant constants introduced by GL_QCOM_extended_get.
GL_STATE_RESTORE=_C('GL_STATE_RESTORE',0x8BDC)
GL_TEXTURE_DEPTH_QCOM=_C('GL_TEXTURE_DEPTH_QCOM',0x8BD4)
GL_TEXTURE_FORMAT_QCOM=_C('GL_TEXTURE_FORMAT_QCOM',0x8BD6)
GL_TEXTURE_HEIGHT_QCOM=_C('GL_TEXTURE_HEIGHT_QCOM',0x8BD3)
GL_TEXTURE_IMAGE_VALID_QCOM=_C('GL_TEXTURE_IMAGE_VALID_QCOM',0x8BD8)
GL_TEXTURE_INTERNAL_FORMAT_QCOM=_C('GL_TEXTURE_INTERNAL_FORMAT_QCOM',0x8BD5)
GL_TEXTURE_NUM_LEVELS_QCOM=_C('GL_TEXTURE_NUM_LEVELS_QCOM',0x8BD9)
GL_TEXTURE_OBJECT_VALID_QCOM=_C('GL_TEXTURE_OBJECT_VALID_QCOM',0x8BDB)
GL_TEXTURE_TARGET_QCOM=_C('GL_TEXTURE_TARGET_QCOM',0x8BDA)
GL_TEXTURE_TYPE_QCOM=_C('GL_TEXTURE_TYPE_QCOM',0x8BD7)
GL_TEXTURE_WIDTH_QCOM=_C('GL_TEXTURE_WIDTH_QCOM',0x8BD2)
# Entry points: the Python bodies are `pass` stubs; @_f installs the real
# platform implementation and @_p.types declares the C-level signature.
@_f
@_p.types(None,_cs.GLenum,arrays.GLvoidpArray)
def glExtGetBufferPointervQCOM(target,params):pass
@_f
@_p.types(None,arrays.GLuintArray,_cs.GLint,arrays.GLintArray)
def glExtGetBuffersQCOM(buffers,maxBuffers,numBuffers):pass
@_f
@_p.types(None,arrays.GLuintArray,_cs.GLint,arrays.GLintArray)
def glExtGetFramebuffersQCOM(framebuffers,maxFramebuffers,numFramebuffers):pass
@_f
@_p.types(None,arrays.GLuintArray,_cs.GLint,arrays.GLintArray)
def glExtGetRenderbuffersQCOM(renderbuffers,maxRenderbuffers,numRenderbuffers):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLenum,_cs.GLint,_cs.GLenum,arrays.GLintArray)
def glExtGetTexLevelParameterivQCOM(texture,face,level,pname,params):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLint,_cs.GLsizei,_cs.GLsizei,_cs.GLsizei,_cs.GLenum,_cs.GLenum,ctypes.c_void_p)
def glExtGetTexSubImageQCOM(target,level,xoffset,yoffset,zoffset,width,height,depth,format,type,texels):pass
@_f
@_p.types(None,arrays.GLuintArray,_cs.GLint,arrays.GLintArray)
def glExtGetTexturesQCOM(textures,maxTextures,numTextures):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLint)
def glExtTexObjectStateOverrideiQCOM(target,pname,param):pass
|
maocubillos/pnt2015
|
refs/heads/master
|
src/bower_components/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py
|
566
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
_TARGET_TYPE_EXT = {
'executable': '.exe',
'loadable_module': '.dll',
'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
  """Append a shard number to the end of a target name.

  'foo#target' with shard 1 becomes 'foo_1#target'.

  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  shard_suffix = str(number)
  return _SuffixName(name, shard_suffix)
def ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linkers limits.

  Targets carrying a non-zero 'msvs_shard' count are replaced by that many
  '<name>_<i>' shards: their sources are distributed round-robin across the
  shards and every dependency reference to a sharded target is rewritten to
  name all of its shards.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  # Collect the shard count requested by each target (absent/0 = unsharded).
  shard_counts = {}
  for name, spec in target_dicts.items():
    count = int(spec.get('msvs_shard', 0))
    if count:
      shard_counts[name] = count
  # Expand the ordered target list.
  new_target_list = []
  for name in target_list:
    if name in shard_counts:
      new_target_list.extend(
          _ShardName(name, i) for i in range(shard_counts[name]))
    else:
      new_target_list.append(name)
  # Expand the property dicts, splitting 'sources' round-robin per shard.
  new_target_dicts = {}
  for name, spec in target_dicts.items():
    if name not in shard_counts:
      # Unsharded targets are carried over by reference, as before.
      new_target_dicts[name] = spec
      continue
    count = shard_counts[name]
    for i in range(count):
      shard_name = _ShardName(name, i)
      shard_spec = copy.copy(spec)
      shard_spec['target_name'] = _ShardName(shard_spec['target_name'], i)
      sources = shard_spec.get('sources', [])
      shard_spec['sources'] = [sources[pos]
                               for pos in range(i, len(sources), count)]
      new_target_dicts[shard_name] = shard_spec
  # Rewrite dependencies so references to sharded targets name every shard.
  for spec in new_target_dicts.values():
    expanded = []
    for dep in spec.get('dependencies', []):
      if dep in shard_counts:
        expanded.extend(_ShardName(dep, i) for i in range(shard_counts[dep]))
      else:
        expanded.append(dep)
    spec['dependencies'] = expanded
  return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
  """Insert a shim target that forces the linker to use 4KB pagesize PDBs.

  This is a workaround for targets with PDBs greater than 1GB in size, the
  limit for the 1KB pagesize PDBs created by the linker by default.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    Tuple of the shimmed version of the inputs.
  """
  # Determine which targets need shimming.
  targets_to_shim = []
  for t in target_dicts:
    target_dict = target_dicts[t]
    # We only want to shim targets that have msvs_large_pdb enabled.
    if not int(target_dict.get('msvs_large_pdb', 0)):
      continue
    # This is intended for executable, shared_library and loadable_module
    # targets where every configuration is set up to produce a PDB output.
    # If any of these conditions is not true then the shim logic will fail
    # below.
    targets_to_shim.append(t)
  large_pdb_shim_cc = _GetLargePdbShimCcPath()
  for t in targets_to_shim:
    target_dict = target_dicts[t]
    target_name = target_dict.get('target_name')
    base_dict = _DeepCopySomeKeys(target_dict,
          ['configurations', 'default_configuration', 'toolset'])
    # This is the dict for copying the source file (part of the GYP tree)
    # to the intermediate directory of the project. This is necessary because
    # we can't always build a relative path to the shim source file (on Windows
    # GYP and the project may be on different drives), and Ninja hates absolute
    # paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYPs tree).
    copy_suffix = 'large_pdb_copy'
    copy_target_name = target_name + '_' + copy_suffix
    full_copy_target_name = _SuffixName(t, copy_suffix)
    shim_cc_basename = os.path.basename(large_pdb_shim_cc)
    shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
    shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
    copy_dict = copy.deepcopy(base_dict)
    copy_dict['target_name'] = copy_target_name
    copy_dict['type'] = 'none'
    copy_dict['sources'] = [ large_pdb_shim_cc ]
    copy_dict['copies'] = [{
      'destination': shim_cc_dir,
      'files': [ large_pdb_shim_cc ]
    }]
    # This is the dict for the PDB generating shim target. It depends on the
    # copy target.
    shim_suffix = 'large_pdb_shim'
    shim_target_name = target_name + '_' + shim_suffix
    full_shim_target_name = _SuffixName(t, shim_suffix)
    shim_dict = copy.deepcopy(base_dict)
    shim_dict['target_name'] = shim_target_name
    shim_dict['type'] = 'static_library'
    shim_dict['sources'] = [ shim_cc_path ]
    shim_dict['dependencies'] = [ full_copy_target_name ]
    # Set up the shim to output its PDB to the same location as the final linker
    # target.
    # BUG FIX: use items() instead of the Python 2-only iteritems() so this
    # module also runs under Python 3 (behavior is identical on Python 2.7).
    for config_name, config in shim_dict.get('configurations').items():
      pdb_path = _GetPdbPath(target_dict, config_name, vars)
      # A few keys that we don't want to propagate.
      for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
        config.pop(key, None)
      msvs = config.setdefault('msvs_settings', {})
      # Update the compiler directives in the shim target.
      compiler = msvs.setdefault('VCCLCompilerTool', {})
      compiler['DebugInformationFormat'] = '3'
      compiler['ProgramDataBaseFileName'] = pdb_path
      # Set the explicit PDB path in the appropriate configuration of the
      # original target.
      config = target_dict['configurations'][config_name]
      msvs = config.setdefault('msvs_settings', {})
      linker = msvs.setdefault('VCLinkerTool', {})
      linker['GenerateDebugInformation'] = 'true'
      linker['ProgramDatabaseFile'] = pdb_path
    # Add the new targets. They must go to the beginning of the list so that
    # the dependency generation works as expected in ninja.
    target_list.insert(0, full_copy_target_name)
    target_list.insert(0, full_shim_target_name)
    target_dicts[full_copy_target_name] = copy_dict
    target_dicts[full_shim_target_name] = shim_dict
    # Update the original target to depend on the shim target.
    target_dict.setdefault('dependencies', []).append(full_shim_target_name)
  return (target_list, target_dicts)
|
mudithkr/zamboni
|
refs/heads/master
|
mkt/inapp/models.py
|
20
|
import json
from django.conf import settings
from django.db import models
from mkt.prices.models import Price
from mkt.site.models import ModelBase
from mkt.translations.fields import save_signal, TranslatedField
from mkt.webapps.models import UUIDModelMixin
class InAppProduct(UUIDModelMixin, ModelBase):
    """
    An item which is purchasable from within a marketplace app.
    """
    # Inactive products are excluded from purchase (see is_purchasable()).
    active = models.BooleanField(default=True, db_index=True)
    guid = models.CharField(max_length=255, unique=True, null=True, blank=True)
    webapp = models.ForeignKey('webapps.WebApp', null=True, blank=True)
    price = models.ForeignKey(Price)
    name = TranslatedField(require_locale=False)
    default_locale = models.CharField(max_length=10,
                                      default=settings.LANGUAGE_CODE.lower())
    logo_url = models.URLField(max_length=1024, null=True, blank=True)
    # The JSON value for the simulate parameter of a JWT.
    # Example: {"result": "postback"}. This will be NULL for no simulation.
    simulate = models.CharField(max_length=100, null=True, blank=True)
    # When True, this is a stub product created internally.
    stub = models.BooleanField(default=False, db_index=True)

    class Meta:
        db_table = 'inapp_products'

    def __unicode__(self):
        return u'<{cls} {id}: {app}: {name}>'.format(
            app=self.webapp and self.webapp.name,
            name=self.name, id=self.pk, cls=self.__class__.__name__)

    @property
    def icon_url(self):
        # Prefer an explicit logo; fall back to the parent app's 64px icon.
        return self.logo_url or (self.webapp and self.webapp.get_icon_url(64))

    def simulate_data(self):
        # Parsed form of the `simulate` JSON column; None when unset.
        if not self.simulate:
            return None
        return json.loads(self.simulate)

    def is_purchasable(self):
        # Simulated products are always purchasable; real ones additionally
        # require a public parent app.
        return self.active and (self.simulate or
                                (self.webapp and self.webapp.is_public()))

    def delete(self):
        # Deletion is forbidden: purchase records reference these rows.
        # NOTE(review): ProtectedError's second argument is normally the set
        # of protected objects — passing `self` looks intentional here but
        # confirm against callers that inspect the exception.
        raise models.ProtectedError('Inapp products may not be deleted.', self)

# Keep translated fields (name) in sync when an InAppProduct is saved.
models.signals.pre_save.connect(save_signal, sender=InAppProduct,
                                dispatch_uid='inapp_products_translations')
|
GinnyN/Team-Fortress-RPG-Generators
|
refs/heads/master
|
tests/regressiontests/views/models.py
|
144
|
"""
Regression tests for Django built-in views.
"""
from django.db import models
class Author(models.Model):
    """Simple author model with a get_absolute_url for redirect tests."""
    name = models.CharField(max_length=100)

    def __unicode__(self):
        return self.name

    def get_absolute_url(self):
        # Hard-coded URL pattern used by the built-in view regression tests.
        return '/views/authors/%s/' % self.id
class BaseArticle(models.Model):
    """
    An abstract article Model so that we can create article models with and
    without a get_absolute_url method (for create_update generic views tests).
    """
    title = models.CharField(max_length=100)
    slug = models.SlugField()
    author = models.ForeignKey(Author)

    class Meta:
        # Abstract: concrete subclasses below add their own date field and
        # optional get_absolute_url.
        abstract = True

    def __unicode__(self):
        return self.title
class Article(BaseArticle):
    """Concrete article WITHOUT a get_absolute_url method."""
    date_created = models.DateTimeField()
class UrlArticle(BaseArticle):
    """
    An Article class with a get_absolute_url defined.
    """
    date_created = models.DateTimeField()

    def get_absolute_url(self):
        return '/urlarticles/%s/' % self.slug
    # NOTE(review): this attribute is presumably inspected by the view tests
    # (e.g. cache purging behavior) — confirm against the test suite.
    get_absolute_url.purge = True
class DateArticle(BaseArticle):
    """
    An article Model with a DateField instead of DateTimeField,
    for testing #7602
    """
    date_created = models.DateField()
|
lachesis/shed
|
refs/heads/master
|
helpers.py
|
1
|
import math
import gdal
import numpy as np
from tornado.httpclient import AsyncHTTPClient
from skimage.draw import line
from tornado.gen import Return
from tornado import gen
ZOOM=12

class CoordSystem(object):
    """Conversions between WGS84 lng/lat degrees, global web-mercator pixel
    space and tile numbers (256px tiles, flipped Y axis) at a given zoom."""

    @classmethod
    def lnglat_to_pixel(cls, lnglat, zoom=ZOOM):
        """(lng, lat) in degrees -> rounded global pixel (x, y)."""
        lng_deg, lat_deg = lnglat
        lat_rad = lat_deg * (math.pi / 180.0)
        lng_rad = lng_deg * (math.pi / 180.0)
        scale = 128.0 / math.pi * 2**zoom
        px = scale * (lng_rad + math.pi)
        py = scale * (math.pi - math.log(math.tan(math.pi / 4.0 + lat_rad / 2.0)))
        return round(px), round(py)

    @classmethod
    def lnglat_to_tile(cls, lnglat, zoom=ZOOM):
        """(lng, lat) in degrees -> (tile_x, tile_y)."""
        return cls.pixel_to_tile(cls.lnglat_to_pixel(lnglat, zoom), zoom)

    @classmethod
    def pixel_to_tile(cls, pixel, zoom=ZOOM):
        """Global pixel (x, y) -> (tile_x, tile_y); the Y axis is flipped."""
        return (int(pixel[0]//256), 2**zoom - int(pixel[1]//256) - 1)

    @classmethod
    def pixel_to_lnglat(cls, point, zoom=ZOOM):
        """Global pixel (x, y) -> (lng, lat) in degrees."""
        px, py = point
        world = 128.0 * 2**zoom
        lat_rad = (4.0 * math.atan(math.exp(math.pi - py * math.pi / world)) - math.pi) / 2.0
        lng_rad = px * math.pi / world - math.pi
        return lng_rad * (180.0 / math.pi), lat_rad * (180.0 / math.pi)
def load_float32_image(buffer):
    """Decode an in-memory raster file into a 2-D array of band-1 values.

    The raw bytes are exposed to GDAL through its /vsimem virtual
    filesystem, band 1 is read out as an array, and the temporary vsimem
    entry is always unlinked — on success and on failure.

    Args:
        buffer (bytes): raw image file contents.
    Returns:
        The first raster band as returned by GDAL's ReadAsArray().
    """
    # BUG FIX: the original used the Python 2-only `except Exception, e`
    # syntax (a SyntaxError under Python 3) and re-raised with `raise e`,
    # losing the traceback. A try/finally expresses the cleanup directly.
    gdal.FileFromMemBuffer('/vsimem/temp', buffer)
    try:
        ds = gdal.Open('/vsimem/temp')
        channel = ds.GetRasterBand(1).ReadAsArray()
        return channel
    finally:
        ds = None  # drop the dataset reference so GDAL closes the file
        gdal.Unlink('/vsimem/temp')  # cleanup
class Tile(object):
    """One 256x256 map tile fetched asynchronously from a tile server.

    Coordinates are in global pixel space at the tile's zoom level; the tile
    contents are fetched eagerly on construction as a future.
    """
    def __init__(self, zoom, pixel, url_template):
        self.zoom = zoom
        self.url_template = url_template
        self.pixel = (pixel[0]//256*256, pixel[1]//256*256) #round pixel to top left corner
        self.data = self._retrieve_data() #data is actually a future
    @property
    def url(self):
        # Convert the top-left pixel to (z, x, y) tile numbers and substitute
        # them into the URL template.
        tile = CoordSystem.pixel_to_tile(self.pixel, self.zoom)
        return self.url_template.format(z=self.zoom, x=tile[0], y=tile[1])
    @gen.coroutine
    def _retrieve_data(self):
        # Fetch the tile bytes and decode them into a float32 raster.
        self.res = yield AsyncHTTPClient().fetch(self.url) #NOTE: currently will throw an http error if status code is not 200
        raise Return(load_float32_image(self.res.body) if self.res.code == 200 else None)
class TileSampler(object):
    """Samples tile values. Everything is in global pixel space at specified zoom"""
    def __init__(self, zoom=ZOOM, url_template='http://127.0.0.1:8080/{z}/{x}/{y}.tiff'):
        self.zoom = zoom
        self.url_template = url_template
        # Cache of fetched tiles keyed by their top-left global pixel.
        self._tiles = {}
    def _unique_rows(self, data):
        """Returns only the unique rows in a numpy array
        Args:
            data (array): numpy array to dedupe
        Returns:
            array: deduped numpy array
        """
        # View each row as a single structured element so np.unique can
        # dedupe whole rows, then view back into the original dtype.
        uniq = np.unique(data.view(data.dtype.descr * data.shape[1]))
        return uniq.view(data.dtype).reshape(-1, data.shape[1])
    @gen.coroutine
    def _sample_tile_pixels(self, tile_pixel, pixels):
        """Returns pixel's values which intersect a single tile"""
        # Shift into tile-local coordinates and keep only in-tile pixels.
        pixels = pixels - np.array(tile_pixel)
        pixels = pixels[(pixels[:,0]>=0) & (pixels[:,0]<=255) & (pixels[:,1]>=0) & (pixels[:,1]<=255)]
        xs = pixels[:,0]
        ys = pixels[:,1]
        tile_data = yield self.get_tile(tile_pixel).data
        raise Return(tile_data[ys.astype(int), xs.astype(int)]) #numpy is row column
    def get_tile(self, pixel):
        """Returns a tile. If tile already exists a cached version will be returned.
        Args:
            pixel ((int, int)): A pixel in the tile to be returned
        """
        tile_pixel = (pixel[0]//256*256, pixel[1]//256*256)
        if tile_pixel not in self._tiles:
            self._tiles[tile_pixel] = Tile(self.zoom, tile_pixel, self.url_template)
        return self._tiles[tile_pixel]
    @gen.coroutine
    def sample_pixels(self, pixels):
        """Samples arbitrary pixel values from the map. Returned order may not match input order (will for lines)
        Args:
            pixels (array): 2d numpy array where each row is a pixel to sample
        Returns:
            array: numpy array of values
        """
        #determin required tiles
        tile_pixels = np.floor_divide(pixels, 256)*256
        tile_pixels = self._unique_rows(tile_pixels)
        #preload tiles
        # Touching each tile here starts all fetches concurrently before the
        # sequential yields below.
        for tile_pixel in tile_pixels: self.get_tile(tile_pixel)
        #sample tiles
        data = [(yield self._sample_tile_pixels(tile_pixel, pixels)) for tile_pixel in tile_pixels]
        raise Return((np.concatenate(data), pixels))
    @gen.coroutine
    def sample_line(self, pixel1, pixel2):
        """Samples a line of pixel values in the map. Pixel values should be ordered from pixel1 to pixel2.
        Args:
            pixel1 ((int,int)): the 1st coordinate of the line in global pixel space
            pixel2 ((int,int)): the 2nd coordinate of the line in global pixel space
        Returns:
            array: numpy array of values
        """
        # NOTE(review): map() returns a lazy iterator on Python 3, so the
        # pixel1[0] indexing below would fail there; this module targets
        # Python 2 (see `except Exception, e` elsewhere) — confirm before
        # porting.
        pixel1 = map(int, pixel1)
        pixel2 = map(int, pixel2)
        xs, ys = line(pixel1[0], pixel1[1], pixel2[0], pixel2[1])
        pixels = np.dstack((xs, ys))[0]
        raise Return((yield self.sample_pixels(pixels)))
    @gen.coroutine
    def sample_pixel(self, pixel):
        # Single-pixel convenience wrapper around sample_pixels().
        raise Return((yield self.sample_pixels(np.array([pixel])))[0][0])
#Testing urls
#http://localhost:8888/elevation/-122.30933440000001/37.8666298
#http://localhost:8888/shed/-122.30933440000001/37.8666298/10/300
|
willcode/gnuradio
|
refs/heads/master
|
grc/tests/test_block_templates.py
|
15
|
import pytest
from grc.core.blocks._templates import MakoTemplates
from grc.core.errors import TemplateError
class Block(object):
    """Minimal stand-in for a GRC block, used to exercise MakoTemplates."""
    # Class-level dict: shared by ALL instances — __init__ updates it in
    # place, which these tests rely on for template namespace lookups.
    namespace_templates = {}
    templates = MakoTemplates(None)
    def __init__(self, **kwargs):
        self.namespace_templates.update(kwargs)
def test_simple():
    # Raw template text is stored verbatim; render() substitutes from the
    # bound block's namespace and caches the compiled template.
    t = MakoTemplates(_bind_to=Block(num='123'), test='abc${num}')
    assert t['test'] == 'abc${num}'
    assert t.render('test') == 'abc123'
    assert 'abc${num}' in t._template_cache
def test_instance():
    # Assigning through the descriptor creates a per-instance copy.
    block = Block(num='123')
    block.templates['test'] = 'abc${num}'
    assert block.templates.render('test') == 'abc123'
    assert block.templates is block.__dict__['templates']
def test_list():
    # A list-valued entry renders element-wise; each element is cached.
    templates = ['abc${num}', '${2 * num}c']
    t = MakoTemplates(_bind_to=Block(num='123'), test=templates)
    assert t['test'] == templates
    assert t.render('test') == ['abc123', '123123c']
    assert set(templates) == set(t._template_cache.keys())
def test_parse_error():
    # An unterminated ${...} expression surfaces as TemplateError.
    with pytest.raises(TemplateError):
        MakoTemplates(_bind_to=Block(num='123'), test='abc${num NOT CLOSING').render('test')
def test_parse_error2():
    # Referencing an unknown namespace variable also raises TemplateError.
    with pytest.raises(TemplateError):
        MakoTemplates(_bind_to=Block(num='123'), test='abc${ WRONG_VAR }').render('test')
|
nthien/pulp
|
refs/heads/master
|
server/pulp/server/webservices/views/roles.py
|
7
|
from django.core.urlresolvers import reverse
from django.views.generic import View
from pulp.server import exceptions as pulp_exceptions
from pulp.server.auth import authorization
from pulp.server.managers import factory
from pulp.server.webservices.views.decorators import auth_required
from pulp.server.webservices.views.util import (generate_json_response,
generate_json_response_with_pulp_encoder,
generate_redirect_response,
json_body_required)
class RolesView(View):
    """
    Views for roles.
    """
    @auth_required(authorization.READ)
    def get(self, request):
        """
        List all roles.
        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :return: Response containing a list of roles
        :rtype: django.http.HttpResponse
        """
        role_query_manager = factory.role_query_manager()
        user_query_manager = factory.user_query_manager()
        permissions_manager = factory.permission_manager()
        roles = role_query_manager.find_all()
        for role in roles:
            # Attach the logins of the role's members.
            role['users'] = [u['login'] for u in
                             user_query_manager.find_users_belonging_to_role(role['id'])]
            resource_permission = {}
            # isolate schema change
            # Convert the stored list-of-dicts permission schema into a
            # {resource: [operation names]} mapping for the API response.
            if role['permissions']:
                for item in role['permissions']:
                    resource = item['resource']
                    operations = item.get('permission', [])
                    resource_permission[resource] = [permissions_manager.operation_value_to_name(o)
                                                     for o in operations]
            role['permissions'] = resource_permission
            link = {'_href': reverse('role_resource',
                                     kwargs={'role_id': role['id']})}
            role.update(link)
        return generate_json_response_with_pulp_encoder(roles)
    @auth_required(authorization.CREATE)
    @json_body_required
    def post(self, request):
        """
        Create a new role.
        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :return: Response containing the role
        :rtype: django.http.HttpResponse
        """
        role_data = request.body_as_json
        role_id = role_data.get('role_id', None)
        display_name = role_data.get('display_name', None)
        description = role_data.get('description', None)
        manager = factory.role_manager()
        role = manager.create_role(role_id, display_name, description)
        link = {'_href': reverse('role_resource',
                                 kwargs={'role_id': role['id']})}
        role.update(link)
        # 201-style redirect pointing at the newly created resource.
        response = generate_json_response_with_pulp_encoder(role)
        redirect_response = generate_redirect_response(response, link['_href'])
        return redirect_response
class RoleResourceView(View):
    """
    Views for a single role.
    """
    @auth_required(authorization.READ)
    def get(self, request, role_id):
        """
        Retrieve a specific role.
        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :param role_id: id for the requested role
        :type role_id: str
        :return: Response containing the role
        :rtype: django.http.HttpResponse
        :raises: MissingResource if role ID does not exist
        """
        role = factory.role_query_manager().find_by_id(role_id)
        if role is None:
            raise pulp_exceptions.MissingResource(role_id)
        role['users'] = [u['login'] for u in
                         factory.user_query_manager().find_users_belonging_to_role(role['id'])]
        permissions_manager = factory.permission_manager()
        # isolate schema change
        # Same {resource: [operation names]} conversion as RolesView.get.
        # NOTE(review): unlike RolesView.get there is no truthiness guard on
        # role['permissions'] before iterating — confirm find_by_id always
        # returns a list here.
        resource_permission = {}
        for item in role['permissions']:
            resource = item['resource']
            operations = item.get('permission', [])
            resource_permission[resource] = [permissions_manager.operation_value_to_name(o)
                                             for o in operations]
        role['permissions'] = resource_permission
        link = {'_href': reverse('role_resource',
                                 kwargs={'role_id': role['id']})}
        role.update(link)
        return generate_json_response_with_pulp_encoder(role)
    @auth_required(authorization.DELETE)
    def delete(self, request, role_id):
        """
        Delete a role.
        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :param role_id: id for the requested role
        :type role_id: str
        :return: An empty response
        :rtype: django.http.HttpResponse
        """
        manager = factory.role_manager()
        result = manager.delete_role(role_id)
        return generate_json_response(result)
    @auth_required(authorization.UPDATE)
    @json_body_required
    def put(self, request, role_id):
        """
        Update a specific role.
        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :param role_id: id for the requested role
        :type role_id: str
        :return: Response containing the role
        :rtype: django.http.HttpResponse
        """
        role_data = request.body_as_json
        # Only the 'delta' document from the request body is applied.
        delta = role_data.get('delta', None)
        manager = factory.role_manager()
        role = manager.update_role(role_id, delta)
        link = {'_href': reverse('role_resource',
                                 kwargs={'role_id': role['id']})}
        role.update(link)
        return generate_json_response_with_pulp_encoder(role)
class RoleUsersView(View):
    """
    Views for user membership within a role
    """
    @auth_required(authorization.READ)
    def get(self, request, role_id):
        """
        List Users belonging to a role.
        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :param role_id: id for the requested role
        :type role_id: str
        :return: Response containing the users
        :rtype: django.http.HttpResponse
        """
        user_query_manager = factory.user_query_manager()
        role_users = user_query_manager.find_users_belonging_to_role(role_id)
        return generate_json_response_with_pulp_encoder(role_users)
    @auth_required(authorization.UPDATE)
    @json_body_required
    def post(self, request, role_id):
        """
        Add user to a role.
        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :param role_id: id for the requested role
        :type role_id: str
        :return: An empty response
        :rtype: django.http.HttpResponse
        :raises: InvalidValue some parameters are invalid
        """
        params = request.body_as_json
        # 'login' is the only accepted/required body field.
        login = params.get('login', None)
        if login is None:
            raise pulp_exceptions.InvalidValue(login)
        role_manager = factory.role_manager()
        add_user = role_manager.add_user_to_role(role_id, login)
        return generate_json_response(add_user)
class RoleUserView(View):
    """
    View for specific user membership within a role.
    """

    @auth_required(authorization.DELETE)
    def delete(self, request, role_id, login):
        """
        Remove user from a role.

        :param request: WSGI request object
        :type request: django.core.handlers.wsgi.WSGIRequest
        :param role_id: id for the requested role
        :type role_id: str
        :param login: id for the requested user
        :type login: str

        :return: An empty response
        :rtype: django.http.HttpResponse
        """
        removal_report = factory.role_manager().remove_user_from_role(role_id, login)
        return generate_json_response(removal_report)
|
quanvm009/codev7
|
refs/heads/master
|
openerp/addons/crm_helpdesk/crm_helpdesk.py
|
38
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_status.base_state import base_state
from openerp.addons.base_status.base_stage import base_stage
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
# State codes extracted from crm.AVAILABLE_STATES (indexes 2-4: Cancelled,
# Done, Pending).
CRM_HELPDESK_STATES = (
    crm.AVAILABLE_STATES[2][0], # Cancelled
    crm.AVAILABLE_STATES[3][0], # Done
    crm.AVAILABLE_STATES[4][0], # Pending
)
class crm_helpdesk(base_state, base_stage, osv.osv):
    """ Helpdesk Cases """
    _name = "crm.helpdesk"
    _description = "Helpdesk"
    _order = "id desc"
    # mail.thread adds messaging and lets the mail gateway create/update
    # cases (see message_new below).
    _inherit = ['mail.thread']
    # Column declarations for the crm.helpdesk model.
    _columns = {
            'id': fields.integer('ID', readonly=True),
            'name': fields.char('Name', size=128, required=True),
            'active': fields.boolean('Active', required=False),
            'date_action_last': fields.datetime('Last Action', readonly=1),
            'date_action_next': fields.datetime('Next Action', readonly=1),
            'description': fields.text('Description'),
            'create_date': fields.datetime('Creation Date' , readonly=True),
            'write_date': fields.datetime('Update Date' , readonly=True),
            'date_deadline': fields.date('Deadline'),
            'user_id': fields.many2one('res.users', 'Responsible'),
            'section_id': fields.many2one('crm.case.section', 'Sales Team', \
                            select=True, help='Responsible sales team. Define Responsible user and Email account for mail gateway.'),
            'company_id': fields.many2one('res.company', 'Company'),
            'date_closed': fields.datetime('Closed', readonly=True),
            'partner_id': fields.many2one('res.partner', 'Partner'),
            'email_cc': fields.text('Watchers Emails', size=252 , help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
            'email_from': fields.char('Email', size=128, help="Destination email for email gateway"),
            'date': fields.datetime('Date'),
            'ref' : fields.reference('Reference', selection=crm._links_get, size=128),
            'ref2' : fields.reference('Reference 2', selection=crm._links_get, size=128),
            'channel_id': fields.many2one('crm.case.channel', 'Channel', help="Communication channel."),
            'planned_revenue': fields.float('Planned Revenue'),
            'planned_cost': fields.float('Planned Costs'),
            'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
            'probability': fields.float('Probability (%)'),
            'categ_id': fields.many2one('crm.case.categ', 'Category', \
                domain="['|',('section_id','=',False),('section_id','=',section_id),\
                    ('object_id.model', '=', 'crm.helpdesk')]"),
            'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
            'state': fields.selection(crm.AVAILABLE_STATES, 'Status', size=16, readonly=True,
                                  help='The status is set to \'Draft\', when a case is created.\
                        \nIf the case is in progress the status is set to \'Open\'.\
                        \nWhen the case is over, the status is set to \'Done\'.\
                        \nIf the case needs to be reviewed then the status is set to \'Pending\'.'),
    }
    # Default values; the lambdas receive (model, cr, uid, context).
    _defaults = {
        'active': lambda *a: 1,
        'user_id': lambda s, cr, uid, c: s._get_default_user(cr, uid, c),
        'partner_id': lambda s, cr, uid, c: s._get_default_partner(cr, uid, c),
        'email_from': lambda s, cr, uid, c: s._get_default_email(cr, uid, c),
        'state': lambda *a: 'draft',
        'date': lambda *a: fields.datetime.now(),
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
        'priority': lambda *a: crm.AVAILABLE_PRIORITIES[2][0],
    }
    # -------------------------------------------------------
    # Mail gateway
    # -------------------------------------------------------
    def message_new(self, cr, uid, msg, custom_values=None, context=None):
        """ Overrides mail_thread message_new that is called by the mailgateway
            through message_process.
            This override updates the document according to the email.
        """
        if custom_values is None:
            custom_values = {}
        # Use a plain-text rendering of the e-mail body as the description.
        desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
        defaults = {
            'name': msg.get('subject') or _("No Subject"),
            'description': desc,
            'email_from': msg.get('from'),
            'email_cc': msg.get('cc'),
            'user_id': False,
            'partner_id': msg.get('author_id', False),
        }
        # Explicitly supplied custom values win over e-mail-derived ones.
        defaults.update(custom_values)
        return super(crm_helpdesk, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
albireox/marvin
|
refs/heads/master
|
python/marvin/tests/tools/test_modelcube.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2017-08-15
# @Filename: test_modelcube.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-07-20 19:13:16
from __future__ import absolute_import, division, print_function
import os
import pytest
import six
from astropy.io import fits
from astropy.wcs import WCS
from marvin.core.exceptions import MarvinError
from marvin.tools.cube import Cube
from marvin.tools.maps import Maps
from marvin.tools.modelcube import ModelCube
from .. import marvin_test_if
@pytest.fixture(autouse=True)
def skipmpl4(galaxy):
    """Automatically skip any test whose galaxy fixture targets MPL-4."""
    if galaxy.release != 'MPL-4':
        return
    pytest.skip('No modelcubes in MPL-4')
def is_string(string):
    """Return True when *string* is a text string (``six.string_types``)."""
    text_types = six.string_types
    return isinstance(string, text_types)
class TestModelCubeInit(object):
    """Tests covering the different ways of instantiating a ModelCube."""

    def _test_init(self, model_cube, galaxy, bintype=None, template=None):
        """Assert *model_cube* is consistent with the *galaxy* fixture.

        When given, ``bintype``/``template`` override the galaxy defaults
        as the expected values.
        """
        assert model_cube.release == galaxy.release
        assert model_cube._drpver == galaxy.drpver
        assert model_cube._dapver == galaxy.dapver
        # Parentheses matter here: without them `a == b if b else c` parses
        # as `(a == b) if b else c`, so with no override these two asserts
        # degenerated into a truthiness check and always passed.
        assert model_cube.bintype.name == (bintype if bintype else galaxy.bintype.name)
        assert model_cube.template == (template if template else galaxy.template)
        assert model_cube.plateifu == galaxy.plateifu
        assert model_cube.mangaid == galaxy.mangaid
        assert isinstance(model_cube.header, fits.Header)
        assert isinstance(model_cube.wcs, WCS)
        assert model_cube._wavelength is not None
        assert model_cube._redcorr is not None

    def test_init_modelcube(self, galaxy, data_origin):
        """A ModelCube can be built from file, db, or the remote API."""
        if data_origin == 'file':
            kwargs = {'filename': galaxy.modelpath}
        elif data_origin == 'db':
            kwargs = {'plateifu': galaxy.plateifu}
        elif data_origin == 'api':
            kwargs = {'plateifu': galaxy.plateifu, 'mode': 'remote'}
        model_cube = ModelCube(**kwargs)
        assert model_cube.data_origin == data_origin
        assert model_cube.nsa.z == pytest.approx(galaxy.redshift)
        self._test_init(model_cube, galaxy)

    def test_init_from_file_global_mpl4(self, galaxy):
        """Opening a file works even with the global release set to MPL-4."""
        model_cube = ModelCube(filename=galaxy.modelpath, release='MPL-4')
        assert model_cube.data_origin == 'file'
        self._test_init(model_cube, galaxy)

    def test_raises_exception_mpl4(self, galaxy):
        """Non-file access must refuse MPL-4 with a clear error."""
        with pytest.raises(MarvinError) as cm:
            ModelCube(plateifu=galaxy.plateifu, release='MPL-4')
        assert 'ModelCube requires at least dapver=\'2.0.2\'' in str(cm.value)

    def test_init_modelcube_bintype(self, galaxy, data_origin):
        """An explicit bintype is honoured for every data origin."""
        kwargs = {'bintype': galaxy.bintype.name}
        if data_origin == 'file':
            kwargs['filename'] = galaxy.modelpath
        elif data_origin == 'db':
            kwargs['plateifu'] = galaxy.plateifu
        elif data_origin == 'api':
            kwargs['plateifu'] = galaxy.plateifu
            kwargs['mode'] = 'remote'
        model_cube = ModelCube(**kwargs)
        assert model_cube.data_origin == data_origin
        self._test_init(model_cube, galaxy, bintype=galaxy.bintype.name)
class TestModelCube(object):
    """Behavioural tests for ModelCube data access helpers."""

    @marvin_test_if(mark='include', galaxy={'plateifu': '8485-1901'})
    def test_get_flux_db(self, galaxy):
        """binned_flux must have shape (4563, y, x) for the test galaxy."""
        model_cube = ModelCube(plateifu=galaxy.plateifu)
        shape = tuple([4563] + galaxy.shape)
        assert model_cube.binned_flux.shape == shape

    def test_get_cube_file(self, galaxy):
        """getCube() on a file-backed ModelCube returns a Cube."""
        model_cube = ModelCube(filename=galaxy.modelpath)
        assert isinstance(model_cube.getCube(), Cube)

    def test_get_maps_api(self, galaxy):
        """getMaps() on a remote ModelCube returns a Maps."""
        model_cube = ModelCube(plateifu=galaxy.plateifu, mode='remote')
        assert isinstance(model_cube.getMaps(), Maps)

    def test_nobintype_in_db(self, galaxy):
        """Requesting a bintype missing from the DB must raise MarvinError."""
        if galaxy.release != 'MPL-6':
            pytest.skip('only running this test for MPL6')
        with pytest.raises(MarvinError) as cm:
            # Instantiation itself must raise; the instance was previously
            # bound to an unused local (flake8 F841), now dropped.
            ModelCube(plateifu=galaxy.plateifu, bintype='ALL', release=galaxy.release)
        assert 'Specified bintype ALL is not available in the DB' in str(cm.value)

    @pytest.mark.parametrize('objtype, errmsg',
                             [('cube', 'Trying to open a non DAP file with Marvin ModelCube'),
                              ('maps', 'Trying to open a DAP MAPS with Marvin ModelCube')])
    def test_modelcube_wrong_file(self, galaxy, objtype, errmsg):
        """Opening a DRP cube or a MAPS file as a ModelCube must fail."""
        path = galaxy.cubepath if objtype == 'cube' else galaxy.mapspath
        with pytest.raises(MarvinError) as cm:
            ModelCube(filename=path)
        assert errmsg in str(cm.value)
class TestPickling(object):
    """Tests for saving/restoring ModelCube objects via Marvin pickling."""
    def test_pickling_file(self, temp_scratch, galaxy):
        """A file-backed ModelCube round-trips through save()/restore()."""
        modelcube = ModelCube(filename=galaxy.modelpath, bintype=galaxy.bintype)
        assert modelcube.data_origin == 'file'
        assert isinstance(modelcube, ModelCube)
        assert modelcube.data is not None
        file = temp_scratch.join('test_modelcube.mpf')
        modelcube.save(str(file))
        assert file.check() is True
        # Saving must not detach the in-memory data.
        assert modelcube.data is not None
        # Drop the original to prove restore() does not depend on it.
        modelcube = None
        assert modelcube is None
        modelcube_restored = ModelCube.restore(str(file))
        assert modelcube_restored.data_origin == 'file'
        assert isinstance(modelcube_restored, ModelCube)
        assert modelcube_restored.data is not None
    def test_pickling_file_custom_path(self, temp_scratch, galaxy):
        """save(path=...) writes to a custom path; restore(delete=True) removes it."""
        modelcube = ModelCube(filename=galaxy.modelpath, bintype=galaxy.bintype)
        assert modelcube.data_origin == 'file'
        assert isinstance(modelcube, ModelCube)
        assert modelcube.data is not None
        file = temp_scratch.join('mcpickle').join('test_modelcube.mpf')
        # The nested target directory must not pre-exist for this check.
        assert file.check(file=1) is False
        path = modelcube.save(path=str(file))
        assert file.check() is True
        assert os.path.exists(path)
        modelcube_restored = ModelCube.restore(str(file), delete=True)
        assert modelcube_restored.data_origin == 'file'
        assert isinstance(modelcube_restored, ModelCube)
        assert modelcube_restored.data is not None
        # delete=True must remove the pickle file after a successful restore.
        assert not os.path.exists(path)
    def test_pickling_db(self, galaxy, temp_scratch):
        """db-backed objects cannot be pickled and must raise MarvinError."""
        modelcube = ModelCube(plateifu=galaxy.plateifu, bintype=galaxy.bintype)
        file = temp_scratch.join('test_modelcube_db.mpf')
        with pytest.raises(MarvinError) as cm:
            modelcube.save(str(file))
        assert 'objects with data_origin=\'db\' cannot be saved.' in str(cm.value)
    def test_pickling_api(self, temp_scratch, galaxy):
        """A remote (api) ModelCube round-trips; its data payload stays None."""
        modelcube = ModelCube(plateifu=galaxy.plateifu, bintype=galaxy.bintype, mode='remote')
        assert modelcube.data_origin == 'api'
        assert isinstance(modelcube, ModelCube)
        assert modelcube.data is None
        file = temp_scratch.join('test_modelcube_api.mpf')
        modelcube.save(str(file))
        assert file.check() is True
        modelcube = None
        assert modelcube is None
        modelcube_restored = ModelCube.restore(str(file))
        assert modelcube_restored.data_origin == 'api'
        assert isinstance(modelcube_restored, ModelCube)
        # Remote objects carry no local data before or after the round-trip.
        assert modelcube_restored.data is None
class TestMaskbit(object):
    """Smoke tests for the maskbit/flag properties exposed by ModelCube."""

    def test_quality_flag(self, galaxy):
        mc = ModelCube(plateifu=galaxy.plateifu, bintype=galaxy.bintype)
        assert mc.quality_flag is not None

    @pytest.mark.parametrize('flag', ['manga_target1',
                                      'manga_target2',
                                      'manga_target3',
                                      'target_flags'])
    def test_flag(self, flag, galaxy):
        mc = ModelCube(plateifu=galaxy.plateifu, bintype=galaxy.bintype)
        assert getattr(mc, flag, None) is not None
|
ychen820/microblog
|
refs/heads/master
|
y/google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/integration/cloudformation/test_connection.py
|
31
|
#!/usr/bin/env python
import time
import json
from tests.unit import unittest
from boto.cloudformation.connection import CloudFormationConnection
def _ec2_attr_output(description, attribute):
    """Build an Outputs entry that reads *attribute* off the Ec2Instance."""
    return {
        "Description": "%s of the newly created EC2 instance" % description,
        "Value": {"Fn::GetAtt": ["Ec2Instance", attribute]},
    }


# Minimal one-instance CloudFormation template used by the tests below.
BASIC_EC2_TEMPLATE = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "AWS CloudFormation Sample Template EC2InstanceSample",
    "Parameters": {},
    "Mappings": {
        "RegionMap": {
            "us-east-1": {"AMI": "ami-7f418316"},
        },
    },
    "Resources": {
        "Ec2Instance": {
            "Type": "AWS::EC2::Instance",
            "Properties": {
                "ImageId": {
                    "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, "AMI"],
                },
                # Deliberately oversized user data (15k chars) to exercise
                # large-template handling.
                "UserData": {"Fn::Base64": "a" * 15000},
            },
        },
    },
    "Outputs": {
        "InstanceId": {
            "Description": "InstanceId of the newly created EC2 instance",
            "Value": {"Ref": "Ec2Instance"},
        },
        "AZ": _ec2_attr_output("Availability Zone", "AvailabilityZone"),
        "PublicIP": _ec2_attr_output("Public IP address", "PublicIp"),
        "PrivateIP": _ec2_attr_output("Private IP address", "PrivateIp"),
        "PublicDNS": _ec2_attr_output("Public DNSName", "PublicDnsName"),
        "PrivateDNS": _ec2_attr_output("Private DNSName", "PrivateDnsName"),
    },
}
class TestCloudformationConnection(unittest.TestCase):
    """Integration tests that talk to the live CloudFormation service."""
    def setUp(self):
        self.connection = CloudFormationConnection()
        # Time-based suffix keeps repeated/concurrent runs from colliding.
        self.stack_name = 'testcfnstack' + str(int(time.time()))
    def test_large_template_stack_size(self):
        # See https://github.com/boto/boto/issues/1037
        # The template's 15k UserData makes the request body large —
        # presumably past the plain-parameter size limit; see linked issue.
        body = self.connection.create_stack(
            self.stack_name,
            template_body=json.dumps(BASIC_EC2_TEMPLATE))
        self.addCleanup(self.connection.delete_stack, self.stack_name)
        # A newly created stack should have events
        events = self.connection.describe_stack_events(self.stack_name)
        self.assertTrue(events)
        # No policy should be set on the stack by default
        policy = self.connection.get_stack_policy(self.stack_name)
        self.assertEqual(None, policy)
        # Our new stack should show up in the stack list
        stacks = self.connection.describe_stacks()
        self.assertEqual(self.stack_name, stacks[0].stack_name)
if __name__ == '__main__':
    unittest.main()
|
xbs13/influxdb-python
|
refs/heads/master
|
influxdb/resultset.py
|
5
|
# -*- coding: utf-8 -*-
import warnings
from influxdb.exceptions import InfluxDBClientError
# Unique marker distinguishing "tag not present" from any real tag value
# (including None).
_sentinel = object()


class ResultSet(object):
    """A wrapper around a single InfluxDB query result"""

    def __init__(self, series, raise_errors=True):
        """Store the decoded JSON *series* and surface any server error.

        The error is raised only when ``raise_errors`` is exactly ``True``
        (identity check, preserved from the original contract).
        """
        self._raw = series
        self._error = self.raw.get('error', None)
        if self.error is not None and raise_errors is True:
            raise InfluxDBClientError(self.error)

    @property
    def raw(self):
        """Raw JSON from InfluxDB"""
        return self._raw

    @raw.setter
    def raw(self, value):
        self._raw = value

    @property
    def error(self):
        """Error returned by InfluxDB"""
        return self._error

    def __getitem__(self, key):
        """Deprecated point lookup.

        *key* may be a series name, a tags dict, or a 2-tuple
        ``(name, tags)``; a ``None`` name (or tags) acts as a wildcard.
        Returns a generator over the matching points.
        """
        warnings.warn(
            ("ResultSet's ``__getitem__`` method will be deprecated. "
             "Use``get_points`` instead."),
            DeprecationWarning
        )
        if isinstance(key, tuple):
            if len(key) != 2:
                raise TypeError('only 2-tuples allowed')
            name, tags = key
            if tags is not None and not isinstance(tags, dict):
                raise TypeError('tags should be a dict')
        elif isinstance(key, dict):
            name, tags = None, key
        else:
            name, tags = key, None
        return self.get_points(name, tags)

    def get_points(self, measurement=None, tags=None):
        """
        Yield every point that matches the given filters.

        :param measurement: The measurement name
        :type measurement: str
        :param tags: Tags to look for
        :type tags: dict
        :return: Points generator
        """
        # py2/py3-portable "text" type; reject anything that is not
        # bytes/text/None.
        text_type = type(b''.decode())
        if not isinstance(measurement, (bytes, text_type, type(None))):
            raise TypeError('measurement must be an str or None')
        for series in self._get_series():
            series_name = series.get('measurement',
                                     series.get('name', 'results'))
            if series_name is None:
                # "System" queries (e.g. SHOW RETENTION POLICIES) carry no
                # name attribute; they are yielded only for untagged lookups.
                if tags is None:
                    for row in series['values']:
                        yield self.point_from_cols_vals(series['columns'],
                                                        row)
            elif measurement in (None, series_name):
                # A None measurement matches every named series.
                series_tags = series.get('tags', {})
                if tags is None or self._tag_matches(series_tags, tags):
                    for row in series.get('values', []):
                        yield self.point_from_cols_vals(series['columns'],
                                                        row)

    def __repr__(self):
        rendered = ["'%s': %s" % (key, list(points))
                    for key, points in self.items()]
        return "ResultSet({%s})" % ", ".join(rendered)

    def __iter__(self):
        """Yield one list of point dicts per series in this result."""
        for key in self.keys():
            yield list(self.__getitem__(key))

    def _tag_matches(self, tags, filter):
        """Checks if all key/values in filter match in tags"""
        # _sentinel keeps a missing tag from ever comparing equal.
        return all(tags.get(tag_name, _sentinel) == tag_value
                   for tag_name, tag_value in filter.items())

    def _get_series(self):
        """Returns all series"""
        return self.raw.get('series', [])

    def __len__(self):
        return len(self.keys())

    def keys(self):
        """
        :return: List of keys. Keys are tuples (serie_name, tags)
        """
        return [(series.get('measurement', series.get('name', 'results')),
                 series.get('tags', None))
                for series in self._get_series()]

    def items(self):
        """
        :return: List of tuples, (key, generator)
        """
        return [(key, self[key]) for key in self.keys()]

    @staticmethod
    def point_from_cols_vals(cols, vals):
        """Zip column names with one row of values into a point dict."""
        return {col: vals[position] for position, col in enumerate(cols)}
|
infoxchange/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/contrib/localflavor/sk/sk_regions.py
|
543
|
"""
Slovak regions according to http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska
"""
from django.utils.translation import ugettext_lazy as _
# Choices for Django form/model fields: (two-letter region code,
# lazily-translated region name) for the eight Slovak regions listed in the
# module docstring's source.
REGION_CHOICES = (
    ('BB', _('Banska Bystrica region')),
    ('BA', _('Bratislava region')),
    ('KE', _('Kosice region')),
    ('NR', _('Nitra region')),
    ('PO', _('Presov region')),
    ('TN', _('Trencin region')),
    ('TT', _('Trnava region')),
    ('ZA', _('Zilina region')),
)
|
o3project/ryu-oe
|
refs/heads/master
|
ryu/tests/unit/packet/test_vlan.py
|
18
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import struct
from struct import *
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from ryu.ofproto import ether, inet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.ipv4 import ipv4
from ryu.lib.packet.vlan import vlan
from ryu.lib.packet.vlan import svlan
LOG = logging.getLogger('test_vlan')
class Test_vlan(unittest.TestCase):
    """ Test case for vlan
    """
    # Shared fixture: an 802.1Q tag with PCP=0, CFI=0, VID=32 carrying an
    # IPv4 payload, plus its packed wire representation.
    pcp = 0
    cfi = 0
    vid = 32
    # TCI layout from the shifts: PCP in bits 15-13, CFI in bit 12,
    # VID in bits 11-0.
    tci = pcp << 15 | cfi << 12 | vid
    ethertype = ether.ETH_TYPE_IP
    buf = pack(vlan._PACK_STR, tci, ethertype)
    v = vlan(pcp, cfi, vid, ethertype)
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def find_protocol(self, pkt, name):
        # Return the first protocol in *pkt* with the given protocol_name.
        for p in pkt.protocols:
            if p.protocol_name == name:
                return p
    def test_init(self):
        # The constructor must store the header fields untouched.
        eq_(self.pcp, self.v.pcp)
        eq_(self.cfi, self.v.cfi)
        eq_(self.vid, self.v.vid)
        eq_(self.ethertype, self.v.ethertype)
    def test_parser(self):
        # Parsing the packed buffer recovers the fields and reports ipv4 as
        # the next parser (since ethertype is ETH_TYPE_IP).
        res, ptype, _ = self.v.parser(self.buf)
        eq_(res.pcp, self.pcp)
        eq_(res.cfi, self.cfi)
        eq_(res.vid, self.vid)
        eq_(res.ethertype, self.ethertype)
        eq_(ptype, ipv4)
    def test_serialize(self):
        # Serialization must emit exactly (tci, ethertype) per _PACK_STR.
        data = bytearray()
        prev = None
        buf = self.v.serialize(data, prev)
        fmt = vlan._PACK_STR
        res = struct.unpack(fmt, buf)
        eq_(res[0], self.tci)
        eq_(res[1], self.ethertype)
    def _build_vlan(self):
        # Assemble and serialize an ethernet/vlan/ipv4 stack around self.v.
        src_mac = '00:07:0d:af:f4:54'
        dst_mac = '00:00:00:00:00:00'
        ethertype = ether.ETH_TYPE_8021Q
        e = ethernet(dst_mac, src_mac, ethertype)
        version = 4
        header_length = 20
        tos = 0
        total_length = 24
        identification = 0x8a5d
        flags = 0
        offset = 1480
        ttl = 64
        proto = inet.IPPROTO_ICMP
        csum = 0xa7f2
        src = '131.151.32.21'
        dst = '131.151.32.129'
        option = 'TEST'
        ip = ipv4(version, header_length, tos, total_length, identification,
                  flags, offset, ttl, proto, csum, src, dst, option)
        p = Packet()
        p.add_protocol(e)
        p.add_protocol(self.v)
        p.add_protocol(ip)
        p.serialize()
        return p
    def test_build_vlan(self):
        # Each layer must report the next layer via its ethertype constant.
        p = self._build_vlan()
        e = self.find_protocol(p, "ethernet")
        ok_(e)
        eq_(e.ethertype, ether.ETH_TYPE_8021Q)
        v = self.find_protocol(p, "vlan")
        ok_(v)
        eq_(v.ethertype, ether.ETH_TYPE_IP)
        ip = self.find_protocol(p, "ipv4")
        ok_(ip)
        eq_(v.pcp, self.pcp)
        eq_(v.cfi, self.cfi)
        eq_(v.vid, self.vid)
        eq_(v.ethertype, self.ethertype)
    @raises(Exception)
    def test_malformed_vlan(self):
        # A truncated buffer (first byte dropped) must raise on parse.
        m_short_buf = self.buf[1:vlan._MIN_LEN]
        vlan.parser(m_short_buf)
    def test_json(self):
        # to_jsondict/from_jsondict must round-trip the header.
        jsondict = self.v.to_jsondict()
        v = vlan.from_jsondict(jsondict['vlan'])
        eq_(str(self.v), str(v))
class Test_svlan(unittest.TestCase):
    """Test case for svlan (the outer service tag of double-tagged frames)."""
    # Shared fixture: an S-TAG whose next protocol is an 802.1Q C-TAG.
    pcp = 0
    cfi = 0
    vid = 32
    # TCI layout from the shifts: PCP in bits 15-13, CFI in bit 12,
    # VID in bits 11-0.
    tci = pcp << 15 | cfi << 12 | vid
    ethertype = ether.ETH_TYPE_8021Q
    buf = pack(svlan._PACK_STR, tci, ethertype)
    sv = svlan(pcp, cfi, vid, ethertype)
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def find_protocol(self, pkt, name):
        # Return the first protocol in *pkt* with the given protocol_name.
        for p in pkt.protocols:
            if p.protocol_name == name:
                return p
    def test_init(self):
        # The constructor must store the header fields untouched.
        eq_(self.pcp, self.sv.pcp)
        eq_(self.cfi, self.sv.cfi)
        eq_(self.vid, self.sv.vid)
        eq_(self.ethertype, self.sv.ethertype)
    def test_parser(self):
        # Parsing the packed buffer recovers the fields and reports vlan as
        # the next parser (since ethertype is ETH_TYPE_8021Q).
        res, ptype, _ = self.sv.parser(self.buf)
        eq_(res.pcp, self.pcp)
        eq_(res.cfi, self.cfi)
        eq_(res.vid, self.vid)
        eq_(res.ethertype, self.ethertype)
        eq_(ptype, vlan)
    def test_serialize(self):
        # Serialization must emit exactly (tci, ethertype) per _PACK_STR.
        data = bytearray()
        prev = None
        buf = self.sv.serialize(data, prev)
        fmt = svlan._PACK_STR
        res = struct.unpack(fmt, buf)
        eq_(res[0], self.tci)
        eq_(res[1], self.ethertype)
    def _build_svlan(self):
        # Assemble and serialize an ethernet/svlan/vlan/ipv4 stack.
        src_mac = '00:07:0d:af:f4:54'
        dst_mac = '00:00:00:00:00:00'
        ethertype = ether.ETH_TYPE_8021AD
        e = ethernet(dst_mac, src_mac, ethertype)
        pcp = 0
        cfi = 0
        vid = 32
        tci = pcp << 15 | cfi << 12 | vid
        ethertype = ether.ETH_TYPE_IP
        v = vlan(pcp, cfi, vid, ethertype)
        version = 4
        header_length = 20
        tos = 0
        total_length = 24
        identification = 0x8a5d
        flags = 0
        offset = 1480
        ttl = 64
        proto = inet.IPPROTO_ICMP
        csum = 0xa7f2
        src = '131.151.32.21'
        dst = '131.151.32.129'
        option = 'TEST'
        ip = ipv4(version, header_length, tos, total_length, identification,
                  flags, offset, ttl, proto, csum, src, dst, option)
        p = Packet()
        p.add_protocol(e)
        p.add_protocol(self.sv)
        p.add_protocol(v)
        p.add_protocol(ip)
        p.serialize()
        return p
    def test_build_svlan(self):
        # Each layer must report the next layer via its ethertype constant.
        p = self._build_svlan()
        e = self.find_protocol(p, "ethernet")
        ok_(e)
        eq_(e.ethertype, ether.ETH_TYPE_8021AD)
        sv = self.find_protocol(p, "svlan")
        ok_(sv)
        eq_(sv.ethertype, ether.ETH_TYPE_8021Q)
        v = self.find_protocol(p, "vlan")
        ok_(v)
        eq_(v.ethertype, ether.ETH_TYPE_IP)
        ip = self.find_protocol(p, "ipv4")
        ok_(ip)
        eq_(sv.pcp, self.pcp)
        eq_(sv.cfi, self.cfi)
        eq_(sv.vid, self.vid)
        eq_(sv.ethertype, self.ethertype)
    @raises(Exception)
    def test_malformed_svlan(self):
        # A truncated buffer (first byte dropped) must raise on parse.
        m_short_buf = self.buf[1:svlan._MIN_LEN]
        svlan.parser(m_short_buf)
    def test_json(self):
        # to_jsondict/from_jsondict must round-trip the header.
        jsondict = self.sv.to_jsondict()
        sv = svlan.from_jsondict(jsondict['svlan'])
        eq_(str(self.sv), str(sv))
|
konieboy/Seng_403
|
refs/heads/master
|
Gender Computer/unidecode/x1d7.py
|
248
|
# Transliteration table for one 256-entry code-point "page": entry i is the
# ASCII replacement for the character whose low byte is i.  The first 0xCE
# entries have no mapping (empty string); the final 50 entries are five
# consecutive runs of styled digits, each run transliterating to '0'-'9'.
data = ('',) * 0xCE + ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') * 5
|
dayatz/taiga-back
|
refs/heads/stable
|
taiga/export_import/serializers/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .serializers import PointsExportSerializer
from .serializers import UserStoryStatusExportSerializer
from .serializers import TaskStatusExportSerializer
from .serializers import IssueStatusExportSerializer
from .serializers import PriorityExportSerializer
from .serializers import SeverityExportSerializer
from .serializers import IssueTypeExportSerializer
from .serializers import RoleExportSerializer
from .serializers import UserStoryCustomAttributeExportSerializer
from .serializers import TaskCustomAttributeExportSerializer
from .serializers import IssueCustomAttributeExportSerializer
from .serializers import BaseCustomAttributesValuesExportSerializer
from .serializers import UserStoryCustomAttributesValuesExportSerializer
from .serializers import TaskCustomAttributesValuesExportSerializer
from .serializers import IssueCustomAttributesValuesExportSerializer
from .serializers import MembershipExportSerializer
from .serializers import RolePointsExportSerializer
from .serializers import MilestoneExportSerializer
from .serializers import TaskExportSerializer
from .serializers import UserStoryExportSerializer
from .serializers import IssueExportSerializer
from .serializers import WikiPageExportSerializer
from .serializers import WikiLinkExportSerializer
from .serializers import TimelineExportSerializer
from .serializers import ProjectExportSerializer
from .mixins import AttachmentExportSerializer
from .mixins import HistoryExportSerializer
|
bsipocz/astroML
|
refs/heads/fetch_cnn_images
|
astroML/clustering/mst_clustering.py
|
2
|
"""
Minimum Spanning Tree Clustering
"""
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.neighbors import kneighbors_graph
# csgraph utilities moved into scipy.sparse in scipy 0.11; fail fast at
# import time with a clear message on older scipy.
# NOTE(review): this raises ValueError, not ImportError — callers guarding
# with `except ImportError` will not catch it; confirm before changing.
try:
    from scipy.sparse.csgraph import (
        minimum_spanning_tree, connected_components)
except ImportError:
    raise ValueError("scipy v0.11 or greater required "
                     "for minimum spanning tree")
class HierarchicalClustering(BaseEstimator):
    """Hierarchical Clustering via Approximate Euclidean Minimum Spanning Tree

    Parameters
    ----------
    n_neighbors : int
        number of neighbors of each point used for the approximate Euclidean
        minimum spanning tree (MST) algorithm.  See Notes below.
    edge_cutoff : float
        fraction of edges to keep when selecting clusters.
        edge_cutoff should be between 0 and 1.
    min_cluster_size : int, optional
        minimum number of points per cluster.  If not specified,
        all clusters will be kept.

    Attributes
    ----------
    X_train_ : ndarray
        the training data
    full_tree_ : sparse graph
        the full approximate Euclidean MST spanning the data
    cluster_graph_ : sparse graph
        the final (truncated) graph showing clusters
    n_components_ : int
        the number of clusters found.
    labels_ : int
        the cluster labels for each training point.  Labels range from -1
        to n_components_ - 1: points labeled -1 are in the background (i.e.
        their clusters were smaller than min_cluster_size)

    Notes
    -----
    This routine uses an approximate Euclidean minimum spanning tree (MST)
    to perform hierarchical clustering.  A true Euclidean minimum spanning
    tree naively costs O[N^3].  Graph traversal algorithms only help so much,
    because all N^2 edges must be used as candidates.  In this approximate
    algorithm, we use k < N edges from each point, so that the cost is only
    O[Nk log(Nk)].  For k = N, the approximation is exact; in practice for
    well-behaved data sets, the result is exact for k << N.
    """
    def __init__(self, n_neighbors=20,
                 edge_cutoff=0.9,
                 min_cluster_size=1):
        self.n_neighbors = n_neighbors
        self.edge_cutoff = edge_cutoff
        self.min_cluster_size = min_cluster_size
    def fit(self, X):
        """Fit the clustering model.

        Builds the approximate Euclidean MST of X, then derives cluster
        labels via :meth:`compute_clusters` using the instance's
        edge_cutoff / min_cluster_size settings.

        Parameters
        ----------
        X : array_like
            the data to be clustered: shape = [n_samples, n_features]

        Returns
        -------
        self : HierarchicalClustering
            the fitted estimator (sklearn convention).
        """
        X = np.asarray(X, dtype=float)
        self.X_train_ = X
        # generate a sparse graph using the k nearest neighbors of each point
        G = kneighbors_graph(X, n_neighbors=self.n_neighbors, mode='distance')
        # Compute the minimum spanning tree of this graph
        self.full_tree_ = minimum_spanning_tree(G, overwrite=True)
        # Find the cluster labels
        self.n_components_, self.labels_, self.cluster_graph_ =\
            self.compute_clusters()
        return self
    def compute_clusters(self, edge_cutoff=None, min_cluster_size=None):
        """Compute the clusters given a trained tree.

        After fit() is called, this method may be called to obtain a
        clustering result with a new edge_cutoff and min_cluster_size
        without recomputing the MST.

        Parameters
        ----------
        edge_cutoff : float, optional
            fraction of edges to keep when selecting clusters
            (between 0 and 1).  Defaults to self.edge_cutoff.
        min_cluster_size : int, optional
            minimum number of points per cluster.  Defaults to
            self.min_cluster_size.

        Returns
        -------
        n_components : int
            the number of clusters found
        labels : ndarray
            the labels of each point.  Labels range from -1 to
            n_components_ - 1: points labeled -1 are in the background
            (i.e. their clusters were smaller than min_cluster_size)
        T_trunc : sparse matrix
            the truncated minimum spanning tree
        """
        if edge_cutoff is None:
            edge_cutoff = self.edge_cutoff
        if min_cluster_size is None:
            min_cluster_size = self.min_cluster_size
        if not hasattr(self, 'full_tree_'):
            raise ValueError("must call fit() before calling "
                             "compute_clusters()")
        T_trunc = self.full_tree_.copy()
        # cut-off edges at the percentile given by edge_cutoff
        cutoff = np.percentile(T_trunc.data, 100 * edge_cutoff)
        T_trunc.data[T_trunc.data > cutoff] = 0
        T_trunc.eliminate_zeros()
        # find connected components
        n_components, labels = connected_components(T_trunc, directed=False)
        counts = np.bincount(labels)
        # for all components with less than min_cluster_size points, set
        # to background, and re-label the clusters
        i_bg = np.where(counts < min_cluster_size)[0]
        for i in i_bg:
            labels[labels == i] = -1
        if len(i_bg) > 0:
            # np.unique(..., return_inverse=True) renumbers the surviving
            # labels consecutively; subtracting 1 maps the background
            # component (which sorts first as -1) back to label -1
            _, labels = np.unique(labels, return_inverse=True)
            labels -= 1
            n_components = labels.max() + 1
        # eliminate links in T_trunc which are not clusters:
        # zeroing the diagonal entries for background points in Eye
        # removes all their incident edges in the product below
        Eye = sparse.eye(len(labels), len(labels))
        Eye.data[0, labels < 0] = 0
        T_trunc = Eye * T_trunc * Eye
        return n_components, labels, T_trunc
def get_graph_segments(X, G):
    """Get graph segments for plotting a 2D graph.

    Parameters
    ----------
    X : array_like
        the data, of shape [n_samples, 2]
    G : array_like or sparse graph
        the [n_samples, n_samples] matrix encoding the graph of connections
        on X

    Returns
    -------
    x_coords, y_coords : ndarrays
        the x and y coordinates for plotting the graph.  They are of size
        [2, n_links], and can be visualized using
        ``plt.plot(x_coords, y_coords, '-k')``
    """
    points = np.asarray(X)
    if points.ndim != 2 or points.shape[1] != 2:
        raise ValueError('shape of X should be (n_samples, 2)')

    # COO format exposes one (row, col) index pair per stored edge.
    edges = sparse.coo_matrix(G)
    tails = points[edges.row]
    heads = points[edges.col]

    # Stack tail/head coordinates so that each column is one line segment.
    x_coords = np.vstack([tails[:, 0], heads[:, 0]])
    y_coords = np.vstack([tails[:, 1], heads[:, 1]])
    return x_coords, y_coords
|
pauljohnleonard/MusicBox
|
refs/heads/master
|
attic/thirdparty/srcMMA/MMA/tweaks.py
|
1
|
# tweaks.py
"""
This module is an integeral part of the program
MMA - Musical Midi Accompaniment.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Bob van der Poel <bob@mellowood.ca>
This module has support for some simple default tweaks.
"""
from MMA.common import *
import MMA.pat
import MMA.midiC
import copy
def setTweak(ln):
    """ Option tweaks. """

    notopt, ln = opt2pair(ln)
    if notopt:
        error("Tweaks: expecting cmd=opt pairs, not '%s'." % ' '.join(notopt) )

    for cmd, opt in ln:
        name = cmd.upper()

        if name in ('DEFAULTDRUM', 'DEFAULTVOICE'):
            # Both options decode a MIDI voice specifier the same way.
            voice = MMA.midiC.decodeVoice(opt)
            if name == 'DEFAULTDRUM':
                MMA.pat.defaultDrum = voice
            else:
                MMA.pat.defaultVoice = voice

        elif name == 'DIM':
            from MMA.chordtable import chordlist
            if opt == '3':
                # Keep the dim3 notes/scale but present a standard description.
                chordlist['dim'] = (chordlist['dim3'][0], chordlist['dim3'][1], "Diminished triad")
            elif opt == '7':
                chordlist['dim'] = copy.deepcopy(chordlist['dim7'])
            else:
                error("Tweaks: DIM requires '3' or '7' arg, not '%s'." % opt)

        else:
            error("Tweaks: '%s' unknown command." % name)
|
rleadbetter/Sick-Beard
|
refs/heads/anime
|
lib/hachoir_parser/game/spider_man_video.py
|
90
|
"""
Parser for an obscure FMV file format: bin files from the game
"The Amazing Spider-Man vs. The Kingpin" (Sega CD)
Author: Mike Melanson
Creation date: 2006-09-30
File samples: http://samples.mplayerhq.hu/game-formats/spiderman-segacd-bin/
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import FieldSet, UInt32, String, RawBytes
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal
class Chunk(FieldSet):
    """One tagged chunk: 4-byte FourCC, 32-bit length, then payload."""

    # fourcc -> (field name, payload parser, description); a None parser
    # means the payload is kept as raw bytes.
    tag_info = {
        "CONF" : ("conf[]", None, "Configuration header"),
        "AUDI" : ("audio[]", None, "Audio chunk"),
        "SYNC" : ("sync[]", None, "Start of video frame data"),
        "IVRA" : ("ivra[]", None, "Vector codebook (?)"),
        "VRAM" : ("video[]", None, "Video RAM tile pattern"),
        "CRAM" : ("color[]", None, "Color RAM (palette)"),
        "CEND" : ("video_end[]", None, "End of video data"),
        "MEND" : ("end_file", None, "End of file"),
    }

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Chunk size in bits; the length field counts bytes including the
        # 8-byte header.
        self._size = self["length"].value * 8
        info = self.tag_info.get(self["fourcc"].value)
        if info is not None:
            self._name, self._parser, self._description = info
        else:
            self._parser = None
            self._description = "Unknown chunk: fourcc %s" % self["fourcc"].display

    def createFields(self):
        yield String(self, "fourcc", 4, "FourCC", charset="ASCII")
        yield textHandler(UInt32(self, "length", "length"), hexadecimal)
        payload_size = self["length"].value - 8
        if payload_size > 0:
            if self._parser:
                for field in self._parser(self, payload_size):
                    yield field
            else:
                yield RawBytes(self, "data", payload_size)
class SpiderManVideoFile(Parser):
    """Top-level parser: the file is a flat sequence of Chunks."""

    PARSER_TAGS = {
        "id": "spiderman_video",
        "category": "game",
        "file_ext": ("bin",),
        "min_size": 8*8,
        "description": "The Amazing Spider-Man vs. The Kingpin (Sega CD) FMV video"
    }
    endian = BIG_ENDIAN

    def validate(self):
        # Every valid file opens with a 'CONF' configuration chunk.
        return self.stream.readBytes(0, 4) == 'CONF'

    def createFields(self):
        # Consume chunks until the stream is exhausted.
        while not self.eof:
            yield Chunk(self, "chunk[]")
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_freitek_powerhouse_mk2.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the FreiTek PowerHouse Mk2 reactor.

    NOTE(review): this file is marked autogenerated; custom logic belongs
    between the modification markers below.
    """
    reactor = Tangible()
    reactor.template = "object/tangible/ship/components/reactor/shared_rct_freitek_powerhouse_mk2.iff"
    reactor.attribute_template_id = 8
    reactor.stfName("space/space_item","rct_freitek_powerhouse_mk2_n")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return reactor
|
kwhinnery/node-workshop
|
refs/heads/master
|
challenge6/finished/node_modules/browserify/node_modules/insert-module-globals/node_modules/lexical-scope/node_modules/astw/node_modules/esprima-six/tools/generate-unicode-regex.py
|
341
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <utatane.tea@gmail.com>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110
import sys
import string
import re
class RegExpGenerator(object):
    """Build JavaScript character-class regex source (e.g. '[\\u00C0-\\u00D6]')
    from the set of BMP code points accepted by a Detector predicate."""

    def __init__(self, detector):
        self.detector = detector

    def generate_identifier_start(self):
        """Character class of all BMP code points that may start an identifier."""
        r = [ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)

    def generate_identifier_part(self):
        """Character class of all BMP code points that may continue an identifier."""
        r = [ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)

    def generate_non_ascii_identifier_start(self):
        """Identifier-start class restricted to non-ASCII code points."""
        # Consistency fix: this method used xrange() while every sibling uses
        # range(); range() behaves identically here on both Python 2 and 3.
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_start(ch)]
        return self._generate_range(r)

    def generate_non_ascii_identifier_part(self):
        """Identifier-part class restricted to non-ASCII code points."""
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_part(ch)]
        return self._generate_range(r)

    def generate_non_ascii_separator_space(self):
        """Class of non-ASCII code points in Unicode category Zs."""
        r = [ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_separator_space(ch)]
        return self._generate_range(r)

    def _generate_range(self, r):
        """Collapse a sorted list of code points into character-class text.

        Consecutive runs become '\\uXXXX-\\uYYYY'; singletons and (inside the
        loop) adjacent pairs are emitted individually.
        """
        if len(r) == 0:
            return '[]'
        buf = []
        start = r[0]
        end = r[0]
        predict = start + 1  # next code point that would extend the run
        r = r[1:]
        for code in r:
            if predict == code:
                # still contiguous: extend the current run
                end = code
                predict = code + 1
                continue
            else:
                # run broken: flush [start, end] and begin a new run
                if start == end:
                    buf.append("\\u%04X" % start)
                elif end == start + 1:
                    buf.append("\\u%04X\\u%04X" % (start, end))
                else:
                    buf.append("\\u%04X-\\u%04X" % (start, end))
                start = code
                end = code
                predict = code + 1
        # flush the final run (kept as in the original: adjacent pairs are
        # emitted as a range here, which is an equivalent regex)
        if start == end:
            buf.append("\\u%04X" % start)
        else:
            buf.append("\\u%04X-\\u%04X" % (start, end))
        return '[' + ''.join(buf) + ']'
class Detector(object):
    """Classify BMP code points using a Unicode general-category table.

    ``data`` is a list indexed by code point (0x0000..0xFFFF) whose entries
    are two-letter Unicode general-category names (e.g. 'Lu', 'Nd'), with
    'Un' for unassigned code points (see analyze() in this file).
    """
    def __init__(self, data):
        self.data = data

    def is_ascii(self, ch):
        return ch < 0x80

    def is_ascii_alpha(self, ch):
        # OR-ing with 0x20 lower-cases an ASCII letter, so a single
        # 'a'..'z' range test covers both cases.
        v = ch | 0x20
        return v >= ord('a') and v <= ord('z')

    def is_decimal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('9')

    def is_octal_digit(self, ch):
        return ch >= ord('0') and ch <= ord('7')

    def is_hex_digit(self, ch):
        v = ch | 0x20
        # Bug fix: the original tested the undefined name `c` instead of
        # `ch`, raising NameError for any non-letter input.
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))

    def is_digit(self, ch):
        return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'

    def is_ascii_alphanumeric(self, ch):
        return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)

    def _is_non_ascii_identifier_start(self, ch):
        # Letter categories plus letter-number (Nl).
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'

    def _is_non_ascii_identifier_part(self, ch):
        # Identifier-start categories plus combining marks (Mn/Mc), decimal
        # digits (Nd), connector punctuation (Pc), and the join controls
        # ZWNJ (U+200C) / ZWJ (U+200D).
        c = self.data[ch]
        return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D

    def is_separator_space(self, ch):
        return self.data[ch] == 'Zs'

    def is_white_space(self, ch):
        return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)

    def is_line_terminator(self, ch):
        return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)

    def is_line_or_paragraph_terminator(self, ch):
        return ch == 0x2028 or ch == 0x2029

    def is_identifier_start(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
        return self._is_non_ascii_identifier_start(ch)

    def is_identifier_part(self, ch):
        if self.is_ascii(ch):
            return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
        return self._is_non_ascii_identifier_part(ch)
def analyze(source):
    """Parse a UnicodeData.txt-format file and return a RegExpGenerator.

    Each line is ``codepoint;name;category;...``.  Ranges are encoded as a
    ``<..., First>`` line followed by a ``<..., Last>`` line; every code
    point in between gets the range's category.  Unassigned code points are
    marked 'Un'.
    """
    data = []
    dictionary = {}
    # Pre-compile the range-marker patterns once instead of per line.
    last_re = re.compile("<.+, Last>")
    first_re = re.compile("<.+, First>")
    with open(source) as uni:
        flag = False   # inside a First/Last range?
        first = 0      # first code point of the open range
        for line in uni:
            # Fix: use the str method instead of the long-deprecated
            # string.split() module function.
            d = line.strip().split(";")
            val = int(d[0], 16)
            if flag:
                if last_re.match(d[1]):
                    flag = False
                    for t in range(first, val + 1):
                        dictionary[t] = str(d[2])
                else:
                    # Fix: raising a plain string is invalid in modern
                    # Python; raise a real exception type instead.
                    raise ValueError("Database Exception")
            else:
                if first_re.match(d[1]):
                    flag = True
                    first = val
                else:
                    dictionary[val] = str(d[2])
    # Densify into a list over the whole BMP, 'Un' for unassigned.
    for i in range(0xFFFF + 1):
        data.append(dictionary.get(i, "Un"))
    return RegExpGenerator(Detector(data))
def main(source):
generator = analyze(source)
print generator.generate_non_ascii_identifier_start()
print generator.generate_non_ascii_identifier_part()
print generator.generate_non_ascii_separator_space()
if __name__ == '__main__':
main(sys.argv[1])
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/contrib/formtools/tests/wizard/namedwizardtests/urls.py
|
320
|
from django.conf.urls import patterns, url
from django.contrib.formtools.tests.wizard.namedwizardtests.forms import (
SessionContactWizard, CookieContactWizard, Page1, Page2, Page3, Page4)
def get_named_session_wizard():
    """Return a session-backed contact wizard view with named steps."""
    step_forms = [
        ('form1', Page1),
        ('form2', Page2),
        ('form3', Page3),
        ('form4', Page4),
    ]
    return SessionContactWizard.as_view(
        step_forms,
        url_name='nwiz_session',
        done_step_name='nwiz_session_done')
def get_named_cookie_wizard():
    """Return a cookie-backed contact wizard view with named steps."""
    step_forms = [
        ('form1', Page1),
        ('form2', Page2),
        ('form3', Page3),
        ('form4', Page4),
    ]
    return CookieContactWizard.as_view(
        step_forms,
        url_name='nwiz_cookie',
        done_step_name='nwiz_cookie_done')
# Routes for the named-step wizard tests: each wizard gets a per-step URL
# (capturing the step name) plus a start URL without a step.
urlpatterns = patterns('',
    url(r'^nwiz_session/(?P<step>.+)/$', get_named_session_wizard(), name='nwiz_session'),
    url(r'^nwiz_session/$', get_named_session_wizard(), name='nwiz_session_start'),
    url(r'^nwiz_cookie/(?P<step>.+)/$', get_named_cookie_wizard(), name='nwiz_cookie'),
    url(r'^nwiz_cookie/$', get_named_cookie_wizard(), name='nwiz_cookie_start'),
)
|
whaleygeek/pc_parser
|
refs/heads/master
|
build/yply/yply.py
|
1
|
#!/usr/local/bin/python
# yply.py
#
# Author: David Beazley (dave@dabeaz.com)
# Date : October 2, 2006
#
# Converts a UNIX-yacc specification file into a PLY-compatible
# specification. To use, simply do this:
#
# % python yply.py [-nocode] inputfile.y >myparser.py
#
# The output of this program is Python code. In the output,
# any C code in the original file is included, but is commented.
# If you use the -nocode option, then all of the C code in the
# original file is discarded.
#
# Disclaimer: This just an example I threw together in an afternoon.
# It might have some bugs. However, it worked when I tried it on
# a yacc-specified C++ parser containing 442 rules and 855 parsing
# states.
#
import sys
import ylex
import yparse
from ply import *
# Command-line handling: usage is `yply.py [-nocode] inputfile`.
# The optional leading -nocode flag suppresses the (commented) C code in
# the generated parser; it must appear as the first argument.
if len(sys.argv) == 1:
    print "usage : yply.py [-nocode] inputfile"
    raise SystemExit
if len(sys.argv) == 3:
    if sys.argv[1] == '-nocode':
        yparse.emit_code = 0
    else:
        print "Unknown option '%s'" % sys.argv[1]
        raise SystemExit
    filename = sys.argv[2]
else:
    filename = sys.argv[1]
# Parse the whole yacc grammar; presumably the translation is emitted to
# stdout by the yparse actions as parsing proceeds -- TODO confirm in yparse.
yacc.parse(open(filename).read())
# Append a module entry point to the generated output.  This triple-quoted
# string is program OUTPUT (printed verbatim), not code executed here.
print """
if __name__ == '__main__':
    from ply import *
    yacc.yacc()
"""
|
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/lib/Lib/encodings/mac_greek.py
|
593
|
""" Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless one-shot mac-greek codec backed by the charmap tables below."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding keeps no cross-call state."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding keeps no cross-call state."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for mac-greek; all behavior inherited from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for mac-greek; all behavior inherited from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the codecs registry."""
    return codecs.CodecInfo(
        name='mac-greek',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xb9' # 0x81 -> SUPERSCRIPT ONE
u'\xb2' # 0x82 -> SUPERSCRIPT TWO
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xb3' # 0x84 -> SUPERSCRIPT THREE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0384' # 0x8B -> GREEK TONOS
u'\xa8' # 0x8C -> DIAERESIS
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xa3' # 0x92 -> POUND SIGN
u'\u2122' # 0x93 -> TRADE MARK SIGN
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u2022' # 0x96 -> BULLET
u'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
u'\u2030' # 0x98 -> PER MILLE SIGN
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xa6' # 0x9B -> BROKEN BAR
u'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
u'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
u'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
u'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
u'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
u'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\xa7' # 0xAC -> SECTION SIGN
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xb0' # 0xAE -> DEGREE SIGN
u'\xb7' # 0xAF -> MIDDLE DOT
u'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
u'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
u'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
u'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
u'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
u'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
u'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
u'\xac' # 0xC2 -> NOT SIGN
u'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
u'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
u'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
u'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2015' # 0xD1 -> HORIZONTAL BAR
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
u'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
u'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03be' # 0xEA -> GREEK SMALL LETTER XI
u'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xED -> GREEK SMALL LETTER MU
u'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
u'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
u'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
u'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
u'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
u'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
)
### Encoding table
# Inverse mapping of decoding_table, built once at import time.
encoding_table=codecs.charmap_build(decoding_table)
|
wevoteeducation/WeVoteBase
|
refs/heads/master
|
import_export_maplight/admin.py
|
1
|
# import_export_maplight/admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.contrib import admin
# Register your models here.
|
Marilynmontu/final-competiton
|
refs/heads/master
|
Tools/LogAnalyzer/VehicleType.py
|
187
|
class VehicleType():
    """Numeric identifiers for the supported ArduPilot vehicle types."""
    Plane = 17
    Copter = 23
    Rover = 37
    # these should really be "Plane", "Copter" and "Rover", but many
    # things use these values as triggers in their code:
    VehicleTypeString = {
        Plane: "ArduPlane",
        Copter: "ArduCopter",
        Rover: "ArduRover",
    }
|
petrutlucian94/nova
|
refs/heads/master
|
nova/vnc/__init__.py
|
19
|
#!/usr/bin/env python
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for VNC Proxying."""
from oslo_config import cfg
# Configuration options for VNC console access: proxy endpoints, the
# address instance VNC servers bind to, and global enable/keymap settings.
# Registered on the global CONF object at import time (module side effect).
vnc_opts = [
    cfg.StrOpt('novncproxy_base_url',
               default='http://127.0.0.1:6080/vnc_auto.html',
               help='Location of VNC console proxy, in the form '
                    '"http://127.0.0.1:6080/vnc_auto.html"'),
    cfg.StrOpt('xvpvncproxy_base_url',
               default='http://127.0.0.1:6081/console',
               help='Location of nova xvp VNC console proxy, in the form '
                    '"http://127.0.0.1:6081/console"'),
    cfg.StrOpt('vncserver_listen',
               default='127.0.0.1',
               help='IP address on which instance vncservers should listen'),
    cfg.StrOpt('vncserver_proxyclient_address',
               default='127.0.0.1',
               help='The address to which proxy clients '
                    '(like nova-xvpvncproxy) should connect'),
    cfg.BoolOpt('vnc_enabled',
                default=True,
                help='Enable VNC related features'),
    cfg.StrOpt('vnc_keymap',
               default='en-us',
               help='Keymap for VNC'),
]
CONF = cfg.CONF
CONF.register_opts(vnc_opts)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.