repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable)
|---|---|---|---|---|
lancezlin/pyjs
|
refs/heads/master
|
examples/treeitemcheckbox/TreeItemTest.py
|
6
|
# Copyright (C) 2011 Vsevolod Fedorov <vsevolod.fedorov@gmail.com>
# License: Apache2
# demonstrates issue #628 http://code.google.com/p/pyjamas/issues/detail?id=638
import pyjd # this is a dummy module in pyjs.
from pyjamas import Window
from pyjamas.ui.Tree import Tree
from pyjamas.ui.TreeItem import TreeItem
from pyjamas.ui.CheckBox import CheckBox
from pyjamas.ui.RootPanel import RootPanel
def onCb1(sender):
Window.alert('onCb1 ' + str(sender) + str(sender.isChecked()))
def onCb2(sender):
Window.alert('onCb2 ' + str(sender) + str(sender.isChecked()))
def main():
root = RootPanel()
tree = Tree()
cb1 = CheckBox('test 1')
cb1.addClickListener(onCb1)
root.add(cb1)
cb2 = CheckBox('test 2')
cb2.addClickListener(onCb2)
item = TreeItem(cb2)
tree.addItem(item)
root.add(tree)
if __name__ == '__main__':
pyjd.setup("./public/TreeItemTest.html")
main()
pyjd.run()
|
171121130/SWI
|
refs/heads/master
|
venv/Lib/encodings/cp865.py
|
272
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp865',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
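### Usage sketch
# A minimal round-trip example (assumes this module is registered under the
# standard `encodings` search path, as in CPython); the expected bytes can
# be read off the encoding map and decoding table below:
#
#   >>> 'Øl på 10°'.encode('cp865')
#   b'\x9dl p\x86 10\xf8'
#   >>> b'\x9dl p\x86 10\xf8'.decode('cp865')
#   'Øl på 10°'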
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00a4, # CURRENCY SIGN
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xa4' # 0x00af -> CURRENCY SIGN
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00af, # CURRENCY SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
ondrejmular/pcs
|
refs/heads/master
|
pcs_test/tier0/lib/commands/tag/test_tag_update.py
|
3
|
from unittest import TestCase
from pcs.common import reports
from pcs.lib.commands import tag as cmd_tag
from pcs_test.tier0.lib.commands.tag.tag_common import (
fixture_resouces_for_reference_ids,
fixture_tags_xml,
)
from pcs_test.tools import fixture
from pcs_test.tools.command_env import get_env_tools
# This class does not focus on validation testing; there are validator tests
# for that in pcs_test.tier0.lib.cib.test_tag
class TestTagUpdate(TestCase):
# pylint: disable=too-many-public-methods
def setUp(self):
self.env_assist, self.config = get_env_tools(self)
self.config.runner.cib.load(
resources=fixture_resouces_for_reference_ids(
["e1", "e2", "e3", "a", "b"]
),
tags=fixture_tags_xml([("t", ["e1", "e2", "e3"])]),
)
def test_add_ids(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e1", "e2", "e3", "a", "b"])]),
)
cmd_tag.update(self.env_assist.get_env(), "t", ["a", "b"], [])
def test_add_ids_before(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["a", "b", "e1", "e2", "e3"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["a", "b"],
[],
adjacent_idref="e1",
)
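# Note: with adjacent_idref set and put_after_adjacent left at its default
# (False), the new ids land immediately before the adjacent reference
# ("e1" above); the next test exercises the put_after_adjacent=True case.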
def test_add_ids_after(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e1", "b", "a", "e2", "e3"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["b", "a"],
[],
adjacent_idref="e1",
put_after_adjacent=True,
)
def test_remove_ids(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e2"])]),
)
cmd_tag.update(self.env_assist.get_env(), "t", [], ["e1", "e3"])
def test_combination_add_remove(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e2", "a", "b"])]),
)
cmd_tag.update(self.env_assist.get_env(), "t", ["a", "b"], ["e1", "e3"])
def test_combination_add_before_remove(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["a", "b", "e2"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["a", "b"],
["e1", "e3"],
adjacent_idref="e2",
)
def test_combination_add_after_remove(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e2", "a", "b"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["a", "b"],
["e1", "e3"],
adjacent_idref="e2",
put_after_adjacent=True,
)
def test_move_existing_before(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e2", "e3", "e1"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["e2", "e3"],
[],
adjacent_idref="e1",
)
def test_move_existing_after(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e2", "e3", "e1"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["e1"],
[],
adjacent_idref="e3",
put_after_adjacent=True,
)
def test_move_new_and_existing_before(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e2", "a", "e3", "b", "e1"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["e2", "a", "e3", "b"],
[],
adjacent_idref="e1",
)
def test_move_new_and_existing_after(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e1", "e2", "b", "a", "e3"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["b", "a", "e3"],
[],
adjacent_idref="e2",
put_after_adjacent=True,
)
def test_move_new_and_existing_before_and_remove(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["a", "b", "e2", "e3"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["a", "b"],
["e1"],
adjacent_idref="e2",
)
def test_move_new_and_existing_after_and_remove(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["e1", "e2", "b", "a"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["b", "a"],
["e3"],
adjacent_idref="e2",
put_after_adjacent=True,
)
def test_remove_all_existing_but_add_new_ones(self):
self.config.env.push_cib(
tags=fixture_tags_xml([("t", ["a", "b"])]),
)
cmd_tag.update(
self.env_assist.get_env(),
"t",
["a", "b"],
["e1", "e2", "e3"],
)
self.env_assist.assert_reports([])
def test_raises_exeption_in_case_of_report(self):
self.env_assist.assert_raise_library_error(
lambda: cmd_tag.update(
self.env_assist.get_env(),
"t",
[],
["e1", "e2", "e3"],
)
)
self.env_assist.assert_reports(
[
fixture.error(
# pylint: disable=line-too-long
reports.codes.TAG_CANNOT_REMOVE_REFERENCES_WITHOUT_REMOVING_TAG,
tag_id="t",
)
]
)
|
anentropic/django-exclusivebooleanfield
|
refs/heads/master
|
tests/py26-dj13_testproject/settings.py
|
4
|
# Django settings for dj13_testproject project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'z5w0h)xf952mf-*kfq4-g^_r$e1qsyhlmu%)h(7z%*%7+p2y1@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'dj13_testproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'testapp',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
PinkInk/esp32
|
refs/heads/master
|
lib/main.py
|
1
|
import machine, ssd1306
d = ssd1306.SSD1306(machine.I2C(scl=machine.Pin(4), sda=machine.Pin(5)))
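# A hedged usage sketch: assuming this repo's ssd1306 driver follows the
# common MicroPython framebuf-style API (method names are assumptions,
# not verified against lib/ssd1306.py):
# d.fill(0)              # clear the frame buffer
# d.text('hello', 0, 0)  # draw text at the top-left corner
# d.show()               # push the buffer to the panel over I2C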
|
aagm/gfw-api
|
refs/heads/master
|
lib/ee_/imagecollection.py
|
5
|
"""Representation for an Earth Engine ImageCollection."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import apifunction
import collection
import computedobject
import ee_exception
import ee_types
import image
class ImageCollection(collection.Collection):
"""Representation for an Earth Engine ImageCollection."""
_initialized = False
def __init__(self, args):
"""ImageCollection constructor.
Args:
args: ImageCollections can be constructed from the following arguments:
1) A string: assumed to be the name of a collection,
2) An array of images, or anything that can be used to construct an
image.
3) A single image.
4) A computed object - reinterpreted as a collection.
Raises:
EEException: if passed something other than the above.
"""
self.initialize()
# Wrap single images in an array.
if isinstance(args, image.Image):
args = [args]
if ee_types.isString(args):
# An ID.
super(ImageCollection, self).__init__(
apifunction.ApiFunction.lookup('ImageCollection.load'), {'id': args})
elif isinstance(args, (list, tuple)):
# A list of images.
super(ImageCollection, self).__init__(
apifunction.ApiFunction.lookup('ImageCollection.fromImages'), {
'images': [image.Image(i) for i in args]
})
elif isinstance(args, computedobject.ComputedObject):
# A custom object to reinterpret as an ImageCollection.
super(ImageCollection, self).__init__(args.func, args.args)
else:
raise ee_exception.EEException(
'Unrecognized argument type to convert to an ImageCollection: %s' %
args)
@classmethod
def initialize(cls):
"""Imports API functions to this class."""
if not cls._initialized:
super(ImageCollection, cls).initialize()
apifunction.ApiFunction.importApi(
cls, 'ImageCollection', 'ImageCollection')
apifunction.ApiFunction.importApi(
cls, 'reduce', 'ImageCollection')
cls._initialized = True
@classmethod
def reset(cls):
"""Removes imported API functions from this class."""
apifunction.ApiFunction.clearApi(cls)
cls._initialized = False
def getMapId(self, vis_params=None):
"""Fetch and return a MapID.
This mosaics the collection into a single image and returns a mapid suitable
for building a Google Maps overlay.
Args:
vis_params: The visualization parameters.
Returns:
A mapid and token.
"""
mosaic = apifunction.ApiFunction.call_('ImageCollection.mosaic', self)
return mosaic.getMapId(vis_params)
def map(self, algorithm):
"""Maps an algorithm over a collection. See ee.Collection.mapInternal()."""
return self.mapInternal(image.Image, algorithm)
def select(self, selectors, opt_names=None, *args):
"""Select bands from each image in a collection.
Args:
selectors: An array of names, regexes or numeric indices specifying
the bands to select.
opt_names: An array of strings specifying the new names for the
selected bands. If supplied, the length must match the number
of bands selected.
*args: Selector elements as varargs.
Returns:
The image collection with selected bands.
"""
return self.map(lambda img: img.select(selectors, opt_names, *args))
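# A hypothetical usage sketch (the collection ID, band names and vis_params
# below are illustrative, not taken from this module):
#
#   col = ImageCollection('LANDSAT/LC8_L1T').select(['B4', 'B3', 'B2'])
#   mapid = col.getMapId({'min': 0, 'max': 0.3})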
@staticmethod
def name():
return 'ImageCollection'
|
Kast0rTr0y/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/zfs/zfs.py
|
37
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems, volumes, clones and snapshots.
version_added: "1.1"
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs)
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: ['present', 'absent']
required: true
origin:
description:
- Snapshot from which to create a clone
default: null
required: false
key_value:
description:
- The C(zfs) module takes key=value pairs for zfs properties to be set. See the zfs(8) man page for more information.
default: null
required: false
author: "Johan Wiren (@johanwiren)"
'''
EXAMPLES = '''
# Create a new file system called myfs in pool rpool with the setuid property turned off
- zfs:
name: rpool/myfs
state: present
setuid: off
# Create a new volume called myvol in pool rpool.
- zfs:
name: rpool/myvol
state: present
volsize: 10M
# Create a snapshot of rpool/myfs file system.
- zfs:
name: rpool/myfs@mysnapshot
state: present
# Create a new file system called myfs2 with snapdir enabled
- zfs:
name: rpool/myfs2
state: present
snapdir: enabled
# Create a new file system by cloning a snapshot
- zfs:
name: rpool/cloned_fs
state: present
origin: rpool/myfs@mysnapshot
# Destroy a filesystem
- zfs:
name: rpool/myfs
state: absent
'''
import os
class Zfs(object):
def __init__(self, module, name, properties):
self.module = module
self.name = name
self.properties = properties
self.changed = False
self.zfs_cmd = module.get_bin_path('zfs', True)
self.zpool_cmd = module.get_bin_path('zpool', True)
self.pool = name.split('/')[0]
self.is_solaris = os.uname()[0] == 'SunOS'
self.is_openzfs = self.check_openzfs()
self.enhanced_sharing = self.check_enhanced_sharing()
def check_openzfs(self):
cmd = [self.zpool_cmd]
cmd.extend(['get', 'version'])
cmd.append(self.pool)
(rc, out, err) = self.module.run_command(cmd, check_rc=True)
version = out.splitlines()[-1].split()[2]
if version == '-':
return True
if int(version) == 5000:
return True
return False
def check_enhanced_sharing(self):
if self.is_solaris and not self.is_openzfs:
cmd = [self.zpool_cmd]
cmd.extend(['get', 'version'])
cmd.append(self.pool)
(rc, out, err) = self.module.run_command(cmd, check_rc=True)
version = out.splitlines()[-1].split()[2]
if int(version) >= 34:
return True
return False
def exists(self):
cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
return True
else:
return False
def create(self):
if self.module.check_mode:
self.changed = True
return
properties = self.properties
volsize = properties.pop('volsize', None)
volblocksize = properties.pop('volblocksize', None)
origin = properties.pop('origin', None)
cmd = [self.zfs_cmd]
if "@" in self.name:
action = 'snapshot'
elif origin:
action = 'clone'
else:
action = 'create'
cmd.append(action)
if action in ['create', 'clone']:
cmd += ['-p']
if volsize:
cmd += ['-V', volsize]
if volblocksize:
cmd += ['-b', volblocksize]
if properties:
for prop, value in properties.items():
cmd += ['-o', '%s="%s"' % (prop, value)]
if origin:
cmd.append(origin)
cmd.append(self.name)
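# At this point the assembled command for, e.g., the volume in the
# EXAMPLES section above would be roughly: zfs create -p -V 10M rpool/myvol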
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def destroy(self):
if self.module.check_mode:
self.changed = True
return
cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
(rc, out, err) = self.module.run_command(' '.join(cmd))
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def set_property(self, prop, value):
if self.module.check_mode:
self.changed = True
return
cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
self.changed = True
else:
self.module.fail_json(msg=err)
def set_properties_if_changed(self):
current_properties = self.get_current_properties()
for prop, value in self.properties.items():
if current_properties.get(prop, None) != value:
self.set_property(prop, value)
def get_current_properties(self):
cmd = [self.zfs_cmd, 'get', '-H']
if self.enhanced_sharing:
cmd += ['-e']
cmd += ['all', self.name]
rc, out, err = self.module.run_command(" ".join(cmd))
properties = dict()
for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
if source == 'local':
properties[prop] = value
# Add alias for enhanced sharing properties
if self.enhanced_sharing:
properties['sharenfs'] = properties.get('share.nfs', None)
properties['sharesmb'] = properties.get('share.smb', None)
return properties
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(type='str', required=True),
state = dict(type='str', required=True, choices=['present', 'absent']),
# No longer used. Kept here to not interfere with zfs properties
createparent = dict(type='bool', required=False)
),
supports_check_mode=True,
check_invalid_arguments=False
)
state = module.params.pop('state')
name = module.params.pop('name')
# Get all valid zfs-properties
properties = dict()
for prop, value in module.params.items():
# All freestyle params are zfs properties
if prop not in module.argument_spec:
# Reverse the boolification of freestyle zfs properties
if isinstance(value, bool):
if value is True:
properties[prop] = 'on'
else:
properties[prop] = 'off'
else:
properties[prop] = value
result = {}
result['name'] = name
result['state'] = state
zfs = Zfs(module, name, properties)
if state == 'present':
if zfs.exists():
zfs.set_properties_if_changed()
else:
zfs.create()
elif state == 'absent':
if zfs.exists():
zfs.destroy()
result.update(zfs.properties)
result['changed'] = zfs.changed
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
chirilo/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/QueueStatusServer/model/queuepropertymixin.py
|
143
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class QueuePropertyMixin(object):
def _queue_getter(self):
# Import at runtime to avoid circular imports
from model.queues import Queue
return Queue.queue_with_name(self.queue_name)
def _queue_setter(self, queue):
self.queue_name = queue.name() if queue else None
queue = property(_queue_getter, _queue_setter)
|
alexherns/biotite-scripts
|
refs/heads/master
|
taxo_table_parser.py
|
1
|
#!/usr/bin/env python2.7
import sys, getopt
def usage():
print '''
Useful script for extracting lines of interest from a taxonomy table file as downloaded from ggkbase
Usage: taxo_table_parser.py <tab_sep_taxo_table> <options>
-h, --help Print this help dialog
<ARGUMENTS>
--min_gc=<float> Default 0
--max_gc=<float> Default 100
--min_cov=<float> Default 0
--max_cov=<float> Default "inf"
--min_size=<float> Default 0
--tax_def=<string> e.g. species, genus, or class
--tax_query=<string> e.g. Geobacter uraniireducens
--tax_cutoff=<float> Default 0
--exact
'''
exit()
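# Example invocation (the input file name is hypothetical):
#   taxo_table_parser.py my_project.taxo_table.tsv --min_cov=10 \
#       --tax_def=genus --tax_query=Geobacter --tax_cutoff=50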
opts, args = getopt.getopt(sys.argv[2:], 'h', ['help', 'min_gc=', 'max_gc=', 'min_cov=', 'max_cov=', 'min_size=', 'tax_def=', 'tax_query=', 'tax_cutoff=', 'exact'])
min_gc = 0.
max_gc = 100.
min_cov = 0.
max_cov = float("inf")
min_size = 0.
tax_def, tax_query, tax_cutoff = [None, None, 0.]
exact= False
for o, a in opts:
if o in ('-h', '--help'):
usage()
elif o == '--min_gc':
min_gc = float(a)
elif o == '--max_gc':
max_gc = float(a)
elif o == '--min_cov':
min_cov = float(a)
elif o == '--max_cov':
max_cov = float(a)
elif o == '--min_size':
min_size = float(a)
elif o == '--tax_def':
tax_def = a
elif o == '--tax_query':
tax_query = a
elif o == '--tax_cutoff':
tax_cutoff = float(a)
elif o == '--exact':
exact= True
print exact
if (tax_def is not None and tax_query is None) or (tax_query is not None and tax_def is None):
sys.stderr.write("ERROR: If selecting by taxonomy, you MUST provide both a taxonomic group to search and a string to identify")
exit()
class contig:
def __init__(self, contig_detail_list):
name, size, coverage, gc, taxo_winner, taxo_winner_pc, \
species_winner, species_winner_pc, genus_winner, genus_winner_pc, \
order_winner, order_winner_pc, class_winner, class_winner_pc, \
phylum_winner, phylum_winner_pc, domain_winner, domain_winner_pc = contig_detail_list
self.name = name
self.size = float(size)
self.coverage = float(coverage)/100
self.gc = float(gc)/100
if taxo_winner == '':
self.taxo_winner = None
self.taxo_winner_pc = None
else:
self.taxo_winner = taxo_winner
self.taxo_winner_pc = float(taxo_winner_pc)
self.species_winner = species_winner
self.species_winner_pc = float(species_winner_pc)
self.genus_winner = genus_winner
self.genus_winner_pc = float(genus_winner_pc)
self.order_winner = order_winner
self.order_winner_pc = float(order_winner_pc)
self.class_winner = class_winner
self.class_winner_pc = float(class_winner_pc)
self.phylum_winner = phylum_winner
self.phylum_winner_pc = float(phylum_winner_pc)
self.domain_winner = domain_winner
self.domain_winner_pc = float(domain_winner_pc)
def toString(self):
return '\t'.join([self.name, str(self.size), str(self.coverage), str(self.gc), str(self.taxo_winner), str(self.taxo_winner_pc), \
self.species_winner, str(self.species_winner_pc), self.genus_winner, str(self.genus_winner_pc), \
self.order_winner, str(self.order_winner_pc), self.class_winner, str(self.class_winner_pc), \
self.phylum_winner, str(self.phylum_winner_pc), self.domain_winner, str(self.domain_winner_pc)])
contig_list = []
print "Contig name Size (bp) Coverage GC % Taxonomy winner Winner % Species winner Species winner % Genus winner Genus winner % Order winner Order winner % Class winner Class winner % Phylum winner Phylum winner % Domain winner Domain winner %"
for i, line in enumerate(open(sys.argv[1],'r')):
if line.strip().split()[0] == 'Contig':
continue
contig_detail_list = line.strip().split('\t')
if len(contig_detail_list)<18:
sys.stderr.write("Contig {0} contains too few parameters to initialize\n".format(contig_detail_list[0]))
continue
this_contig = contig(contig_detail_list)
if this_contig.gc >= min_gc and this_contig.gc <= max_gc \
and this_contig.coverage >= min_cov and this_contig.coverage <= max_cov \
and this_contig.size >= min_size:
if tax_def is not None:
# Resolve the requested taxonomy rank by attribute name instead of exec()
winner = getattr(this_contig, tax_def + '_winner')
winner_pc = getattr(this_contig, tax_def + '_winner_pc')
matched = (winner == tax_query) if exact else (tax_query in winner)
if matched and winner_pc >= tax_cutoff:
print this_contig.toString()
contig_list.append(this_contig)
else:
print this_contig.toString()
contig_list.append(this_contig)
print "Total Size:\t{0}\tNumber of contigs:{1}".format(sum([this_contig.size for this_contig in contig_list]), len(contig_list))
|
xaviercobain88/framework-python
|
refs/heads/master
|
build/lib.linux-i686-2.7/openerp/addons/sale_order_dates/sale_order_dates.py
|
47
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
class sale_order_dates(osv.osv):
_inherit = 'sale.order'
def _get_effective_date(self, cr, uid, ids, name, arg, context=None):
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
for pick in order.picking_ids:
dates_list.append(pick.date)
if dates_list:
res[order.id] = min(dates_list)
else:
res[order.id] = False
return res
def _get_commitment_date(self, cr, uid, ids, name, arg, context=None):
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
for line in order.order_line:
dt = datetime.strptime(order.date_order, '%Y-%m-%d') + relativedelta(days=line.delay or 0.0)
dt_s = dt.strftime('%Y-%m-%d')
dates_list.append(dt_s)
if dates_list:
res[order.id] = min(dates_list)
else:
# Mirror _get_effective_date so every browsed id gets a value.
res[order.id] = False
return res
_columns = {
'commitment_date': fields.function(_get_commitment_date, store=True, type='date', string='Commitment Date', help="Committed date for delivery."),
'requested_date': fields.date('Requested Date', help="Date requested by the customer for the sale."),
'effective_date': fields.function(_get_effective_date, type='date', store=True, string='Effective Date',help="Date on which picking is created."),
}
sale_order_dates()
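# Illustration (not part of the original module): for an order with
# date_order 2013-01-10 and two lines whose delays are 5 and 2 days, the
# lines promise 2013-01-15 and 2013-01-12, so _get_commitment_date stores
# the earliest date, 2013-01-12.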
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
0sc0d3r/enigma2
|
refs/heads/master
|
lib/python/Components/Converter/ConverterRotator.py
|
68
|
#
# ConverterRotator Converter for Enigma2 (ConverterRotator.py)
# Coded by vlamo (c) 2012
#
# Version: 0.1 (26.01.2012 04:05)
# Support: http://dream.altmaster.net/
#
from Converter import Converter
from Poll import Poll
from Components.Element import cached
class ConverterRotator(Poll, Converter, object):
"""Static Text Converter Rotator"""
def __init__(self, type):
Poll.__init__(self)
Converter.__init__(self, type)
self.mainstream = None
self.sourceList = [ ]
self.sourceIndex = -1
if type and type.isdigit():
self.poll_interval = int(type) * 1000
def poll(self):
self.sourceIndex = (self.sourceIndex + 1) % len(self.sourceList)
self.downstream_elements.changed((self.CHANGED_POLL,))
def doSuspend(self, suspended):
if self.mainstream and len(self.sourceList) != 0:
if suspended:
self.poll_enabled = False
else:
self.sourceIndex = len(self.sourceList)-1
self.poll_enabled = True
self.poll()
@cached
def getText(self):
result = ""
if self.poll_enabled:
prev_source = self.sourceList[self.sourceIndex][0].source
self.sourceList[self.sourceIndex][0].source = self.mainstream
result = self.sourceList[self.sourceIndex][0].text or ""
self.sourceList[self.sourceIndex][0].source = prev_source
return result
text = property(getText)
def changed(self, what, parent=None):
if what[0] == self.CHANGED_DEFAULT and not len(self.sourceList):
upstream = self.source
while upstream:
self.sourceList.insert(0, (upstream, hasattr(upstream, 'poll_enabled')))
upstream = upstream.source
if len(self.sourceList):
self.mainstream = self.sourceList.pop(0)[0]
#if what[0] == self.CHANGED_POLL and \
# self.poll_enabled and \
# not self.sourceList[self.sourceIndex][1]:
# return
self.downstream_elements.changed(what)
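# Illustrative skin usage (assumed, not part of this file): the converter is
# chained after other text converters in a skin and rotates their output,
# with the poll interval in seconds passed as the converter argument, e.g.
# <convert type="ConverterRotator">10</convert>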
|
gibxxi/nzbToMedia
|
refs/heads/master
|
libs/unidecode/x0c0.py
|
253
|
data = (
'bbweok', # 0x00
'bbweot', # 0x01
'bbweop', # 0x02
'bbweoh', # 0x03
'bbwe', # 0x04
'bbweg', # 0x05
'bbwegg', # 0x06
'bbwegs', # 0x07
'bbwen', # 0x08
'bbwenj', # 0x09
'bbwenh', # 0x0a
'bbwed', # 0x0b
'bbwel', # 0x0c
'bbwelg', # 0x0d
'bbwelm', # 0x0e
'bbwelb', # 0x0f
'bbwels', # 0x10
'bbwelt', # 0x11
'bbwelp', # 0x12
'bbwelh', # 0x13
'bbwem', # 0x14
'bbweb', # 0x15
'bbwebs', # 0x16
'bbwes', # 0x17
'bbwess', # 0x18
'bbweng', # 0x19
'bbwej', # 0x1a
'bbwec', # 0x1b
'bbwek', # 0x1c
'bbwet', # 0x1d
'bbwep', # 0x1e
'bbweh', # 0x1f
'bbwi', # 0x20
'bbwig', # 0x21
'bbwigg', # 0x22
'bbwigs', # 0x23
'bbwin', # 0x24
'bbwinj', # 0x25
'bbwinh', # 0x26
'bbwid', # 0x27
'bbwil', # 0x28
'bbwilg', # 0x29
'bbwilm', # 0x2a
'bbwilb', # 0x2b
'bbwils', # 0x2c
'bbwilt', # 0x2d
'bbwilp', # 0x2e
'bbwilh', # 0x2f
'bbwim', # 0x30
'bbwib', # 0x31
'bbwibs', # 0x32
'bbwis', # 0x33
'bbwiss', # 0x34
'bbwing', # 0x35
'bbwij', # 0x36
'bbwic', # 0x37
'bbwik', # 0x38
'bbwit', # 0x39
'bbwip', # 0x3a
'bbwih', # 0x3b
'bbyu', # 0x3c
'bbyug', # 0x3d
'bbyugg', # 0x3e
'bbyugs', # 0x3f
'bbyun', # 0x40
'bbyunj', # 0x41
'bbyunh', # 0x42
'bbyud', # 0x43
'bbyul', # 0x44
'bbyulg', # 0x45
'bbyulm', # 0x46
'bbyulb', # 0x47
'bbyuls', # 0x48
'bbyult', # 0x49
'bbyulp', # 0x4a
'bbyulh', # 0x4b
'bbyum', # 0x4c
'bbyub', # 0x4d
'bbyubs', # 0x4e
'bbyus', # 0x4f
'bbyuss', # 0x50
'bbyung', # 0x51
'bbyuj', # 0x52
'bbyuc', # 0x53
'bbyuk', # 0x54
'bbyut', # 0x55
'bbyup', # 0x56
'bbyuh', # 0x57
'bbeu', # 0x58
'bbeug', # 0x59
'bbeugg', # 0x5a
'bbeugs', # 0x5b
'bbeun', # 0x5c
'bbeunj', # 0x5d
'bbeunh', # 0x5e
'bbeud', # 0x5f
'bbeul', # 0x60
'bbeulg', # 0x61
'bbeulm', # 0x62
'bbeulb', # 0x63
'bbeuls', # 0x64
'bbeult', # 0x65
'bbeulp', # 0x66
'bbeulh', # 0x67
'bbeum', # 0x68
'bbeub', # 0x69
'bbeubs', # 0x6a
'bbeus', # 0x6b
'bbeuss', # 0x6c
'bbeung', # 0x6d
'bbeuj', # 0x6e
'bbeuc', # 0x6f
'bbeuk', # 0x70
'bbeut', # 0x71
'bbeup', # 0x72
'bbeuh', # 0x73
'bbyi', # 0x74
'bbyig', # 0x75
'bbyigg', # 0x76
'bbyigs', # 0x77
'bbyin', # 0x78
'bbyinj', # 0x79
'bbyinh', # 0x7a
'bbyid', # 0x7b
'bbyil', # 0x7c
'bbyilg', # 0x7d
'bbyilm', # 0x7e
'bbyilb', # 0x7f
'bbyils', # 0x80
'bbyilt', # 0x81
'bbyilp', # 0x82
'bbyilh', # 0x83
'bbyim', # 0x84
'bbyib', # 0x85
'bbyibs', # 0x86
'bbyis', # 0x87
'bbyiss', # 0x88
'bbying', # 0x89
'bbyij', # 0x8a
'bbyic', # 0x8b
'bbyik', # 0x8c
'bbyit', # 0x8d
'bbyip', # 0x8e
'bbyih', # 0x8f
'bbi', # 0x90
'bbig', # 0x91
'bbigg', # 0x92
'bbigs', # 0x93
'bbin', # 0x94
'bbinj', # 0x95
'bbinh', # 0x96
'bbid', # 0x97
'bbil', # 0x98
'bbilg', # 0x99
'bbilm', # 0x9a
'bbilb', # 0x9b
'bbils', # 0x9c
'bbilt', # 0x9d
'bbilp', # 0x9e
'bbilh', # 0x9f
'bbim', # 0xa0
'bbib', # 0xa1
'bbibs', # 0xa2
'bbis', # 0xa3
'bbiss', # 0xa4
'bbing', # 0xa5
'bbij', # 0xa6
'bbic', # 0xa7
'bbik', # 0xa8
'bbit', # 0xa9
'bbip', # 0xaa
'bbih', # 0xab
'sa', # 0xac
'sag', # 0xad
'sagg', # 0xae
'sags', # 0xaf
'san', # 0xb0
'sanj', # 0xb1
'sanh', # 0xb2
'sad', # 0xb3
'sal', # 0xb4
'salg', # 0xb5
'salm', # 0xb6
'salb', # 0xb7
'sals', # 0xb8
'salt', # 0xb9
'salp', # 0xba
'salh', # 0xbb
'sam', # 0xbc
'sab', # 0xbd
'sabs', # 0xbe
'sas', # 0xbf
'sass', # 0xc0
'sang', # 0xc1
'saj', # 0xc2
'sac', # 0xc3
'sak', # 0xc4
'sat', # 0xc5
'sap', # 0xc6
'sah', # 0xc7
'sae', # 0xc8
'saeg', # 0xc9
'saegg', # 0xca
'saegs', # 0xcb
'saen', # 0xcc
'saenj', # 0xcd
'saenh', # 0xce
'saed', # 0xcf
'sael', # 0xd0
'saelg', # 0xd1
'saelm', # 0xd2
'saelb', # 0xd3
'saels', # 0xd4
'saelt', # 0xd5
'saelp', # 0xd6
'saelh', # 0xd7
'saem', # 0xd8
'saeb', # 0xd9
'saebs', # 0xda
'saes', # 0xdb
'saess', # 0xdc
'saeng', # 0xdd
'saej', # 0xde
'saec', # 0xdf
'saek', # 0xe0
'saet', # 0xe1
'saep', # 0xe2
'saeh', # 0xe3
'sya', # 0xe4
'syag', # 0xe5
'syagg', # 0xe6
'syags', # 0xe7
'syan', # 0xe8
'syanj', # 0xe9
'syanh', # 0xea
'syad', # 0xeb
'syal', # 0xec
'syalg', # 0xed
'syalm', # 0xee
'syalb', # 0xef
'syals', # 0xf0
'syalt', # 0xf1
'syalp', # 0xf2
'syalh', # 0xf3
'syam', # 0xf4
'syab', # 0xf5
'syabs', # 0xf6
'syas', # 0xf7
'syass', # 0xf8
'syang', # 0xf9
'syaj', # 0xfa
'syac', # 0xfb
'syak', # 0xfc
'syat', # 0xfd
'syap', # 0xfe
'syah', # 0xff
)
|
raildo/python-keystoneclient
|
refs/heads/master
|
keystoneclient/openstack/common/jsonutils.py
|
3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from keystoneclient.openstack.common import gettextutils
from keystoneclient.openstack.common import importutils
from keystoneclient.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ that nevertheless isn't callable via list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
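# Illustration (not part of the original module): dumps() falls back to
# to_primitive() for objects the stdlib encoder cannot handle, e.g.
# dumps({'when': datetime.datetime(2012, 1, 1)})
# returns '{"when": "2012-01-01T00:00:00.000000"}' (the exact format is
# whatever timeutils.strtime() produces).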
|
ekwoodrich/nirha
|
refs/heads/master
|
nirhalib/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/hebrewprober.py
|
2928
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# Adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
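# Illustrative wiring (assumed from the comments above; class and model names
# follow the surrounding charade package): the group prober builds two model
# probers over the same windows-1255 model, one of them reversed, and
# registers them with this helper, e.g.
# hebrew_prober = HebrewProber()
# logical = SingleByteCharSetProber(Win1255HebrewModel, False, hebrew_prober)
# visual = SingleByteCharSetProber(Win1255HebrewModel, True, hebrew_prober)
# hebrew_prober.set_model_probers(logical, visual)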
|
Perkville/django-autocomplete-light
|
refs/heads/v2
|
test_project/select2_generic_m2m/test_functional.py
|
3
|
from dal.test import case, stories
from dal_select2.test import Select2Story
from .models import TestModel
class AdminGenericM2MBase(object):
field_name = 'test'
inline_related_name = 'inline_test_models'
def setUp(self):
super(AdminGenericM2MBase, self).setUp()
self.get(url=self.get_modeladmin_url('add'))
self.fill_name()
def test_can_select_option(self):
option0, ctype0 = self.create_option()
option1, ctype1 = self.create_option()
story = stories.SelectOptionMultiple(self)
story.select_option(option0.name)
story.select_option(option1.name)
story.assert_selection_persists(
(
'%s-%s' % (ctype0.pk, option0.pk),
'%s-%s' % (ctype1.pk, option1.pk)
),
(
option0.name,
option1.name,
),
)
class AdminGenericM2MTestCase(AdminGenericM2MBase,
Select2Story,
case.AdminMixin,
case.ContentTypeOptionMixin,
case.AutocompleteTestCase):
model = TestModel
|
wberrier/meson
|
refs/heads/master
|
test cases/windows/8 msvc dll versioning/copyfile.py
|
100
|
#!/usr/bin/env python3
import sys
import shutil
shutil.copyfile(sys.argv[1], sys.argv[2])
|
technologiescollege/Blockly-rduino-communication
|
refs/heads/master
|
scripts_XP/Lib/site-packages/jinja2/constants.py
|
220
|
# -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
|
cuker/python-usps
|
refs/heads/master
|
usps/addressinformation/base.py
|
1
|
'''
See http://www.usps.com/webtools/htm/Address-Information.htm for complete documentation of the API
'''
import urllib, urllib2
try:
from xml.etree import ElementTree as ET
except ImportError:
from elementtree import ElementTree as ET
def utf8urlencode(data):
ret = dict()
for key, value in data.iteritems():
ret[key] = value.encode('utf8')
return urllib.urlencode(ret)
def dicttoxml(dictionary, parent, tagname, attributes=None):
element = ET.SubElement(parent, tagname)
if attributes: # USPS likes things in a certain order!
for key in attributes:
ET.SubElement(element, key).text = dictionary.get(key, '')
else:
for key, value in dictionary.iteritems():
ET.SubElement(element, key).text = value
return element
def xmltodict(element):
ret = dict()
for item in element.getchildren():
ret[item.tag] = item.text
return ret
class USPSXMLError(Exception):
def __init__(self, element):
self.info = xmltodict(element)
super(USPSXMLError, self).__init__(self.info['Description'])
class USPSAddressService(object):
SERVICE_NAME = None
API = None
CHILD_XML_NAME = None
PARAMETERS = None
def __init__(self, url):
self.url = url
def submit_xml(self, xml):
data = {'XML':ET.tostring(xml),
'API':self.API}
response = urllib2.urlopen(self.url, utf8urlencode(data))
root = ET.parse(response).getroot()
if root.tag == 'Error':
raise USPSXMLError(root)
error = root.find('.//Error')
if error is not None: # a childless Element is falsy, so test against None
raise USPSXMLError(error)
return root
def parse_xml(self, xml):
items = list()
for item in xml.getchildren(): # xml.findall(self.SERVICE_NAME+'Response')
items.append(xmltodict(item))
return items
def make_xml(self, userid, addresses):
root = ET.Element(self.SERVICE_NAME+'Request')
root.attrib['USERID'] = userid
index = 0
for address_dict in addresses:
address_xml = dicttoxml(address_dict, root, self.CHILD_XML_NAME, self.PARAMETERS)
address_xml.attrib['ID'] = str(index)
index += 1
return root
def execute(self, userid, addresses):
xml = self.make_xml(userid, addresses)
return self.parse_xml(self.submit_xml(xml))
class AddressValidate(USPSAddressService):
SERVICE_NAME = 'AddressValidate'
CHILD_XML_NAME = 'Address'
API = 'Verify'
PARAMETERS = ['FirmName',
'Address1',
'Address2',
'City',
'State',
'Zip5',
'Zip4',]
class ZipCodeLookup(USPSAddressService):
SERVICE_NAME = 'ZipCodeLookup'
CHILD_XML_NAME = 'Address'
API = 'ZipCodeLookup'
PARAMETERS = ['FirmName',
'Address1',
'Address2',
'City',
'State',]
class CityStateLookup(USPSAddressService):
SERVICE_NAME = 'CityStateLookup'
CHILD_XML_NAME = 'ZipCode'
API = 'CityStateLookup'
PARAMETERS = ['Zip5',]
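# Example usage (illustrative; the endpoint URL and USERID are placeholders,
# not values defined in this file):
# service = AddressValidate('http://production.shippingapis.com/ShippingAPI.dll')
# results = service.execute('YOUR_USERID', [{'Address2': '6406 Ivy Lane',
# 'City': 'Greenbelt', 'State': 'MD'}])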
|
tensorflow/tpu
|
refs/heads/master
|
tools/dataset_profiler/__init__.py
|
1
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
lancezlin/ml_template_py
|
refs/heads/master
|
lib/python2.7/site-packages/numpy/polynomial/chebyshev.py
|
30
|
"""
Objects for dealing with Chebyshev series.
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `chebdomain` -- Chebyshev series default domain, [-1,1].
- `chebzero` -- (Coefficients of the) Chebyshev series that evaluates
identically to 0.
- `chebone` -- (Coefficients of the) Chebyshev series that evaluates
identically to 1.
- `chebx` -- (Coefficients of the) Chebyshev series for the identity map,
``f(x) = x``.
Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
- `chebpow` -- raise a Chebyshev series to a positive integer power.
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
- `chebgrid2d` -- evaluate a 2D Chebyshev series on a Cartesian product.
- `chebgrid3d` -- evaluate a 3D Chebyshev series on a Cartesian product.
Calculus
--------
- `chebder` -- differentiate a Chebyshev series.
- `chebint` -- integrate a Chebyshev series.
Misc Functions
--------------
- `chebfromroots` -- create a Chebyshev series with specified roots.
- `chebroots` -- find the roots of a Chebyshev series.
- `chebvander` -- Vandermonde-like matrix for Chebyshev polynomials.
- `chebvander2d` -- Vandermonde-like matrix for 2D power series.
- `chebvander3d` -- Vandermonde-like matrix for 3D power series.
- `chebgauss` -- Gauss-Chebyshev quadrature, points and weights.
- `chebweight` -- Chebyshev weight function.
- `chebcompanion` -- symmetrized companion matrix in Chebyshev form.
- `chebfit` -- least-squares fit returning a Chebyshev series.
- `chebpts1` -- Chebyshev points of the first kind.
- `chebpts2` -- Chebyshev points of the second kind.
- `chebtrim` -- trim leading coefficients from a Chebyshev series.
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
Classes
-------
- `Chebyshev` -- A Chebyshev series class.
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
'chebgauss', 'chebweight']
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
"""
n = c.size
zs = np.zeros(2*n-1, dtype=c.dtype)
zs[n-1:] = c/2
return zs + zs[::-1]
def _zseries_to_cseries(zs):
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
"""
n = (zs.size + 1)//2
c = zs[n-1:].copy()
c[1:n] *= 2
return c
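# Round-trip illustration (not in the original source): for c = [1., 2., 3.],
# _cseries_to_zseries(c) gives [1.5, 1., 1., 1., 1.5], and
# _zseries_to_cseries() maps that back to [1., 2., 3.].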
def _zseries_mul(z1, z2):
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2):
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-D ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
len1 = len(z1)
len2 = len(z2)
if len2 == 1:
z1 /= z2
return z1, z1[:1]*0
elif len1 < len2:
return z1[:1]*0, z1
else:
dlen = len1 - len2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j:
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
z1[j:j+len2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+len2] -= tmp
quo /= scl
rem = z1[i+1:i-1+len2].copy()
return quo, rem
def _zseries_der(zs):
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs):
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol):
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = chebadd(chebmulx(res), pol[i])
return res
def cheb2poly(c):
"""
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
>>> P.cheb2poly(range(4))
array([ -2., -8., 4., 12.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1)
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1, 1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def chebfromroots(roots):
"""
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [chebline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = chebmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def chebmulx(c):
"""Multiply a Chebyshev series by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
if len(c) > 1:
tmp = c[1:]/2
prd[2:] = tmp
prd[0:-2] += tmp
return prd
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "reproject"
the product onto said basis set, which typically produces
"unintuitive live" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "reproject" the results onto said basis
set, which typically produces "unintuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([ 0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(c, pow, maxpower=16):
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Chebyshev series of `c` raised to the power `pow`.
See Also
--------
chebadd, chebsub, chebmul, chebdiv
Examples
--------
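>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow((1, 2), 2) # (T_0 + 2*T_1)**2, reprojected onto the basis
array([ 3., 4., 2.])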
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(c)
prd = zs
for i in range(2, power + 1):
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
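# A square-and-multiply variant of the loop above would need only
# O(log(power)) convolutions; sketch (illustrative, not original code):
# prd = np.array([1], dtype=zs.dtype)
# while power:
# if power & 1: prd = np.convolve(prd, zs)
# zs = np.convolve(zs, zs)
# power >>= 1
# return _zseries_to_cseries(prd)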
def chebder(c, m=1, scl=1, axis=0):
"""
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([ 14., 12., 24.])
>>> C.chebder(c,3)
array([ 96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([ 12., 96.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j)*c[j]
c[j - 2] += (j*c[j])/(j - 2)
if n > 1:
der[1] = 4*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Chebyshev series.
Returns the Chebyshev series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3)
>>> C.chebint(c)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
0.00625 ])
>>> C.chebint(c, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,scl=-2)
array([-1., 1., -1., -1.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def chebval(x, c, tensor=True):
"""
Evaluate a Chebyshev series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
chebval2d, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
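    A couple of small checks (illustrative values, exact up to roundoff):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebval(1, [1, 2, 3])
    6.0
    >>> C.chebval([-1, 0, 1], [1, 2, 3])
    array([ 2., -2., 6.])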
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
x2 = 2*x
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
c0 = c[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
def chebval2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than 2 the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
chebval, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
    .. versionadded:: 1.7.0
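    Examples
    --------
    A minimal sketch (illustrative only): with the coefficient array below
    the series reduces to ``p(x, y) = T_1(x)*T_1(y) = x*y``.
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> np.allclose(C.chebval2d(0.5, -1.0, [[0, 0], [0, 1]]), -0.5)
    True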
"""
try:
x, y = np.array((x, y), copy=0)
    except Exception:
raise ValueError('x, y are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
return c
def chebgrid2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
This function returns the values:
    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points in the
Cartesian product of `x` and `y`.
See Also
--------
chebval, chebval2d, chebval3d, chebgrid3d
Notes
-----
    .. versionadded:: 1.7.0
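    Examples
    --------
    A small sketch (illustrative only): the coefficient array below gives
    ``p(a, b) = 1 + T_1(a)*T_1(b) = 1 + a*b`` on the grid.
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x = np.linspace(-1, 1, 3)
    >>> y = np.linspace(-1, 1, 2)
    >>> np.allclose(C.chebgrid2d(x, y, [[1, 0], [0, 1]]), 1 + np.outer(x, y))
    True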
"""
c = chebval(x, c)
c = chebval(y, c)
return c
def chebval3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebgrid3d
Notes
-----
    .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
    except Exception:
raise ValueError('x, y, z are incompatible')
c = chebval(x, c)
c = chebval(y, c, tensor=False)
c = chebval(z, c, tensor=False)
return c
def chebgrid3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
c = chebval(x, c)
c = chebval(y, c)
c = chebval(z, c)
return c
def chebvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = T_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Chebyshev polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
``chebval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Chebyshev series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
the converted `x`.
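    Examples
    --------
    A quick check of the equivalence with `chebval` described above
    (illustrative only):
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x = np.linspace(-1, 1, 5)
    >>> V = C.chebvander(x, 3)
    >>> V.shape
    (5, 4)
    >>> np.allclose(np.dot(V, [1, 2, 3, 4]), C.chebval(x, [1, 2, 3, 4]))
    True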
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries.
v[0] = x*0 + 1
if ideg > 0:
x2 = 2*x
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x2 - v[i-2]
return np.rollaxis(v, 0, v.ndim)
def chebvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = T_i(x) * T_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Chebyshev polynomials.
If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def chebvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Chebyshev polynomials.
If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    chebvander, chebvander2d, chebval2d, chebval3d
Notes
-----
    .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = chebvander(x, degx)
vy = chebvander(y, degy)
vz = chebvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
    Return the coefficients of a Chebyshev series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For Numpy versions >= 1.11 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
polyfit, legfit, lagfit, hermfit, hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Chebyshev series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
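    A minimal sketch (illustrative only): fitting data from an exactly
    representable polynomial recovers the expected coefficients, since
    ``x**2 == (T_0(x) + T_2(x))/2``.
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x = np.linspace(-1, 1, 51)
    >>> np.allclose(C.chebfit(x, x**2, 2), [0.5, 0, 0.5])
    True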
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = chebvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = chebvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax + 1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
    .. versionadded:: 1.7.0
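    Examples
    --------
    A small sanity check (illustrative only): the eigenvalues of the scaled
    companion matrix of ``T_2`` are its roots, ``+/-sqrt(0.5)``.
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> m = C.chebcompanion([0, 0, 1])
    >>> np.allclose(np.linalg.eigvalsh(m), [-np.sqrt(.5), np.sqrt(.5)])
    True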
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[0] = np.sqrt(.5)
top[1:] = 1/2
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
def chebroots(c):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = chebcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
    the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
    w : ndarray
        1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
    .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
    .. math:: w_i = \\pi / n
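    Examples
    --------
    A small check against the closed form above (illustrative only):
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x, w = C.chebgauss(2)
    >>> np.allclose(x, [np.cos(np.pi/4), np.cos(3*np.pi/4)])
    True
    >>> np.allclose(w, np.pi/2)
    True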
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
        raise ValueError("deg must be a positive integer")
x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
w = np.ones(ideg)*(np.pi/ideg)
return x, w
def chebweight(x):
"""
The weight function of the Chebyshev polynomials.
    The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of
integration is :math:`[-1, 1]`. The Chebyshev polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
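    Examples
    --------
    A quick illustration (values exact up to roundoff, since
    ``1/sqrt(1 - 0.6**2) == 1.25``):
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> np.allclose(C.chebweight([0., 0.6]), [1., 1.25])
    True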
"""
w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
return w
def chebpts1(npts):
"""
Chebyshev points of the first kind.
The Chebyshev points of the first kind are the points ``cos(x)``,
where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the first kind.
See Also
--------
chebpts2
Notes
-----
.. versionadded:: 1.5.0
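    Examples
    --------
    A quick check (illustrative only); for ``npts=3`` the points are
    ``cos(5*pi/6), cos(pi/2), cos(pi/6)`` in increasing order.
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> np.allclose(C.chebpts1(3), [-np.sqrt(3)/2, 0, np.sqrt(3)/2])
    True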
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 1:
raise ValueError("npts must be >= 1")
x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)
return np.cos(x)
def chebpts2(npts):
"""
Chebyshev points of the second kind.
The Chebyshev points of the second kind are the points ``cos(x)``,
where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the second kind.
Notes
-----
.. versionadded:: 1.5.0
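    Examples
    --------
    A quick check (illustrative only); for ``npts=3`` the points are the
    interval endpoints and the midpoint.
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> np.allclose(C.chebpts2(3), [-1., 0., 1.])
    True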
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 2:
raise ValueError("npts must be >= 2")
x = np.linspace(-np.pi, 0, _npts)
return np.cos(x)
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
"""A Chebyshev series class.
The Chebyshev class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
methods listed below.
Parameters
----------
coef : array_like
Chebyshev coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(chebadd)
_sub = staticmethod(chebsub)
_mul = staticmethod(chebmul)
_div = staticmethod(chebdiv)
_pow = staticmethod(chebpow)
_val = staticmethod(chebval)
_int = staticmethod(chebint)
_der = staticmethod(chebder)
_fit = staticmethod(chebfit)
_line = staticmethod(chebline)
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
# Virtual properties
nickname = 'cheb'
domain = np.array(chebdomain)
window = np.array(chebdomain)
|
puttarajubr/commcare-hq
|
refs/heads/master
|
hqscripts/models.py
|
7
|
# This file is only here so that django will recognize that
# this is a valid app and run the associated unit tests.
|
peter1000/subuser
|
refs/heads/master
|
logic/subuserlib/classes/describable.py
|
1
|
#!/usr/bin/env python
# This file should be compatible with both Python 2 and 3.
# If it is not, please file a bug report.
"""
If you want to print a description of an object to standard output, make that object describable.
"""
#external imports
import abc
#internal imports
#import ...
class Describable(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def describe(self):
""" Print out a human readable description of the object. """
pass
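#A minimal sketch (illustrative only, not part of subuser itself): a concrete
#subclass just implements describe().
class ExampleDescribable(Describable):
  def describe(self):
    print("An example describable object.")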
|
namccart/gnuradio
|
refs/heads/master
|
gr-blocks/python/blocks/qa_regenerate.py
|
57
|
#!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_regenerate(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_regen1(self):
tb = self.tb
data = [0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
expected_result = (0, 0, 0,
1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
src = blocks.vector_source_b(data, False)
regen = blocks.regenerate_bb(5, 2)
dst = blocks.vector_sink_b()
tb.connect(src, regen)
tb.connect(regen, dst)
tb.run()
dst_data = dst.data()
self.assertEqual(expected_result, dst_data)
def test_regen2(self):
tb = self.tb
data = 200*[0,]
data[9] = 1
data[99] = 1
expected_result = 200*[0,]
expected_result[9] = 1
expected_result[19] = 1
expected_result[29] = 1
expected_result[39] = 1
expected_result[99] = 1
expected_result[109] = 1
expected_result[119] = 1
expected_result[129] = 1
src = blocks.vector_source_b(data, False)
regen = blocks.regenerate_bb(10, 3)
dst = blocks.vector_sink_b()
tb.connect(src, regen)
tb.connect(regen, dst)
        tb.run()
dst_data = dst.data()
self.assertEqual(tuple(expected_result), dst_data)
if __name__ == '__main__':
gr_unittest.run(test_regenerate, "test_regenerate.xml")
|
rruebner/odoo
|
refs/heads/master
|
addons/mrp/report/workcenter_load.py
|
437
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render import render
from openerp.report.interface import report_int
import time
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.report.misc import choice_colors
import StringIO
from pychart import *
theme.use_color = 1
#
# TODO: Bad code, seems buggy, TO CHECK !
#
class external_pdf(render):
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type='pdf'
def _render(self):
return self.pdf
class report_custom(report_int):
def _compute_dates(self, time_unit, start, stop):
if not stop:
stop = start
if time_unit == 'month':
dates = {}
a = int(start.split("-")[0])*12 + int(start.split("-")[1])
z = int(stop.split("-")[0])*12 + int(stop.split("-")[1]) + 1
for i in range(a,z):
year = i/12
month = i%12
if month == 0:
year -= 1
month = 12
months = {1:"January",2:"February",3:"March",4:"April",5:"May",6:"June",7:"July",8:"August",9:"September",10:"October",11:"November",12:"December"}
dates[i] = {
'name' :months[month],
'start':(datetime(year, month, 2) + relativedelta(day=1)).strftime('%Y-%m-%d'),
'stop' :(datetime(year, month, 2) + relativedelta(day=31)).strftime('%Y-%m-%d'),
}
return dates
elif time_unit == 'week':
dates = {}
start_week = date(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2])).isocalendar()
end_week = date(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2])).isocalendar()
a = int(start.split("-")[0])*52 + start_week[1]
z = int(stop.split("-")[0])*52 + end_week[1]
for i in range(a,z+1):
year = i/52
week = i%52
d = date(year, 1, 1)
dates[i] = {
'name' :"Week #%d" % week,
'start':(d + timedelta(days=-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
'stop' :(d + timedelta(days=6-d.weekday(), weeks=week)).strftime('%Y-%m-%d'),
}
return dates
else: # time_unit = day
dates = {}
a = datetime(int(start.split("-")[0]),int(start.split("-")[1]),int(start.split("-")[2]))
z = datetime(int(stop.split("-")[0]),int(stop.split("-")[1]),int(stop.split("-")[2]))
i = a
while i <= z:
dates[map(int,i.strftime('%Y%m%d').split())[0]] = {
'name' :i.strftime('%Y-%m-%d'),
'start':i.strftime('%Y-%m-%d'),
'stop' :i.strftime('%Y-%m-%d'),
}
i = i + relativedelta(days=+1)
return dates
return {}
def create(self, cr, uid, ids, datas, context=None):
assert len(ids), 'You should provide some ids!'
colors = choice_colors(len(ids))
cr.execute(
"SELECT MAX(mrp_production.date_planned) AS stop,MIN(mrp_production.date_planned) AS start "\
"FROM mrp_workcenter, mrp_production, mrp_production_workcenter_line "\
"WHERE mrp_production_workcenter_line.production_id=mrp_production.id "\
"AND mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id "\
"AND mrp_production.state NOT IN ('cancel','done') "\
"AND mrp_workcenter.id IN %s",(tuple(ids),))
res = cr.dictfetchone()
if not res['stop']:
res['stop'] = time.strftime('%Y-%m-%d %H:%M:%S')
if not res['start']:
res['start'] = time.strftime('%Y-%m-%d %H:%M:%S')
dates = self._compute_dates(datas['form']['time_unit'], res['start'][:10], res['stop'][:10])
dates_list = dates.keys()
dates_list.sort()
x_index = []
for date in dates_list:
x_index.append((dates[date]['name'], date))
pdf_string = StringIO.StringIO()
can = canvas.init(fname=pdf_string, format='pdf')
can.set_title("Work Center Loads")
chart_object.set_defaults(line_plot.T, line_style=None)
if datas['form']['measure_unit'] == 'cycles':
y_label = "Load (Cycles)"
else:
y_label = "Load (Hours)"
# For add the report header on the top of the report.
tb = text_box.T(loc=(300, 500), text="/hL/15/bWork Center Loads", line_style=None)
tb.draw()
ar = area.T(legend = legend.T(),
x_grid_style = line_style.gray70_dash1,
x_axis = axis.X(label="Periods", format="/a90/hC%s"),
x_coord = category_coord.T(x_index, 0),
y_axis = axis.Y(label=y_label),
y_range = (0, None),
size = (640,480))
        bar_plot.fill_styles.reset()
# select workcenters
cr.execute(
"SELECT mw.id, rs.name FROM mrp_workcenter mw, resource_resource rs " \
"WHERE mw.id IN %s and mw.resource_id=rs.id " \
"ORDER BY mw.id" ,(tuple(ids),))
workcenters = cr.dictfetchall()
data = []
for date in dates_list:
vals = []
for workcenter in workcenters:
cr.execute("SELECT SUM(mrp_production_workcenter_line.hour) AS hours, SUM(mrp_production_workcenter_line.cycle) AS cycles, \
resource_resource.name AS name, mrp_workcenter.id AS id \
FROM mrp_production_workcenter_line, mrp_production, mrp_workcenter, resource_resource \
WHERE (mrp_production_workcenter_line.production_id=mrp_production.id) \
AND (mrp_production_workcenter_line.workcenter_id=mrp_workcenter.id) \
AND (mrp_workcenter.resource_id=resource_resource.id) \
AND (mrp_workcenter.id=%s) \
AND (mrp_production.date_planned BETWEEN %s AND %s) \
GROUP BY mrp_production_workcenter_line.workcenter_id, resource_resource.name, mrp_workcenter.id \
ORDER BY mrp_workcenter.id", (workcenter['id'], dates[date]['start'] + ' 00:00:00', dates[date]['stop'] + ' 23:59:59'))
res = cr.dictfetchall()
if not res:
vals.append(0.0)
else:
if datas['form']['measure_unit'] == 'cycles':
vals.append(res[0]['cycles'] or 0.0)
else:
vals.append(res[0]['hours'] or 0.0)
toto = [dates[date]['name']]
for val in vals:
toto.append(val)
data.append(toto)
workcenter_num = 0
for workcenter in workcenters:
f = fill_style.Plain()
f.bgcolor = colors[workcenter_num]
            # cluster is (bar index, total number of clustered bars); use the
            # number of workcenters rather than the last query's row count.
            ar.add_plot(bar_plot.T(label=workcenter['name'], data=data, fill_style=f, hcol=workcenter_num+1, cluster=(workcenter_num, len(workcenters))))
workcenter_num += 1
if (not data) or (len(data[0]) <= 1):
ar = self._empty_graph(time.strftime('%Y-%m-%d'))
ar.draw(can)
# close canvas so that the file is written to "disk"
can.close()
self.obj = external_pdf(pdf_string.getvalue())
self.obj.render()
pdf_string.close()
return (self.obj.pdf, 'pdf')
def _empty_graph(self, date):
data = [[date, 0]]
ar = area.T(x_coord = category_coord.T(data, 0), y_range = (0, None),
x_axis = axis.X(label="Periods"),
y_axis = axis.Y(label="Load"))
ar.add_plot(bar_plot.T(data = data, label="No production order"))
return ar
report_custom('report.mrp.workcenter.load')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
omnirom/android_external_chromium-org
|
refs/heads/android-5.1
|
chrome/common/extensions/docs/examples/apps/hello-python/main.py
|
148
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.ext.webapp import template
from google.appengine.api.urlfetch import DownloadError
import oauth2
import urllib
import logging
import os
import time
from django.utils import simplejson
# Configuration
CONFIG = {
'oauth_consumer_key': 'anonymous',
'oauth_consumer_secret': 'anonymous',
'license_server': 'https://www.googleapis.com',
'license_path': '%(server)s/chromewebstore/v1/licenses/%(appid)s/%(userid)s',
'oauth_token': 'INSERT OAUTH TOKEN HERE',
'oauth_token_secret': 'INSERT OAUTH TOKEN SECRET HERE',
'app_id': 'INSERT APPLICATION ID HERE',
}
# Check to see if the server has been deployed. In the dev server, this
# env variable will start with 'Development', in production, it will start with
# 'Google App Engine'
IS_PRODUCTION = os.environ['SERVER_SOFTWARE'].startswith('Google App Engine')
# Valid access levels that may be returned by the license server.
VALID_ACCESS_LEVELS = ['FREE_TRIAL', 'FULL']
def fetch_license_data(userid):
"""Fetches the license for a given user by making an OAuth signed request
to the license server.
Args:
userid OpenID of the user you are checking access for.
Returns:
The server's response as text.
"""
url = CONFIG['license_path'] % {
'server': CONFIG['license_server'],
'appid': CONFIG['app_id'],
'userid': urllib.quote_plus(userid),
}
oauth_token = oauth2.Token(**{
'key': CONFIG['oauth_token'],
'secret': CONFIG['oauth_token_secret']
})
oauth_consumer = oauth2.Consumer(**{
'key': CONFIG['oauth_consumer_key'],
'secret': CONFIG['oauth_consumer_secret']
})
logging.debug('Requesting %s' % url)
client = oauth2.Client(oauth_consumer, oauth_token)
resp, content = client.request(url, 'GET')
logging.debug('Got response code %s, content %s' % (resp, content))
return content
def parse_license_data(userid):
"""Returns the license for a given user as a structured object.
Args:
userid: The OpenID of the user to check.
Returns:
An object with the following parameters:
error: True if something went wrong, False otherwise.
message: A descriptive message if error is True.
access: One of 'NO', 'FREE_TRIAL', or 'FULL' depending on the access.
"""
license = {'error': False, 'message': '', 'access': 'NO'}
try:
response_text = fetch_license_data(userid)
try:
logging.debug('Attempting to JSON parse: %s' % response_text)
json = simplejson.loads(response_text)
logging.debug('Got license server response: %s' % json)
except ValueError:
logging.exception('Could not parse response as JSON: %s' % response_text)
license['error'] = True
license['message'] = 'Could not parse the license server response'
except DownloadError:
logging.exception('Could not fetch license data')
license['error'] = True
license['message'] = 'Could not fetch license data'
  # Only inspect the parsed response when fetching and parsing both
  # succeeded; otherwise `json` would be unbound here and raise a NameError.
  if not license['error']:
    if json.has_key('error'):
      license['error'] = True
      license['message'] = json['error']['message']
    elif json['result'] == 'YES' and json['accessLevel'] in VALID_ACCESS_LEVELS:
      license['access'] = json['accessLevel']
return license
class MainHandler(webapp.RequestHandler):
"""Request handler class."""
def get(self):
"""Handler for GET requests."""
user = users.get_current_user()
if user:
if IS_PRODUCTION:
# We should use federated_identity in production, since the license
# server requires an OpenID
userid = user.federated_identity()
else:
# On the dev server, we won't have access to federated_identity, so
# just use a default OpenID which will never return YES.
# If you want to test different response values on the development
# server, just change this default value (e.g. append '-yes' or
# '-trial').
userid = ('https://www.google.com/accounts/o8/id?'
'id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
license_data = parse_license_data(userid)
template_data = {
'license': license_data,
'user_name': user.nickname(),
'user_id': userid,
'user_logout': users.create_logout_url(self.request.uri),
}
else:
# Force the OpenID login endpoint to be for Google accounts only, since
# the license server doesn't support any other type of OpenID provider.
login_url = users.create_login_url(dest_url='/',
federated_identity='google.com/accounts/o8/id')
template_data = {
'user_login': login_url,
}
# Render a simple template
path = os.path.join(os.path.dirname(__file__), 'templates', 'index.html')
self.response.out.write(template.render(path, template_data))
if __name__ == '__main__':
application = webapp.WSGIApplication([
('/', MainHandler),
], debug=False)
util.run_wsgi_app(application)
|
kamyu104/LeetCode
|
refs/heads/master
|
Python/remove-element.py
|
2
|
from __future__ import print_function
# Time: O(n)
# Space: O(1)
#
# Given an array and a value, remove all instances of that value in place and return the new length.
#
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
i, last = 0, len(A) - 1
while i <= last:
if A[i] == elem:
A[i], A[last] = A[last], A[i]
last -= 1
else:
i += 1
return last + 1
if __name__ == "__main__":
print(Solution().removeElement([1, 2, 3, 4, 5, 2, 2], 2))
|
chirilo/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
|
122
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import errno
import hashlib
import os
import re
from webkitpy.common.system import path
class MockFileSystem(object):
sep = '/'
pardir = '..'
def __init__(self, files=None, dirs=None, cwd='/'):
"""Initializes a "mock" filesystem that can be used to completely
stub out a filesystem.
Args:
files: a dict of filenames -> file contents. A file contents
value of None is used to indicate that the file should
not exist.
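        Example (an illustrative sketch):
        >>> fs = MockFileSystem(files={'/foo/bar.txt': 'contents'})
        >>> fs.exists('/foo/bar.txt'), fs.isdir('/foo')
        (True, True)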
"""
self.files = files or {}
self.written_files = {}
self.last_tmpdir = None
self.current_tmpno = 0
self.cwd = cwd
self.dirs = set(dirs or [])
self.dirs.add(cwd)
for f in self.files:
d = self.dirname(f)
while not d in self.dirs:
self.dirs.add(d)
d = self.dirname(d)
def clear_written_files(self):
# This function can be used to track what is written between steps in a test.
self.written_files = {}
def _raise_not_found(self, path):
raise IOError(errno.ENOENT, path, os.strerror(errno.ENOENT))
def _split(self, path):
# This is not quite a full implementation of os.path.split
# http://docs.python.org/library/os.path.html#os.path.split
if self.sep in path:
return path.rsplit(self.sep, 1)
return ('', path)
def abspath(self, path):
if os.path.isabs(path):
return self.normpath(path)
return self.abspath(self.join(self.cwd, path))
def realpath(self, path):
return self.abspath(path)
def basename(self, path):
return self._split(path)[1]
def expanduser(self, path):
if path[0] != "~":
return path
parts = path.split(self.sep, 1)
home_directory = self.sep + "Users" + self.sep + "mock"
if len(parts) == 1:
return home_directory
return home_directory + self.sep + parts[1]
def path_to_module(self, module_name):
return "/mock-checkout/Tools/Scripts/" + module_name.replace('.', '/') + ".py"
def chdir(self, path):
path = self.normpath(path)
if not self.isdir(path):
raise OSError(errno.ENOENT, path, os.strerror(errno.ENOENT))
self.cwd = path
def copyfile(self, source, destination):
if not self.exists(source):
self._raise_not_found(source)
if self.isdir(source):
raise IOError(errno.EISDIR, source, os.strerror(errno.EISDIR))
if self.isdir(destination):
raise IOError(errno.EISDIR, destination, os.strerror(errno.EISDIR))
if not self.exists(self.dirname(destination)):
raise IOError(errno.ENOENT, destination, os.strerror(errno.ENOENT))
self.files[destination] = self.files[source]
self.written_files[destination] = self.files[source]
def dirname(self, path):
return self._split(path)[0]
def exists(self, path):
return self.isfile(path) or self.isdir(path)
def files_under(self, path, dirs_to_skip=[], file_filter=None):
def filter_all(fs, dirpath, basename):
return True
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
if file_filter(self, self.dirname(path), self.basename(path)) and self.files[path] is not None:
files.append(path)
return files
if self.basename(path) in dirs_to_skip:
return []
if not path.endswith(self.sep):
path += self.sep
dir_substrings = [self.sep + d + self.sep for d in dirs_to_skip]
for filename in self.files:
if not filename.startswith(path):
continue
suffix = filename[len(path) - 1:]
if any(dir_substring in suffix for dir_substring in dir_substrings):
continue
dirpath, basename = self._split(filename)
if file_filter(self, dirpath, basename) and self.files[filename] is not None:
files.append(filename)
return files
def getcwd(self):
return self.cwd
def glob(self, glob_string):
# FIXME: This handles '*', but not '?', '[', or ']'.
glob_string = re.escape(glob_string)
glob_string = glob_string.replace('\\*', '[^\\/]*') + '$'
glob_string = glob_string.replace('\\/', '/')
path_filter = lambda path: re.match(glob_string, path)
# We could use fnmatch.fnmatch, but that might not do the right thing on windows.
existing_files = [path for path, contents in self.files.items() if contents is not None]
return filter(path_filter, existing_files) + filter(path_filter, self.dirs)
def isabs(self, path):
return path.startswith(self.sep)
def isfile(self, path):
return path in self.files and self.files[path] is not None
def isdir(self, path):
return self.normpath(path) in self.dirs
def _slow_but_correct_join(self, *comps):
return re.sub(re.escape(os.path.sep), self.sep, os.path.join(*comps))
def join(self, *comps):
# This function is called a lot, so we optimize it; there are
# unittests to check that we match _slow_but_correct_join(), above.
path = ''
sep = self.sep
for comp in comps:
if not comp:
continue
if comp[0] == sep:
path = comp
continue
if path:
path += sep
path += comp
if comps[-1] == '' and path:
path += '/'
path = path.replace(sep + sep, sep)
return path
def listdir(self, path):
sep = self.sep
if not self.isdir(path):
raise OSError("%s is not a directory" % path)
if not path.endswith(sep):
path += sep
dirs = []
files = []
for f in self.files:
if self.exists(f) and f.startswith(path):
remaining = f[len(path):]
if sep in remaining:
dir = remaining[:remaining.index(sep)]
if not dir in dirs:
dirs.append(dir)
else:
files.append(remaining)
return dirs + files
def mtime(self, path):
if self.exists(path):
return 0
self._raise_not_found(path)
def _mktemp(self, suffix='', prefix='tmp', dir=None, **kwargs):
if dir is None:
dir = self.sep + '__im_tmp'
curno = self.current_tmpno
self.current_tmpno += 1
self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
return self.last_tmpdir
def mkdtemp(self, **kwargs):
class TemporaryDirectory(object):
def __init__(self, fs, **kwargs):
self._kwargs = kwargs
self._filesystem = fs
self._directory_path = fs._mktemp(**kwargs)
fs.maybe_make_directory(self._directory_path)
def __str__(self):
return self._directory_path
def __enter__(self):
return self._directory_path
def __exit__(self, type, value, traceback):
# Only self-delete if necessary.
# FIXME: Should we delete non-empty directories?
if self._filesystem.exists(self._directory_path):
self._filesystem.rmtree(self._directory_path)
return TemporaryDirectory(fs=self, **kwargs)
def maybe_make_directory(self, *path):
norm_path = self.normpath(self.join(*path))
while norm_path and not self.isdir(norm_path):
self.dirs.add(norm_path)
norm_path = self.dirname(norm_path)
def move(self, source, destination):
if self.files[source] is None:
self._raise_not_found(source)
self.files[destination] = self.files[source]
self.written_files[destination] = self.files[destination]
self.files[source] = None
self.written_files[source] = None
def _slow_but_correct_normpath(self, path):
return re.sub(re.escape(os.path.sep), self.sep, os.path.normpath(path))
def normpath(self, path):
# This function is called a lot, so we try to optimize the common cases
# instead of always calling _slow_but_correct_normpath(), above.
if '..' in path or '/./' in path:
# This doesn't happen very often; don't bother trying to optimize it.
return self._slow_but_correct_normpath(path)
if not path:
return '.'
if path == '/':
return path
if path == '/.':
return '/'
if path.endswith('/.'):
return path[:-2]
if path.endswith('/'):
return path[:-1]
return path
def open_binary_tempfile(self, suffix=''):
path = self._mktemp(suffix)
return (WritableBinaryFileObject(self, path), path)
def open_binary_file_for_reading(self, path):
if self.files[path] is None:
self._raise_not_found(path)
return ReadableBinaryFileObject(self, path, self.files[path])
def read_binary_file(self, path):
# Intentionally raises KeyError if we don't recognize the path.
if self.files[path] is None:
self._raise_not_found(path)
return self.files[path]
def write_binary_file(self, path, contents):
# FIXME: should this assert if dirname(path) doesn't exist?
self.maybe_make_directory(self.dirname(path))
self.files[path] = contents
self.written_files[path] = contents
def open_text_file_for_reading(self, path):
if self.files[path] is None:
self._raise_not_found(path)
return ReadableTextFileObject(self, path, self.files[path])
def open_text_file_for_writing(self, path):
return WritableTextFileObject(self, path)
def read_text_file(self, path):
return self.read_binary_file(path).decode('utf-8')
def write_text_file(self, path, contents):
return self.write_binary_file(path, contents.encode('utf-8'))
def sha1(self, path):
contents = self.read_binary_file(path)
return hashlib.sha1(contents).hexdigest()
def relpath(self, path, start='.'):
# Since os.path.relpath() calls os.path.normpath()
# (see http://docs.python.org/library/os.path.html#os.path.abspath )
# it also removes trailing slashes and converts forward and backward
# slashes to the preferred slash os.sep.
start = self.abspath(start)
path = self.abspath(path)
if not path.lower().startswith(start.lower()):
# path is outside the directory given by start; compute path from root
return '../' * start.count('/') + path
rel_path = path[len(start):]
if not rel_path:
# Then the paths are the same.
pass
elif rel_path[0] == self.sep:
# It is probably sufficient to remove just the first character
# since os.path.normpath() collapses separators, but we use
# lstrip() just to be sure.
rel_path = rel_path.lstrip(self.sep)
else:
# We are in the case typified by the following example:
# path = "/tmp/foobar", start = "/tmp/foo" -> rel_path = "bar"
# FIXME: We return a less-than-optimal result here.
return '../' * start.count('/') + path
return rel_path
def remove(self, path):
if self.files[path] is None:
self._raise_not_found(path)
self.files[path] = None
self.written_files[path] = None
def rmtree(self, path):
path = self.normpath(path)
for f in self.files:
if f.startswith(path):
self.files[f] = None
self.dirs = set(filter(lambda d: not d.startswith(path), self.dirs))
def copytree(self, source, destination):
source = self.normpath(source)
destination = self.normpath(destination)
for source_file in self.files:
if source_file.startswith(source):
destination_path = self.join(destination, self.relpath(source_file, source))
self.maybe_make_directory(self.dirname(destination_path))
self.files[destination_path] = self.files[source_file]
def split(self, path):
idx = path.rfind(self.sep)
if idx == -1:
return ('', path)
return (path[:idx], path[(idx + 1):])
def splitext(self, path):
idx = path.rfind('.')
if idx == -1:
idx = len(path)
return (path[0:idx], path[idx:])
class WritableBinaryFileObject(object):
def __init__(self, fs, path):
self.fs = fs
self.path = path
self.closed = False
self.fs.files[path] = ""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.closed = True
def write(self, str):
self.fs.files[self.path] += str
self.fs.written_files[self.path] = self.fs.files[self.path]
class WritableTextFileObject(WritableBinaryFileObject):
def write(self, str):
WritableBinaryFileObject.write(self, str.encode('utf-8'))
class ReadableBinaryFileObject(object):
def __init__(self, fs, path, data):
self.fs = fs
self.path = path
self.closed = False
self.data = data
self.offset = 0
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.closed = True
def read(self, bytes=None):
if not bytes:
return self.data[self.offset:]
start = self.offset
self.offset += bytes
return self.data[start:self.offset]
class ReadableTextFileObject(ReadableBinaryFileObject):
def __init__(self, fs, path, data):
super(ReadableTextFileObject, self).__init__(fs, path, StringIO.StringIO(data.decode("utf-8")))
def close(self):
self.data.close()
super(ReadableTextFileObject, self).close()
def read(self, bytes=-1):
return self.data.read(bytes)
def readline(self, length=None):
return self.data.readline(length)
def __iter__(self):
return self.data.__iter__()
def next(self):
return self.data.next()
def seek(self, offset, whence=os.SEEK_SET):
self.data.seek(offset, whence)
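# --- Illustrative sketch (added for clarity; not part of the original
# mock). It assumes the enclosing class is a MockFileSystem-style object
# exposing the methods defined above; the name and the way `fs` is
# constructed are assumptions, not given by this file.
def _demo_mock_filesystem_roundtrip(fs):
    # Write text, read it back, and hash it -- exercising the utf-8
    # round-trip done by write_text_file()/read_text_file() and sha1().
    fs.write_text_file('/tmp/demo.txt', u'hello')
    assert fs.read_text_file('/tmp/demo.txt') == u'hello'
    assert fs.sha1('/tmp/demo.txt') == hashlib.sha1('hello').hexdigest()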
|
Korkki/django
|
refs/heads/master
|
tests/save_delete_hooks/models.py
|
409
|
"""
Adding hooks before/after saving and deleting
To execute arbitrary code around ``save()`` and ``delete()``, just subclass
the methods.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.data = []
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
def save(self, *args, **kwargs):
self.data.append("Before save")
# Call the "real" save() method
super(Person, self).save(*args, **kwargs)
self.data.append("After save")
def delete(self):
self.data.append("Before deletion")
# Call the "real" delete() method
super(Person, self).delete()
self.data.append("After deletion")
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/commands/me_reckless_stimulation_3.py
|
2
|
import sys
from services.command import CombatCommand
def setup(core, actor, buff):
return
def run(core, actor, target, commandString):
combatSvc = core.combatService
weapon = core.objectService.getObject(actor.getWeaponId())
if not weapon:
weapon = actor.getSlottedObject('default_weapon')
command = core.commandService.getCommandByName('me_reckless_stimulation_3')
if not combatSvc.applySpecialCost(actor, weapon, CombatCommand('me_reckless_stimulation_3')):
return
actor.playEffectObject('clienteffect/medic_reckless_stimulation.cef', 'root')
actor.setAction(actor.getAction() + 2000)
return
|
cowlicks/dask
|
refs/heads/master
|
dask/dataframe/tests/test_categorical.py
|
1
|
import pandas as pd
import pandas.util.testing as tm
import pytest
import dask
from dask.async import get_sync
import dask.dataframe as dd
from dask.dataframe.utils import make_meta
def test_categorize():
dsk = {('x', 0): pd.DataFrame({'a': ['Alice', 'Bob', 'Alice'],
'b': ['C', 'D', 'E']},
index=[0, 1, 2]),
('x', 1): pd.DataFrame({'a': ['Bob', 'Charlie', 'Charlie'],
'b': ['A', 'A', 'B']},
index=[3, 4, 5])}
meta = make_meta({'a': 'O', 'b': 'O'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 3, 5])
full = d.compute()
c = d.categorize('a')
cfull = c.compute()
assert cfull.dtypes['a'] == 'category'
assert cfull.dtypes['b'] == 'O'
assert list(cfull.a.astype('O')) == list(full.a)
assert (d._get(c.dask, c._keys()[:1])[0].dtypes == cfull.dtypes).all()
assert (d.categorize().compute().dtypes == 'category').all()
@pytest.mark.parametrize('shuffle', ['disk', 'tasks'])
def test_categorical_set_index(shuffle):
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': ['a', 'b', 'b', 'c']})
df['y'] = df.y.astype('category', ordered=True)
a = dd.from_pandas(df, npartitions=2)
with dask.set_options(get=get_sync, shuffle=shuffle):
b = a.set_index('y')
d1, d2 = b.get_partition(0), b.get_partition(1)
assert list(d1.index.compute()) == ['a']
assert list(sorted(d2.index.compute())) == ['b', 'b', 'c']
b = a.set_index(a.y)
d1, d2 = b.get_partition(0), b.get_partition(1)
assert list(d1.index.compute()) == ['a']
assert list(sorted(d2.index.compute())) == ['b', 'b', 'c']
b = a.set_partition('y', ['a', 'b', 'c'])
d1, d2 = b.get_partition(0), b.get_partition(1)
assert list(d1.index.compute()) == ['a']
assert list(sorted(d2.index.compute())) == ['b', 'b', 'c']
def test_dataframe_categoricals():
df = pd.DataFrame({'x': list('a'*5 + 'b'*5 + 'c'*5),
'y': range(15)})
df.x = df.x.astype('category')
ddf = dd.from_pandas(df, npartitions=2)
assert (ddf.x.cat.categories == pd.Index(['a', 'b', 'c'])).all()
assert 'cat' in dir(ddf.x)
assert not hasattr(df.y, 'cat')
assert 'cat' not in dir(ddf.y)
def test_categories():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Categorical(['a', 'b', 'a', 'c'])},
index=pd.CategoricalIndex(['x', 'x', 'y', 'y']))
categories = dd.categorical.get_categories(df)
assert set(categories.keys()) == set(['y', '.index'])
assert list(categories['y']) == ['a', 'b', 'c']
assert list(categories['.index']) == ['x', 'y']
df2 = dd.categorical.strip_categories(df)
assert not dd.categorical.get_categories(df2)
df3 = dd.categorical._categorize(categories, df2)
tm.assert_frame_equal(df, df3)
|
srmagura/goodnight-lead
|
refs/heads/master
|
gl_site/migrations/0014_dashboardtext.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('gl_site', '0013_siteconfig'),
]
operations = [
migrations.CreateModel(
name='DashboardText',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('inventory_desc', models.TextField()),
('mental_health_warning', models.TextField()),
('about_panel_title', models.TextField()),
('about_panel_contents', ckeditor.fields.RichTextField()),
],
options={
'verbose_name': 'Dashboard text',
'verbose_name_plural': 'Dashboard text',
},
),
]
|
OBA-code/tgstation
|
refs/heads/master
|
tools/midi2piano/midi/midi.py
|
102
|
#! /usr/bin/python3
# unsupported 20091104 ...
# ['set_sequence_number', dtime, sequence]
# ['raw_data', dtime, raw]
r'''
This module offers functions: concatenate_scores(), grep(),
merge_scores(), mix_scores(), midi2opus(), midi2score(), opus2midi(),
opus2score(), play_score(), score2midi(), score2opus(), score2stats(),
score_type(), segment(), timeshift() and to_millisecs(),
where "midi" means the MIDI-file bytes (as can be put in a .mid file,
or piped into aplaymidi), and "opus" and "score" are list-structures
as inspired by Sean Burke's MIDI-Perl CPAN module.
Download MIDI.py from http://www.pjb.com.au/midi/free/MIDI.py
and put it in your PYTHONPATH. MIDI.py depends on Python3.
There is also a call-compatible translation into Lua of this
module: see http://www.pjb.com.au/comp/lua/MIDI.html
The "opus" is a direct translation of the midi-file-events, where
the times are delta-times, in ticks, since the previous event.
The "score" is more human-centric; it uses absolute times, and
combines the separate note_on and note_off events into one "note"
event, with a duration:
['note', start_time, duration, channel, note, velocity] # in a "score"
EVENTS (in an "opus" structure)
['note_off', dtime, channel, note, velocity] # in an "opus"
['note_on', dtime, channel, note, velocity] # in an "opus"
['key_after_touch', dtime, channel, note, velocity]
['control_change', dtime, channel, controller(0-127), value(0-127)]
['patch_change', dtime, channel, patch]
['channel_after_touch', dtime, channel, velocity]
['pitch_wheel_change', dtime, channel, pitch_wheel]
['text_event', dtime, text]
['copyright_text_event', dtime, text]
['track_name', dtime, text]
['instrument_name', dtime, text]
['lyric', dtime, text]
['marker', dtime, text]
['cue_point', dtime, text]
['text_event_08', dtime, text]
['text_event_09', dtime, text]
['text_event_0a', dtime, text]
['text_event_0b', dtime, text]
['text_event_0c', dtime, text]
['text_event_0d', dtime, text]
['text_event_0e', dtime, text]
['text_event_0f', dtime, text]
['end_track', dtime]
['set_tempo', dtime, tempo]
['smpte_offset', dtime, hr, mn, se, fr, ff]
['time_signature', dtime, nn, dd, cc, bb]
['key_signature', dtime, sf, mi]
['sequencer_specific', dtime, raw]
['raw_meta_event', dtime, command(0-255), raw]
['sysex_f0', dtime, raw]
['sysex_f7', dtime, raw]
['song_position', dtime, song_pos]
['song_select', dtime, song_number]
['tune_request', dtime]
DATA TYPES
channel = a value 0 to 15
controller = 0 to 127 (see http://www.pjb.com.au/muscript/gm.html#cc )
dtime = time measured in "ticks", 0 to 268435455
velocity = a value 0 (soft) to 127 (loud)
note = a value 0 to 127 (middle-C is 60)
patch = 0 to 127 (see http://www.pjb.com.au/muscript/gm.html )
pitch_wheel = a value -8192 to 8191 (0x1FFF)
raw = 0 or more bytes of binary data
sequence_number = a value 0 to 65,535 (0xFFFF)
song_pos = a value 0 to 16,383 (0x3FFF)
song_number = a value 0 to 127
tempo = microseconds per crochet (quarter-note), 0 to 16777215
text = a string of 0 or more bytes of ASCII text
ticks = the number of ticks per crochet (quarter-note)
GOING THROUGH A SCORE WITHIN A PYTHON PROGRAM
channels = {2,3,5,8,13}
itrack = 1 # skip 1st element which is ticks
while itrack < len(score):
for event in score[itrack]:
if event[0] == 'note': # for example,
pass # do something to all notes
# or, to work on events in only particular channels...
channel_index = MIDI.Event2channelindex.get(event[0], False)
if channel_index and (event[channel_index] in channels):
pass # do something to channels 2,3,5,8 and 13
itrack += 1
'''
import sys, struct, os, copy
# sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb')
Version = '6.2'
VersionDate = '20150101'
# 20150101 6.2 all text events can be 8-bit; let user get the right encoding
# 20141231 6.1 fix _some_text_event; sequencer_specific data can be 8-bit
# 20141230 6.0 synth_specific data can be 8-bit
# 20120504 5.9 add the contents of mid_opus_tracks()
# 20120208 5.8 fix num_notes_by_channel() ; should be a dict
# 20120129 5.7 _encode handles empty tracks; score2stats num_notes_by_channel
# 20111111 5.6 fix patch 45 and 46 in Number2patch, should be Harp
# 20110129 5.5 add mix_opus_tracks() and event2alsaseq()
# 20110126 5.4 "previous message repeated N times" to save space on stderr
# 20110125 5.2 opus2score terminates unended notes at the end of the track
# 20110124 5.1 the warnings in midi2opus display track_num
# 20110122 5.0 if garbage, midi2opus returns the opus so far
# 20110119 4.9 non-ascii chars stripped out of the text_events
# 20110110 4.8 note_on with velocity=0 treated as a note-off
# 20110108 4.6 unknown F-series event correctly eats just one byte
# 20101010 4.2 segment() uses start_time, end_time named params
# 20101005 4.1 timeshift() must not pad the set_tempo command
# 20101003 4.0 pitch2note_event must be chapitch2note_event
# 20100918 3.9 set_sequence_number supported, FWIW
# 20100913 3.7 many small bugfixes; passes all tests
# 20100910 3.6 concatenate_scores enforces ticks=1000, just like merge_scores
# 20100908 3.5 minor bugs fixed in score2stats
# 20091104 3.4 tune_request now supported
# 20091104 3.3 fixed bug in decoding song_position and song_select
# 20091104 3.2 unsupported: set_sequence_number tune_request raw_data
# 20091101 3.1 document how to traverse a score within Python
# 20091021 3.0 fixed bug in score2stats detecting GM-mode = 0
# 20091020 2.9 score2stats reports GM-mode and bank msb,lsb events
# 20091019 2.8 in merge_scores, channel 9 must remain channel 9 (in GM)
# 20091018 2.7 handles empty tracks gracefully
# 20091015 2.6 grep() selects channels
# 20091010 2.5 merge_scores reassigns channels to avoid conflicts
# 20091010 2.4 fixed bug in to_millisecs which now only does opusses
# 20091010 2.3 score2stats returns channels & patch_changes, by_track & total
# 20091010 2.2 score2stats() returns also pitches and percussion dicts
# 20091010 2.1 bugs: >= not > in segment, to notice patch_change at time 0
# 20091010 2.0 bugs: spurious pop(0) ( in _decode sysex
# 20091008 1.9 bugs: ISO decoding in sysex; str( not int( in note-off warning
# 20091008 1.8 add concatenate_scores()
# 20091006 1.7 score2stats() measures nticks and ticks_per_quarter
# 20091004 1.6 first mix_scores() and merge_scores()
# 20090424 1.5 timeshift() bugfix: earliest only sees events after from_time
# 20090330 1.4 timeshift() has also a from_time argument
# 20090322 1.3 timeshift() has also a start_time argument
# 20090319 1.2 add segment() and timeshift()
# 20090301 1.1 add to_millisecs()
_previous_warning = '' # 5.4
_previous_times = 0 # 5.4
#------------------------------- Encoding stuff --------------------------
def opus2midi(opus=[]):
r'''The argument is a list: the first item in the list is the "ticks"
parameter, the others are the tracks. Each track is a list
of midi-events, and each event is itself a list; see above.
opus2midi() returns a bytestring of the MIDI, which can then be
written either to a file opened in binary mode (mode='wb'),
or to stdout by means of: sys.stdout.buffer.write()
my_opus = [
96,
[ # track 0:
['patch_change', 0, 1, 8], # and these are the events...
['note_on', 5, 1, 25, 96],
['note_off', 96, 1, 25, 0],
['note_on', 0, 1, 29, 96],
['note_off', 96, 1, 29, 0],
], # end of track 0
]
my_midi = opus2midi(my_opus)
sys.stdout.buffer.write(my_midi)
'''
if len(opus) < 2:
opus=[1000, [],]
tracks = copy.deepcopy(opus)
ticks = int(tracks.pop(0))
ntracks = len(tracks)
if ntracks == 1:
format = 0
else:
format = 1
my_midi = b"MThd\x00\x00\x00\x06"+struct.pack('>HHH',format,ntracks,ticks)
for track in tracks:
events = _encode(track)
my_midi += b'MTrk' + struct.pack('>I',len(events)) + events
_clean_up_warnings()
return my_midi
def score2opus(score=None):
r'''
The argument is a list: the first item in the list is the "ticks"
parameter, the others are the tracks. Each track is a list
of score-events, and each event is itself a list. A score-event
is similar to an opus-event (see above), except that in a score:
1) the times are expressed as an absolute number of ticks
from the track's start time
2) the pairs of 'note_on' and 'note_off' events in an "opus"
are abstracted into a single 'note' event in a "score":
['note', start_time, duration, channel, pitch, velocity]
score2opus() returns a list specifying the equivalent "opus".
my_score = [
96,
[ # track 0:
['patch_change', 0, 1, 8],
['note', 5, 96, 1, 25, 96],
['note', 101, 96, 1, 29, 96]
], # end of track 0
]
my_opus = score2opus(my_score)
'''
if len(score) < 2:
score=[1000, [],]
tracks = copy.deepcopy(score)
ticks = int(tracks.pop(0))
opus_tracks = []
for scoretrack in tracks:
time2events = dict([])
for scoreevent in scoretrack:
if scoreevent[0] == 'note':
note_on_event = ['note_on',scoreevent[1],
scoreevent[3],scoreevent[4],scoreevent[5]]
note_off_event = ['note_off',scoreevent[1]+scoreevent[2],
scoreevent[3],scoreevent[4],scoreevent[5]]
if time2events.get(note_on_event[1]):
time2events[note_on_event[1]].append(note_on_event)
else:
time2events[note_on_event[1]] = [note_on_event,]
if time2events.get(note_off_event[1]):
time2events[note_off_event[1]].append(note_off_event)
else:
time2events[note_off_event[1]] = [note_off_event,]
continue
if time2events.get(scoreevent[1]):
time2events[scoreevent[1]].append(scoreevent)
else:
time2events[scoreevent[1]] = [scoreevent,]
sorted_times = [] # list of keys
for k in time2events.keys():
sorted_times.append(k)
sorted_times.sort()
sorted_events = [] # once-flattened list of values sorted by key
for time in sorted_times:
sorted_events.extend(time2events[time])
abs_time = 0
for event in sorted_events: # convert abs times => delta times
delta_time = event[1] - abs_time
abs_time = event[1]
event[1] = delta_time
opus_tracks.append(sorted_events)
opus_tracks.insert(0,ticks)
_clean_up_warnings()
return opus_tracks
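# Minimal sketch (added for illustration; the values are made up): a
# single score 'note' expands into a note_on/note_off pair, with the
# absolute times converted back to delta-times.
def _example_score2opus():
    score = [96, [['note', 5, 96, 1, 25, 96]]]
    opus = score2opus(score)
    # opus == [96, [['note_on', 5, 1, 25, 96], ['note_off', 96, 1, 25, 96]]]
    return opus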
def score2midi(score=None):
r'''
Translates a "score" into MIDI, using score2opus() then opus2midi()
'''
return opus2midi(score2opus(score))
#--------------------------- Decoding stuff ------------------------
def midi2opus(midi=b''):
r'''Translates MIDI into a "opus". For a description of the
"opus" format, see opus2midi()
'''
my_midi=bytearray(midi)
if len(my_midi) < 4:
_clean_up_warnings()
return [1000,[],]
id = bytes(my_midi[0:4])
if id != b'MThd':
_warn("midi2opus: midi starts with "+str(id)+" instead of 'MThd'")
_clean_up_warnings()
return [1000,[],]
[length, format, tracks_expected, ticks] = struct.unpack(
'>IHHH', bytes(my_midi[4:14]))
if length != 6:
_warn("midi2opus: midi header length was "+str(length)+" instead of 6")
_clean_up_warnings()
return [1000,[],]
my_opus = [ticks,]
my_midi = my_midi[14:]
track_num = 1 # 5.1
while len(my_midi) >= 8:
track_type = bytes(my_midi[0:4])
if track_type != b'MTrk':
_warn('midi2opus: Warning: track #'+str(track_num)+' type is '+str(track_type)+" instead of b'MTrk'")
[track_length] = struct.unpack('>I', my_midi[4:8])
my_midi = my_midi[8:]
if track_length > len(my_midi):
_warn('midi2opus: track #'+str(track_num)+' length '+str(track_length)+' is too large')
_clean_up_warnings()
return my_opus # 5.0
my_midi_track = my_midi[0:track_length]
my_track = _decode(my_midi_track)
my_opus.append(my_track)
my_midi = my_midi[track_length:]
track_num += 1 # 5.1
_clean_up_warnings()
return my_opus
def opus2score(opus=[]):
r'''For a description of the "opus" and "score" formats,
see opus2midi() and score2opus().
'''
if len(opus) < 2:
_clean_up_warnings()
return [1000,[],]
tracks = copy.deepcopy(opus) # couple of slices probably quicker...
ticks = int(tracks.pop(0))
score = [ticks,]
for opus_track in tracks:
ticks_so_far = 0
score_track = []
chapitch2note_on_events = dict([]) # 4.0
for opus_event in opus_track:
ticks_so_far += opus_event[1]
if opus_event[0] == 'note_off' or (opus_event[0] == 'note_on' and opus_event[4] == 0): # 4.8
cha = opus_event[2]
pitch = opus_event[3]
key = cha*128 + pitch
if chapitch2note_on_events.get(key):
new_event = chapitch2note_on_events[key].pop(0)
new_event[2] = ticks_so_far - new_event[1]
score_track.append(new_event)
elif pitch > 127:
_warn('opus2score: note_off with no note_on, bad pitch='+str(pitch))
else:
_warn('opus2score: note_off with no note_on cha='+str(cha)+' pitch='+str(pitch))
elif opus_event[0] == 'note_on':
cha = opus_event[2]
pitch = opus_event[3]
key = cha*128 + pitch
new_event = ['note',ticks_so_far,0,cha,pitch, opus_event[4]]
if chapitch2note_on_events.get(key):
chapitch2note_on_events[key].append(new_event)
else:
chapitch2note_on_events[key] = [new_event,]
else:
opus_event[1] = ticks_so_far
score_track.append(opus_event)
# check for unterminated notes (Oisín) -- 5.2
for chapitch in chapitch2note_on_events:
note_on_events = chapitch2note_on_events[chapitch]
for new_e in note_on_events:
new_e[2] = ticks_so_far - new_e[1]
score_track.append(new_e)
_warn("opus2score: note_on with no note_off cha="+str(new_e[3])+' pitch='+str(new_e[4])+'; adding note_off at end')
score.append(score_track)
_clean_up_warnings()
return score
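# Minimal sketch (added for illustration): the inverse pairing -- a
# note_on and its matching note_off collapse into one 'note' event
# whose third field is the duration in ticks.
def _example_opus2score():
    opus = [96, [['note_on', 5, 1, 25, 96], ['note_off', 96, 1, 25, 0]]]
    score = opus2score(opus)
    # score == [96, [['note', 5, 96, 1, 25, 96]]]
    return score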
def midi2score(midi=b''):
r'''
Translates MIDI into a "score", using midi2opus() then opus2score()
'''
return opus2score(midi2opus(midi))
def midi2ms_score(midi=b''):
r'''
Translates MIDI into a "score" with one beat per second and one
tick per millisecond, using midi2opus() then to_millisecs()
then opus2score()
'''
return opus2score(to_millisecs(midi2opus(midi)))
#------------------------ Other Transformations ---------------------
def to_millisecs(old_opus=None):
r'''Recalibrates all the times in an "opus" to use one beat
per second and one tick per millisecond. This makes it
hard to retrieve any information about beats or barlines,
but it does make it easy to mix different scores together.
'''
if old_opus == None:
return [1000,[],]
try:
old_tpq = int(old_opus[0])
except IndexError: # 5.0
_warn('to_millisecs: the opus '+str(type(old_opus))+' has no elements')
return [1000,[],]
new_opus = [1000,]
millisec_per_old_tick = 1000.0 / old_tpq # float: will be rounded later
itrack = 1
while itrack < len(old_opus):
millisec_so_far = 0.0
previous_millisec_so_far = 0.0
new_track = [['set_tempo',0,1000000],] # new "crochet" is 1 sec
for old_event in old_opus[itrack]:
if old_event[0] == 'note':
raise TypeError('to_millisecs needs an opus, not a score')
new_event = copy.deepcopy(old_event)
millisec_so_far += (millisec_per_old_tick * old_event[1])
new_event[1] = round(millisec_so_far - previous_millisec_so_far)
if old_event[0] == 'set_tempo':
millisec_per_old_tick = old_event[2] / (1000.0 * old_tpq)
else:
previous_millisec_so_far = millisec_so_far
new_track.append(new_event)
new_opus.append(new_track)
itrack += 1
_clean_up_warnings()
return new_opus
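# Minimal sketch (added for illustration): with no set_tempo event the
# old crochet is taken as one second, so at 96 ticks-per-crochet a
# delta of 96 old ticks becomes 1000 new (millisecond) ticks.
def _example_to_millisecs():
    old = [96, [['note_on', 96, 0, 60, 100]]]
    new = to_millisecs(old)
    # new == [1000, [['set_tempo', 0, 1000000], ['note_on', 1000, 0, 60, 100]]]
    return new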
def event2alsaseq(event=None): # 5.5
r'''Converts an event into the format needed by the alsaseq module,
http://pp.com.mx/python/alsaseq
The type of track (opus or score) is autodetected.
'''
pass
def grep(score=None, channels=None):
r'''Returns a "score" containing only the channels specified
'''
if score == None:
return [1000,[],]
ticks = score[0]
new_score = [ticks,]
if channels == None:
return new_score
channels = set(channels)
global Event2channelindex
itrack = 1
while itrack < len(score):
new_score.append([])
for event in score[itrack]:
channel_index = Event2channelindex.get(event[0], False)
if channel_index:
if event[channel_index] in channels:
new_score[itrack].append(event)
else:
new_score[itrack].append(event)
itrack += 1
return new_score
def play_score(score=None):
r'''Converts the "score" to midi, and feeds it into 'aplaymidi -'
'''
if score == None:
return
import subprocess
pipe = subprocess.Popen(['aplaymidi','-'], stdin=subprocess.PIPE)
if score_type(score) == 'opus':
pipe.stdin.write(opus2midi(score))
else:
pipe.stdin.write(score2midi(score))
pipe.stdin.close()
def timeshift(score=None, shift=None, start_time=None, from_time=0, tracks={0,1,2,3,4,5,6,7,8,10,11,12,13,14,15}):
r'''Returns a "score" shifted in time by "shift" ticks, or shifted
so that the first event starts at "start_time" ticks.
If "from_time" is specified, only those events in the score
that begin after it are shifted. If "start_time" is less than
"from_time" (or "shift" is negative), then the intermediate
notes are deleted, though patch-change events are preserved.
If "tracks" are specified, then only those tracks get shifted.
"tracks" can be a list, tuple or set; it gets converted to set
internally.
It is deprecated to specify both "shift" and "start_time".
If this does happen, timeshift() will print a warning to
stderr and ignore the "shift" argument.
If "shift" is negative and sufficiently large that it would
leave some event with a negative tick-value, then the score
is shifted so that the first event occurs at time 0. This
also occurs if "start_time" is negative, and is also the
default if neither "shift" nor "start_time" are specified.
'''
#_warn('tracks='+str(tracks))
if score == None or len(score) < 2:
return [1000, [],]
new_score = [score[0],]
my_type = score_type(score)
if my_type == '':
return new_score
if my_type == 'opus':
_warn("timeshift: opus format is not supported\n")
# _clean_up_scores() 6.2; doesn't exist! what was it supposed to do?
return new_score
if not (shift == None) and not (start_time == None):
_warn("timeshift: shift and start_time specified: ignoring shift\n")
shift = None
if shift == None:
if (start_time == None) or (start_time < 0):
start_time = 0
# shift = start_time - from_time
i = 1 # ignore first element (ticks)
tracks = set(tracks) # defend against tuples and lists
earliest = 1000000000
if not (start_time == None) or shift < 0: # first find the earliest event
while i < len(score):
if len(tracks) and not ((i-1) in tracks):
i += 1
continue
for event in score[i]:
if event[1] < from_time:
continue # just inspect the to_be_shifted events
if event[1] < earliest:
earliest = event[1]
i += 1
if earliest > 999999999:
earliest = 0
if shift == None:
shift = start_time - earliest
elif (earliest + shift) < 0:
start_time = 0
shift = 0 - earliest
i = 1 # ignore first element (ticks)
while i < len(score):
if len(tracks) == 0 or not ((i-1) in tracks): # 3.8
new_score.append(score[i])
i += 1
continue
new_track = []
for event in score[i]:
new_event = list(event)
#if new_event[1] == 0 and shift > 0 and new_event[0] != 'note':
# pass
#elif new_event[1] >= from_time:
if new_event[1] >= from_time:
# 4.1 must not rightshift set_tempo
if new_event[0] != 'set_tempo' or shift<0:
new_event[1] += shift
elif (shift < 0) and (new_event[1] >= (from_time+shift)):
continue
new_track.append(new_event)
if len(new_track) > 0:
new_score.append(new_track)
i += 1
_clean_up_warnings()
return new_score
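# Minimal sketch (added for illustration): left-align a score so its
# first event lands on tick 0; set_tempo events are not right-shifted,
# per the 4.1 rule above.
def _example_timeshift(score):
    return timeshift(score, start_time=0)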
def segment(score=None, start_time=None, end_time=None, start=0, end=100000000,
tracks={0,1,2,3,4,5,6,7,8,10,11,12,13,14,15}):
r'''Returns a "score" which is a segment of the one supplied
as the argument, beginning at "start_time" ticks and ending
at "end_time" ticks (or at the end if "end_time" is not supplied).
If the set "tracks" is specified, only those tracks will
be returned.
'''
if score == None or len(score) < 2:
return [1000, [],]
if start_time == None: # as of 4.2 start_time is recommended
start_time = start # start is legacy usage
if end_time == None: # likewise
end_time = end
new_score = [score[0],]
my_type = score_type(score)
if my_type == '':
return new_score
if my_type == 'opus':
# more difficult (disconnecting note_on's from their note_off's)...
_warn("segment: opus format is not supported\n")
_clean_up_warnings()
return new_score
i = 1 # ignore first element (ticks); we count in ticks anyway
tracks = set(tracks) # defend against tuples and lists
while i < len(score):
if len(tracks) and not ((i-1) in tracks):
i += 1
continue
new_track = []
channel2patch_num = {} # keep most recent patch change before start
channel2patch_time = {}
set_tempo_num = 1000000 # keep most recent tempo change before start
set_tempo_time = 0
earliest_note_time = end_time
for event in score[i]:
if event[0] == 'patch_change':
patch_time = channel2patch_time.get(event[2]) or 0
if (event[1] < start_time) and (event[1] >= patch_time): # 2.0
channel2patch_num[event[2]] = event[3]
channel2patch_time[event[2]] = event[1]
if event[0] == 'set_tempo':
if (event[1] < start_time) and (event[1] >= set_tempo_time):
set_tempo_num = event[2]
set_tempo_time = event[1]
if (event[1] >= start_time) and (event[1] <= end_time):
new_track.append(event)
if (event[0] == 'note') and (event[1] < earliest_note_time):
earliest_note_time = event[1]
if len(new_track) > 0:
for c in channel2patch_num:
new_track.append(['patch_change',start_time,c,channel2patch_num[c]])
new_track.append(['set_tempo', start_time, set_tempo_num])
new_score.append(new_track)
i += 1
_clean_up_warnings()
return new_score
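# Minimal sketch (added for illustration; tick values are arbitrary):
# cut out the span from tick 0 to tick 4000 of a score, keeping only
# tracks 0 and 1.  segment() re-inserts the patch and tempo that were
# in force at start_time.
def _example_segment(score):
    return segment(score, start_time=0, end_time=4000, tracks={0, 1})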
def score_type(opus_or_score=None):
r'''Returns a string, either 'opus' or 'score' or ''
'''
if opus_or_score == None or str(type(opus_or_score)).find('list')<0 or len(opus_or_score) < 2:
return ''
i = 1 # ignore first element
while i < len(opus_or_score):
for event in opus_or_score[i]:
if event[0] == 'note':
return 'score'
elif event[0] == 'note_on':
return 'opus'
i += 1
return ''
def concatenate_scores(scores):
r'''Concatenates a list of scores into one score.
If the scores differ in their "ticks" parameter,
they will all get converted to millisecond-tick format.
'''
# the deepcopys are needed if the input_score's are refs to the same obj
# e.g. if invoked by midisox's repeat()
input_scores = _consistentise_ticks(scores) # 3.7
output_score = copy.deepcopy(input_scores[0])
for input_score in input_scores[1:]:
output_stats = score2stats(output_score)
delta_ticks = output_stats['nticks']
itrack = 1
while itrack < len(input_score):
if itrack >= len(output_score): # new output track if doesn't exist
output_score.append([])
for event in input_score[itrack]:
output_score[itrack].append(copy.deepcopy(event))
output_score[itrack][-1][1] += delta_ticks
itrack += 1
return output_score
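# Minimal sketch (added for illustration): appending one score after
# another; if their "ticks" parameters differ, both are first converted
# to millisecond ticks by _consistentise_ticks().
def _example_concatenate(score_a, score_b):
    return concatenate_scores([score_a, score_b])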
def merge_scores(scores):
r'''Merges a list of scores into one score. A merged score comprises
all of the tracks from all of the input scores; un-merging is possible
by selecting just some of the tracks. If the scores differ in their
"ticks" parameter, they will all get converted to millisecond-tick
format. merge_scores attempts to resolve channel-conflicts,
but there are of course only 15 available channels...
'''
input_scores = _consistentise_ticks(scores) # 3.6
output_score = [1000]
channels_so_far = set()
all_channels = {0,1,2,3,4,5,6,7,8,10,11,12,13,14,15}
global Event2channelindex
for input_score in input_scores:
new_channels = set(score2stats(input_score).get('channels_total', []))
new_channels.discard(9) # 2.8 cha9 must remain cha9 (in GM)
for channel in channels_so_far & new_channels:
# consistently choose lowest available, to ease testing
free_channels = list(all_channels - (channels_so_far|new_channels))
if len(free_channels) > 0:
free_channels.sort()
free_channel = free_channels[0]
else:
free_channel = None
break
itrack = 1
while itrack < len(input_score):
for input_event in input_score[itrack]:
channel_index=Event2channelindex.get(input_event[0],False)
if channel_index and input_event[channel_index]==channel:
input_event[channel_index] = free_channel
itrack += 1
channels_so_far.add(free_channel)
channels_so_far |= new_channels
output_score.extend(input_score[1:])
return output_score
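# Minimal sketch (added for illustration): merging two single-track
# scores yields one two-track score; channel clashes (other than
# channel 9) are reassigned where a free channel exists.
def _example_merge_scores(score_a, score_b):
    return merge_scores([score_a, score_b])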
def _ticks(event):
return event[1]
def mix_opus_tracks(input_tracks): # 5.5
r'''Mixes an array of tracks into one track. A mixed track
cannot be un-mixed. It is assumed that the tracks share the same
ticks parameter and the same tempo.
Mixing score-tracks is trivial (just insert all events into one array).
Mixing opus-tracks is only slightly harder, but it's common enough
that a dedicated function is useful.
'''
output_score = [1000, []]
for input_track in input_tracks: # 5.8
input_score = opus2score([1000, input_track])
for event in input_score[1]:
output_score[1].append(event)
output_score[1].sort(key=_ticks)
output_opus = score2opus(output_score)
return output_opus[1]
def mix_scores(scores):
r'''Mixes a list of scores into one one-track score.
A mixed score cannot be un-mixed. Hopefully the scores
have no undesirable channel-conflicts between them.
If the scores differ in their "ticks" parameter,
they will all get converted to millisecond-tick format.
'''
input_scores = _consistentise_ticks(scores) # 3.6
output_score = [1000, []]
for input_score in input_scores:
for input_track in input_score[1:]:
output_score[1].extend(input_track)
return output_score
def score2stats(opus_or_score=None):
r'''Returns a dict of some basic stats about the score, like
bank_select (list of tuples (msb,lsb)),
channels_by_track (list of lists), channels_total (set),
general_midi_mode (list),
ntracks, nticks, patch_changes_by_track (list of dicts),
num_notes_by_channel (list of numbers),
patch_changes_total (set),
percussion (dict histogram of channel 9 events),
pitches (dict histogram of pitches on channels other than 9),
pitch_range_by_track (list, by track, of two-member-tuples),
pitch_range_sum (sum over tracks of the pitch_ranges),
'''
bank_select_msb = -1
bank_select_lsb = -1
bank_select = []
channels_by_track = []
channels_total = set([])
general_midi_mode = []
num_notes_by_channel = dict([])
patches_used_by_track = []
patches_used_total = set([])
patch_changes_by_track = []
patch_changes_total = set([])
percussion = dict([]) # histogram of channel 9 "pitches"
pitches = dict([]) # histogram of pitch-occurrences channels 0-8,10-15
pitch_range_sum = 0 # sum, over tracks, of the pitch-ranges
pitch_range_by_track = []
is_a_score = True
if opus_or_score == None:
return {'bank_select':[], 'channels_by_track':[], 'channels_total':[],
'general_midi_mode':[], 'ntracks':0, 'nticks':0,
'num_notes_by_channel':dict([]),
'patch_changes_by_track':[], 'patch_changes_total':[],
'percussion':{}, 'pitches':{}, 'pitch_range_by_track':[],
'ticks_per_quarter':0, 'pitch_range_sum':0}
ticks_per_quarter = opus_or_score[0]
i = 1 # ignore first element, which is ticks
nticks = 0
while i < len(opus_or_score):
highest_pitch = 0
lowest_pitch = 128
channels_this_track = set([])
patch_changes_this_track = dict({})
for event in opus_or_score[i]:
if event[0] == 'note':
num_notes_by_channel[event[3]] = num_notes_by_channel.get(event[3],0) + 1
if event[3] == 9:
percussion[event[4]] = percussion.get(event[4],0) + 1
else:
pitches[event[4]] = pitches.get(event[4],0) + 1
if event[4] > highest_pitch:
highest_pitch = event[4]
if event[4] < lowest_pitch:
lowest_pitch = event[4]
channels_this_track.add(event[3])
channels_total.add(event[3])
finish_time = event[1] + event[2]
if finish_time > nticks:
nticks = finish_time
elif event[0] == 'note_off' or (event[0] == 'note_on' and event[4] == 0): # 4.8
finish_time = event[1]
if finish_time > nticks:
nticks = finish_time
elif event[0] == 'note_on':
is_a_score = False
num_notes_by_channel[event[2]] = num_notes_by_channel.get(event[2],0) + 1
if event[2] == 9:
percussion[event[3]] = percussion.get(event[3],0) + 1
else:
pitches[event[3]] = pitches.get(event[3],0) + 1
if event[3] > highest_pitch:
highest_pitch = event[3]
if event[3] < lowest_pitch:
lowest_pitch = event[3]
channels_this_track.add(event[2])
channels_total.add(event[2])
elif event[0] == 'patch_change':
patch_changes_this_track[event[2]] = event[3]
patch_changes_total.add(event[3])
elif event[0] == 'control_change':
if event[3] == 0: # bank select MSB
bank_select_msb = event[4]
elif event[3] == 32: # bank select LSB
bank_select_lsb = event[4]
if bank_select_msb >= 0 and bank_select_lsb >= 0:
bank_select.append((bank_select_msb,bank_select_lsb))
bank_select_msb = -1
bank_select_lsb = -1
elif event[0] == 'sysex_f0':
if _sysex2midimode.get(event[2], -1) >= 0:
general_midi_mode.append(_sysex2midimode.get(event[2]))
if is_a_score:
if event[1] > nticks:
nticks = event[1]
else:
nticks += event[1]
if lowest_pitch == 128:
lowest_pitch = 0
channels_by_track.append(channels_this_track)
patch_changes_by_track.append(patch_changes_this_track)
pitch_range_by_track.append((lowest_pitch,highest_pitch))
pitch_range_sum += (highest_pitch-lowest_pitch)
i += 1
return {'bank_select':bank_select,
'channels_by_track':channels_by_track,
'channels_total':channels_total,
'general_midi_mode':general_midi_mode,
'ntracks':len(opus_or_score)-1,
'nticks':nticks,
'num_notes_by_channel':num_notes_by_channel,
'patch_changes_by_track':patch_changes_by_track,
'patch_changes_total':patch_changes_total,
'percussion':percussion,
'pitches':pitches,
'pitch_range_by_track':pitch_range_by_track,
'pitch_range_sum':pitch_range_sum,
'ticks_per_quarter':ticks_per_quarter}
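# Minimal sketch (added for illustration): pull a few headline numbers
# out of the stats dict for a score read from raw MIDI bytes.
def _example_score2stats(midi_bytes):
    stats = score2stats(midi2score(midi_bytes))
    return stats['ntracks'], stats['nticks'], sorted(stats['channels_total'])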
#----------------------------- Event stuff --------------------------
_sysex2midimode = {
"\x7E\x7F\x09\x01\xF7": 1,
"\x7E\x7F\x09\x02\xF7": 0,
"\x7E\x7F\x09\x03\xF7": 2,
}
# Some public-access tuples:
MIDI_events = tuple('''note_off note_on key_after_touch
control_change patch_change channel_after_touch
pitch_wheel_change'''.split())
Text_events = tuple('''text_event copyright_text_event
track_name instrument_name lyric marker cue_point text_event_08
text_event_09 text_event_0a text_event_0b text_event_0c
text_event_0d text_event_0e text_event_0f'''.split())
Nontext_meta_events = tuple('''end_track set_tempo
smpte_offset time_signature key_signature sequencer_specific
raw_meta_event sysex_f0 sysex_f7 song_position song_select
tune_request'''.split())
# unsupported: raw_data
# Actually, 'tune_request' is an F-series event, not strictly a meta-event...
Meta_events = Text_events + Nontext_meta_events
All_events = MIDI_events + Meta_events
# And three dictionaries:
Number2patch = { # General MIDI patch numbers:
0:'Acoustic Grand',
1:'Bright Acoustic',
2:'Electric Grand',
3:'Honky-Tonk',
4:'Electric Piano 1',
5:'Electric Piano 2',
6:'Harpsichord',
7:'Clav',
8:'Celesta',
9:'Glockenspiel',
10:'Music Box',
11:'Vibraphone',
12:'Marimba',
13:'Xylophone',
14:'Tubular Bells',
15:'Dulcimer',
16:'Drawbar Organ',
17:'Percussive Organ',
18:'Rock Organ',
19:'Church Organ',
20:'Reed Organ',
21:'Accordion',
22:'Harmonica',
23:'Tango Accordion',
24:'Acoustic Guitar(nylon)',
25:'Acoustic Guitar(steel)',
26:'Electric Guitar(jazz)',
27:'Electric Guitar(clean)',
28:'Electric Guitar(muted)',
29:'Overdriven Guitar',
30:'Distortion Guitar',
31:'Guitar Harmonics',
32:'Acoustic Bass',
33:'Electric Bass(finger)',
34:'Electric Bass(pick)',
35:'Fretless Bass',
36:'Slap Bass 1',
37:'Slap Bass 2',
38:'Synth Bass 1',
39:'Synth Bass 2',
40:'Violin',
41:'Viola',
42:'Cello',
43:'Contrabass',
44:'Tremolo Strings',
45:'Pizzicato Strings',
46:'Orchestral Harp',
47:'Timpani',
48:'String Ensemble 1',
49:'String Ensemble 2',
50:'SynthStrings 1',
51:'SynthStrings 2',
52:'Choir Aahs',
53:'Voice Oohs',
54:'Synth Voice',
55:'Orchestra Hit',
56:'Trumpet',
57:'Trombone',
58:'Tuba',
59:'Muted Trumpet',
60:'French Horn',
61:'Brass Section',
62:'SynthBrass 1',
63:'SynthBrass 2',
64:'Soprano Sax',
65:'Alto Sax',
66:'Tenor Sax',
67:'Baritone Sax',
68:'Oboe',
69:'English Horn',
70:'Bassoon',
71:'Clarinet',
72:'Piccolo',
73:'Flute',
74:'Recorder',
75:'Pan Flute',
76:'Blown Bottle',
77:'Shakuhachi',
78:'Whistle',
79:'Ocarina',
80:'Lead 1 (square)',
81:'Lead 2 (sawtooth)',
82:'Lead 3 (calliope)',
83:'Lead 4 (chiff)',
84:'Lead 5 (charang)',
85:'Lead 6 (voice)',
86:'Lead 7 (fifths)',
87:'Lead 8 (bass+lead)',
88:'Pad 1 (new age)',
89:'Pad 2 (warm)',
90:'Pad 3 (polysynth)',
91:'Pad 4 (choir)',
92:'Pad 5 (bowed)',
93:'Pad 6 (metallic)',
94:'Pad 7 (halo)',
95:'Pad 8 (sweep)',
96:'FX 1 (rain)',
97:'FX 2 (soundtrack)',
98:'FX 3 (crystal)',
99:'FX 4 (atmosphere)',
100:'FX 5 (brightness)',
101:'FX 6 (goblins)',
102:'FX 7 (echoes)',
103:'FX 8 (sci-fi)',
104:'Sitar',
105:'Banjo',
106:'Shamisen',
107:'Koto',
108:'Kalimba',
109:'Bagpipe',
110:'Fiddle',
111:'Shanai',
112:'Tinkle Bell',
113:'Agogo',
114:'Steel Drums',
115:'Woodblock',
116:'Taiko Drum',
117:'Melodic Tom',
118:'Synth Drum',
119:'Reverse Cymbal',
120:'Guitar Fret Noise',
121:'Breath Noise',
122:'Seashore',
123:'Bird Tweet',
124:'Telephone Ring',
125:'Helicopter',
126:'Applause',
127:'Gunshot',
}
Notenum2percussion = { # General MIDI Percussion (on Channel 9):
35:'Acoustic Bass Drum',
36:'Bass Drum 1',
37:'Side Stick',
38:'Acoustic Snare',
39:'Hand Clap',
40:'Electric Snare',
41:'Low Floor Tom',
42:'Closed Hi-Hat',
43:'High Floor Tom',
44:'Pedal Hi-Hat',
45:'Low Tom',
46:'Open Hi-Hat',
47:'Low-Mid Tom',
48:'Hi-Mid Tom',
49:'Crash Cymbal 1',
50:'High Tom',
51:'Ride Cymbal 1',
52:'Chinese Cymbal',
53:'Ride Bell',
54:'Tambourine',
55:'Splash Cymbal',
56:'Cowbell',
57:'Crash Cymbal 2',
58:'Vibraslap',
59:'Ride Cymbal 2',
60:'Hi Bongo',
61:'Low Bongo',
62:'Mute Hi Conga',
63:'Open Hi Conga',
64:'Low Conga',
65:'High Timbale',
66:'Low Timbale',
67:'High Agogo',
68:'Low Agogo',
69:'Cabasa',
70:'Maracas',
71:'Short Whistle',
72:'Long Whistle',
73:'Short Guiro',
74:'Long Guiro',
75:'Claves',
76:'Hi Wood Block',
77:'Low Wood Block',
78:'Mute Cuica',
79:'Open Cuica',
80:'Mute Triangle',
81:'Open Triangle',
}
Event2channelindex = { 'note':3, 'note_off':2, 'note_on':2,
'key_after_touch':2, 'control_change':2, 'patch_change':2,
'channel_after_touch':2, 'pitch_wheel_change':2
}
################################################################
# The code below this line is full of frightening things, all to
# do with the actual encoding and decoding of binary MIDI data.
def _twobytes2int(byte_a):
r'''decode a 16 bit quantity from two bytes,'''
return (byte_a[1] | (byte_a[0] << 8))
def _int2twobytes(int_16bit):
r'''encode a 16 bit quantity into two bytes,'''
return bytes([(int_16bit>>8) & 0xFF, int_16bit & 0xFF])
def _read_14_bit(byte_a):
r'''decode a 14 bit quantity from two bytes,'''
return (byte_a[0] | (byte_a[1] << 7))
def _write_14_bit(int_14bit):
r'''encode a 14 bit quantity into two bytes,'''
return bytes([int_14bit & 0x7F, (int_14bit>>7) & 0x7F])
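# Worked example (added for illustration): a centred pitch-wheel value
# of 0 is stored biased as 0 + 0x2000 = 8192, which splits into 7-bit
# halves, least-significant first: b'\x00\x40'.
def _example_14_bit_roundtrip():
    assert _write_14_bit(0 + 0x2000) == bytes([0x00, 0x40])
    assert _read_14_bit(bytes([0x00, 0x40])) - 0x2000 == 0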
def _ber_compressed_int(integer):
r'''BER compressed integer (not an ASN.1 BER, see perlpacktut for
details). Its bytes represent an unsigned integer in base 128,
most significant digit first, with as few digits as possible.
Bit eight (the high bit) is set on each byte except the last.
'''
ber = bytearray(b'')
seven_bits = 0x7F & integer
ber.insert(0, seven_bits) # XXX surely should convert to a char ?
integer >>= 7
while integer > 0:
seven_bits = 0x7F & integer
ber.insert(0, 0x80|seven_bits) # XXX surely should convert to a char ?
integer >>= 7
return ber
def _unshift_ber_int(ba):
r'''Given a bytearray, returns a tuple of (the ber-integer at the
start, and the remainder of the bytearray).
'''
byte = ba.pop(0)
integer = 0
while True:
integer += (byte & 0x7F)
if not (byte & 0x80):
return ((integer, ba))
if not len(ba):
_warn('_unshift_ber_int: no end-of-integer found')
return ((0, ba))
byte = ba.pop(0)
integer <<= 7
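# Worked example (added for illustration): 16384 = 1*128**2 + 0*128 + 0,
# so its BER form is the base-128 digits 1,0,0 with the continuation
# bit set on every byte but the last: 0x81 0x80 0x00.
def _example_ber_roundtrip():
    assert _ber_compressed_int(16384) == bytearray([0x81, 0x80, 0x00])
    value, rest = _unshift_ber_int(bytearray(b'\x81\x80\x00\x90'))
    assert value == 16384 and rest == bytearray(b'\x90')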
def _clean_up_warnings(): # 5.4
# Call this before returning from any publicly callable function
# whenever there's a possibility that a warning might have been printed
# by the function, or by any private functions it might have called.
global _previous_times
global _previous_warning
if _previous_times > 1:
print(' previous message repeated '+str(_previous_times)+' times', file=sys.stderr)
elif _previous_times > 0:
print(' previous message repeated', file=sys.stderr)
_previous_times = 0
_previous_warning = ''
def _warn(s=''):
global _previous_times
global _previous_warning
if s == _previous_warning: # 5.4
_previous_times = _previous_times + 1
else:
_clean_up_warnings()
print(str(s), file=sys.stderr)
_previous_warning = s
def _some_text_event(which_kind=0x01, text='some_text'):
# if which_kind == 0x7F: # 6.1 sequencer_specific data can be 8-bit
data = bytes(text, encoding='ISO-8859-1') # 6.2 and also text data!
# else: data = bytes(text, encoding='ascii')
return b'\xFF'+bytes((which_kind,))+_ber_compressed_int(len(data))+data
def _consistentise_ticks(scores): # 3.6
# used by mix_scores, merge_scores, concatenate_scores
if len(scores) == 1:
return copy.deepcopy(scores)
are_consistent = True
ticks = scores[0][0]
iscore = 1
while iscore < len(scores):
if scores[iscore][0] != ticks:
are_consistent = False
break
iscore += 1
if are_consistent:
return copy.deepcopy(scores)
new_scores = []
iscore = 0
while iscore < len(scores):
score = scores[iscore]
new_scores.append(opus2score(to_millisecs(score2opus(score))))
iscore += 1
return new_scores
###########################################################################
def _decode(trackdata=b'', exclude=None, include=None,
event_callback=None, exclusive_event_callback=None, no_eot_magic=False):
r'''Decodes MIDI track data into an opus-style list of events.
The options:
'exclude' is a list of event types which will be ignored SHOULD BE A SET
'include' (and no exclude), makes exclude a list
of all possible events, /minus/ what include specifies
'event_callback' is a coderef
'exclusive_event_callback' is a coderef
'''
trackdata = bytearray(trackdata)
if exclude == None:
exclude = []
if include == None:
include = []
if include and not exclude:
exclude = All_events
include = set(include)
exclude = set(exclude)
# Pointer = 0; not used here; we eat through the bytearray instead.
event_code = -1; # used for running status
event_count = 0;
events = []
while(len(trackdata)):
# loop while there's anything to analyze ...
eot = False # When True, the event registrar aborts this loop
event_count += 1
E = []
# E for events - we'll feed it to the event registrar at the end.
# Slice off the delta time code, and analyze it
[time, remainder] = _unshift_ber_int(trackdata)
# Now let's see what we can make of the command
first_byte = trackdata.pop(0) & 0xFF
if (first_byte < 0xF0): # It's a MIDI event
if (first_byte & 0x80):
event_code = first_byte
else:
# It wants running status; use last event_code value
trackdata.insert(0, first_byte)
if (event_code == -1):
_warn("Running status not set; Aborting track.")
return []
command = event_code & 0xF0
channel = event_code & 0x0F
if (command == 0xF6): # 0-byte argument
pass
elif (command == 0xC0 or command == 0xD0): # 1-byte argument
parameter = trackdata.pop(0) # could be B
else: # 2-byte argument could be BB or 14-bit
parameter = (trackdata.pop(0), trackdata.pop(0))
#################################################################
# MIDI events
if (command == 0x80):
if 'note_off' in exclude:
continue
E = ['note_off', time, channel, parameter[0], parameter[1]]
elif (command == 0x90):
if 'note_on' in exclude:
continue
E = ['note_on', time, channel, parameter[0], parameter[1]]
elif (command == 0xA0):
if 'key_after_touch' in exclude:
continue
E = ['key_after_touch',time,channel,parameter[0],parameter[1]]
elif (command == 0xB0):
if 'control_change' in exclude:
continue
E = ['control_change',time,channel,parameter[0],parameter[1]]
elif (command == 0xC0):
if 'patch_change' in exclude:
continue
E = ['patch_change', time, channel, parameter]
elif (command == 0xD0):
if 'channel_after_touch' in exclude:
continue
E = ['channel_after_touch', time, channel, parameter]
elif (command == 0xE0):
if 'pitch_wheel_change' in exclude:
continue
E = ['pitch_wheel_change', time, channel,
_read_14_bit(parameter)-0x2000]
else:
_warn("Shouldn't get here; command="+hex(command))
elif (first_byte == 0xFF): # It's a Meta-Event! ##################
#[command, length, remainder] =
# unpack("xCwa*", substr(trackdata, $Pointer, 6));
#Pointer += 6 - len(remainder);
# # Move past JUST the length-encoded.
command = trackdata.pop(0) & 0xFF
[length, trackdata] = _unshift_ber_int(trackdata)
if (command == 0x00):
if (length == 2):
E = ['set_sequence_number',time,_twobytes2int(trackdata)]
else:
_warn('set_sequence_number: length must be 2, not '+str(length))
E = ['set_sequence_number', time, 0]
elif command >= 0x01 and command <= 0x0f: # Text events
# 6.2 take it in bytes; let the user get the right encoding.
# text_str = trackdata[0:length].decode('ascii','ignore')
text_str = trackdata[0:length].decode('ISO-8859-1')
# Defined text events
if (command == 0x01):
E = ['text_event', time, text_str]
elif (command == 0x02):
E = ['copyright_text_event', time, text_str]
elif (command == 0x03):
E = ['track_name', time, text_str]
elif (command == 0x04):
E = ['instrument_name', time, text_str]
elif (command == 0x05):
E = ['lyric', time, text_str]
elif (command == 0x06):
E = ['marker', time, text_str]
elif (command == 0x07):
E = ['cue_point', time, text_str]
# Reserved but apparently unassigned text events
elif (command == 0x08):
E = ['text_event_08', time, text_str]
elif (command == 0x09):
E = ['text_event_09', time, text_str]
elif (command == 0x0a):
E = ['text_event_0a', time, text_str]
elif (command == 0x0b):
E = ['text_event_0b', time, text_str]
elif (command == 0x0c):
E = ['text_event_0c', time, text_str]
elif (command == 0x0d):
E = ['text_event_0d', time, text_str]
elif (command == 0x0e):
E = ['text_event_0e', time, text_str]
elif (command == 0x0f):
E = ['text_event_0f', time, text_str]
# Now the sticky events -------------------------------------
elif (command == 0x2F):
E = ['end_track', time]
# The code for handling this, oddly, comes LATER,
# in the event registrar.
elif (command == 0x51): # DTime, Microseconds/Crochet
if length != 3:
_warn('set_tempo event, but length='+str(length))
E = ['set_tempo', time,
struct.unpack(">I", b'\x00'+trackdata[0:3])[0]]
elif (command == 0x54):
if length != 5: # DTime, HR, MN, SE, FR, FF
_warn('smpte_offset event, but length='+str(length))
E = ['smpte_offset',time] + list(struct.unpack(">BBBBB",trackdata[0:5]))
elif (command == 0x58):
if length != 4: # DTime, NN, DD, CC, BB
_warn('time_signature event, but length='+str(length))
E = ['time_signature', time]+list(trackdata[0:4])
elif (command == 0x59):
if length != 2: # DTime, SF(signed), MI
_warn('key_signature event, but length='+str(length))
E = ['key_signature',time] + list(struct.unpack(">bB",trackdata[0:2]))
elif (command == 0x7F):
E = ['sequencer_specific',time,
trackdata[0:length].decode('ISO-8859-1')] # 6.0
else:
E = ['raw_meta_event', time, command,
trackdata[0:length].decode('ISO-8859-1')] # 6.0
#"[uninterpretable meta-event command of length length]"
# DTime, Command, Binary Data
# It's uninterpretable; record it as raw_data.
# Pointer += length; # Now move Pointer
trackdata = trackdata[length:]
######################################################################
elif (first_byte == 0xF0 or first_byte == 0xF7):
# Note that sysexes in MIDI /files/ are different than sysexes
# in MIDI transmissions!! The vast majority of system exclusive
# messages will just use the F0 format. For instance, the
# transmitted message F0 43 12 00 07 F7 would be stored in a
# MIDI file as F0 05 43 12 00 07 F7. As mentioned above, it is
# required to include the F7 at the end so that the reader of the
# MIDI file knows that it has read the entire message. (But the F7
# is omitted if this is a non-final block in a multiblock sysex;
# but the F7 (if there) is counted in the message's declared
# length, so we don't have to think about it anyway.)
#command = trackdata.pop(0)
[length, trackdata] = _unshift_ber_int(trackdata)
if first_byte == 0xF0:
# 20091008 added ISO-8859-1 to get an 8-bit str
E = ['sysex_f0', time, trackdata[0:length].decode('ISO-8859-1')]
else:
E = ['sysex_f7', time, trackdata[0:length].decode('ISO-8859-1')]
trackdata = trackdata[length:]
######################################################################
# Now, the MIDI file spec says:
# <track data> = <MTrk event>+
# <MTrk event> = <delta-time> <event>
# <event> = <MIDI event> | <sysex event> | <meta-event>
# I know that, on the wire, <MIDI event> can include note_on,
# note_off, and all the other 8x to Ex events, AND Fx events
# other than F0, F7, and FF -- namely, <song position msg>,
# <song select msg>, and <tune request>.
#
# Whether these can occur in MIDI files is not clearly specified
# in the MIDI file spec. So, I'm going to assume that
# they CAN, in practice, occur. I don't know whether it's
# proper for you to actually emit these into a MIDI file.
elif (first_byte == 0xF2): # DTime, Beats
# <song position msg> ::= F2 <data pair>
E = ['song_position', time, _read_14_bit(trackdata[:2])]
trackdata = trackdata[2:]
elif (first_byte == 0xF3): # <song select msg> ::= F3 <data singlet>
# E = ['song_select', time, struct.unpack('>B',trackdata.pop(0))[0]]
E = ['song_select', time, trackdata[0]]
trackdata = trackdata[1:]
# DTime, Thing (what?! song number? whatever ...)
elif (first_byte == 0xF6): # DTime
E = ['tune_request', time]
# What would a tune request be doing in a MIDI /file/?
#########################################################
# ADD MORE META-EVENTS HERE. TODO:
# f1 -- MTC Quarter Frame Message. One data byte follows
# the Status; it's the time code value, from 0 to 127.
# f8 -- MIDI clock. no data.
# fa -- MIDI start. no data.
# fb -- MIDI continue. no data.
# fc -- MIDI stop. no data.
# fe -- Active sense. no data.
# f4 f5 f9 fd -- unallocated
r'''
elif (first_byte > 0xF0) { # Some unknown kinda F-series event ####
# Here we only produce a one-byte piece of raw data.
# But the encoder for 'raw_data' accepts any length of it.
E = [ 'raw_data',
time, substr(trackdata,Pointer,1) ]
# DTime and the Data (in this case, the one Event-byte)
++Pointer; # itself
'''
elif first_byte > 0xF0: # Some unknown F-series event
# Here we only produce a one-byte piece of raw data.
E = ['raw_data', time, trackdata[0:1].decode('ISO-8859-1')] # slice, not index: indexing a bytearray yields an int
trackdata = trackdata[1:]
else: # Fallthru.
_warn("Aborting track. Command-byte first_byte="+hex(first_byte))
break
# End of the big if-group
######################################################################
# THE EVENT REGISTRAR...
if E and (E[0] == 'end_track'):
# This is the code for exceptional handling of the EOT event.
eot = True
if not no_eot_magic:
if E[1] > 0: # a null text-event to carry the delta-time
E = ['text_event', E[1], '']
else:
E = [] # EOT with a delta-time of 0; ignore it.
if E and not (E[0] in exclude):
#if ( $exclusive_event_callback ):
# &{ $exclusive_event_callback }( @E );
#else:
# &{ $event_callback }( @E ) if $event_callback;
events.append(E)
if eot:
break
# End of the big "Event" while-block
return events
###########################################################################
def _encode(events_lol, unknown_callback=None, never_add_eot=False,
no_eot_magic=False, no_running_status=False):
# encode an event structure, presumably for writing to a file
# Calling format:
# $data_r = MIDI::Event::encode( \@event_lol, { options } );
# Takes a REFERENCE to an event structure (a LoL)
# Returns an (unblessed) REFERENCE to track data.
# If you want to use this to encode a /single/ event,
# you still have to do it as a reference to an event structure (a LoL)
# that just happens to have just one event. I.e.,
# encode( [ $event ] ) or encode( [ [ 'note_on', 100, 5, 42, 64] ] )
# If you're doing this, consider the never_add_eot track option, as in
# print MIDI ${ encode( [ $event], { 'never_add_eot' => 1} ) };
data = [] # what I'll store the chunks of byte-data in
# This is so my end_track magic won't corrupt the original
events = copy.deepcopy(events_lol)
if not never_add_eot:
# One way or another, tack on an 'end_track'
if events:
last = events[-1]
if not (last[0] == 'end_track'): # no end_track already
if (last[0] == 'text_event' and len(last[2]) == 0):
# 0-length text event at track-end.
if no_eot_magic:
# Exceptional case: don't mess with track-final
# 0-length text_events; just peg on an end_track
events.append(['end_track', 0])
else:
# NORMAL CASE: replace with an end_track, leaving DTime
last[0] = 'end_track'
else:
# last event was neither 0-length text_event nor end_track
events.append(['end_track', 0])
else: # an eventless track!
events = [['end_track', 0],]
# maybe_running_status = not no_running_status # unused? 4.7
last_status = -1
for event_r in (events):
E = copy.deepcopy(event_r)
# otherwise the shifting'd corrupt the original
if not E:
continue
event = E.pop(0)
if not len(event):
continue
dtime = int(E.pop(0))
# print('event='+str(event)+' dtime='+str(dtime))
event_data = ''
if ( # MIDI events -- eligible for running status
event == 'note_on'
or event == 'note_off'
or event == 'control_change'
or event == 'key_after_touch'
or event == 'patch_change'
or event == 'channel_after_touch'
or event == 'pitch_wheel_change' ):
# This block is where we spend most of the time. Gotta be tight.
if (event == 'note_off'):
status = 0x80 | (int(E[0]) & 0x0F)
parameters = struct.pack('>BB', int(E[1])&0x7F, int(E[2])&0x7F)
elif (event == 'note_on'):
status = 0x90 | (int(E[0]) & 0x0F)
parameters = struct.pack('>BB', int(E[1])&0x7F, int(E[2])&0x7F)
elif (event == 'key_after_touch'):
status = 0xA0 | (int(E[0]) & 0x0F)
parameters = struct.pack('>BB', int(E[1])&0x7F, int(E[2])&0x7F)
elif (event == 'control_change'):
status = 0xB0 | (int(E[0]) & 0x0F)
parameters = struct.pack('>BB', int(E[1])&0xFF, int(E[2])&0xFF)
elif (event == 'patch_change'):
status = 0xC0 | (int(E[0]) & 0x0F)
parameters = struct.pack('>B', int(E[1]) & 0xFF)
elif (event == 'channel_after_touch'):
status = 0xD0 | (int(E[0]) & 0x0F)
parameters = struct.pack('>B', int(E[1]) & 0xFF)
elif (event == 'pitch_wheel_change'):
status = 0xE0 | (int(E[0]) & 0x0F)
parameters = _write_14_bit(int(E[1]) + 0x2000)
else:
_warn("BADASS FREAKOUT ERROR 31415!")
# And now the encoding
# w = BER compressed integer (not ASN.1 BER, see perlpacktut for
# details). Its bytes represent an unsigned integer in base 128,
# most significant digit first, with as few digits as possible.
# Bit eight (the high bit) is set on each byte except the last.
data.append(_ber_compressed_int(dtime))
if (status != last_status) or no_running_status:
data.append(struct.pack('>B', status))
data.append(parameters)
last_status = status
continue
else:
# Not a MIDI event.
# All the code in this block could be more efficient,
# but this is not where the code needs to be tight.
# print "zaz $event\n";
last_status = -1
if event == 'raw_meta_event':
event_data = _some_text_event(int(E[0]), E[1])
elif (event == 'set_sequence_number'): # 3.9
event_data = b'\xFF\x00\x02'+_int2twobytes(E[0])
# Text meta-events...
# a case for a dict, I think (pjb) ...
elif (event == 'text_event'):
event_data = _some_text_event(0x01, E[0])
elif (event == 'copyright_text_event'):
event_data = _some_text_event(0x02, E[0])
elif (event == 'track_name'):
event_data = _some_text_event(0x03, E[0])
elif (event == 'instrument_name'):
event_data = _some_text_event(0x04, E[0])
elif (event == 'lyric'):
event_data = _some_text_event(0x05, E[0])
elif (event == 'marker'):
event_data = _some_text_event(0x06, E[0])
elif (event == 'cue_point'):
event_data = _some_text_event(0x07, E[0])
elif (event == 'text_event_08'):
event_data = _some_text_event(0x08, E[0])
elif (event == 'text_event_09'):
event_data = _some_text_event(0x09, E[0])
elif (event == 'text_event_0a'):
event_data = _some_text_event(0x0A, E[0])
elif (event == 'text_event_0b'):
event_data = _some_text_event(0x0B, E[0])
elif (event == 'text_event_0c'):
event_data = _some_text_event(0x0C, E[0])
elif (event == 'text_event_0d'):
event_data = _some_text_event(0x0D, E[0])
elif (event == 'text_event_0e'):
event_data = _some_text_event(0x0E, E[0])
elif (event == 'text_event_0f'):
event_data = _some_text_event(0x0F, E[0])
# End of text meta-events
elif (event == 'end_track'):
event_data = b"\xFF\x2F\x00"
elif (event == 'set_tempo'):
#event_data = struct.pack(">BBwa*", 0xFF, 0x51, 3,
# substr( struct.pack('>I', E[0]), 1, 3))
event_data = b'\xFF\x51\x03'+struct.pack('>I',E[0])[1:]
elif (event == 'smpte_offset'):
# event_data = struct.pack(">BBwBBBBB", 0xFF, 0x54, 5, E[0:5] )
event_data = struct.pack(">BBBbBBBB", 0xFF,0x54,0x05,E[0],E[1],E[2],E[3],E[4])
elif (event == 'time_signature'):
# event_data = struct.pack(">BBwBBBB", 0xFF, 0x58, 4, E[0:4] )
event_data = struct.pack(">BBBbBBB", 0xFF, 0x58, 0x04, E[0],E[1],E[2],E[3])
elif (event == 'key_signature'):
event_data = struct.pack(">BBBbB", 0xFF, 0x59, 0x02, E[0],E[1])
elif (event == 'sequencer_specific'):
# event_data = struct.pack(">BBwa*", 0xFF,0x7F, len(E[0]), E[0])
event_data = _some_text_event(0x7F, E[0])
# End of Meta-events
# Other Things...
elif (event == 'sysex_f0'):
#event_data = struct.pack(">Bwa*", 0xF0, len(E[0]), E[0])
#B=bitstring w=BER-compressed-integer a=null-padded-ascii-str
event_data = bytearray(b'\xF0')+_ber_compressed_int(len(E[0]))+bytearray(bytes(E[0],encoding='ISO-8859-1'))
elif (event == 'sysex_f7'):
#event_data = struct.pack(">Bwa*", 0xF7, len(E[0]), E[0])
event_data = bytearray(b'\xF7')+_ber_compressed_int(len(E[0]))+bytearray(bytes(E[0],encoding='ISO-8859-1'))
elif (event == 'song_position'):
event_data = b"\xF2" + _write_14_bit( E[0] )
elif (event == 'song_select'):
event_data = struct.pack('>BB', 0xF3, E[0] )
elif (event == 'tune_request'):
event_data = b"\xF6"
elif (event == 'raw_data'):
_warn("_encode: raw_data event not supported")
# event_data = E[0]
continue
# End of Other Stuff
else:
# The Big Fallthru
if unknown_callback:
# push(@data, &{ $unknown_callback }( @$event_r ))
pass
else:
_warn("Unknown event: "+str(event))
                # To suppress the complaint here, just set
                # 'unknown_callback' => sub { return () }
continue
#print "Event $event encoded part 2\n"
        if isinstance(event_data, str):
event_data = bytearray(event_data.encode('Latin1', 'ignore'))
        if len(event_data): # event_data should never be empty here
# data.append(struct.pack('>wa*', dtime, event_data))
# print(' event_data='+str(event_data))
data.append(_ber_compressed_int(dtime)+event_data)
return b''.join(data)
|
droveio/drove
|
refs/heads/master
|
doc/source/conf.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# drove.io documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 13 12:16:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_drove_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'drove'
copyright = '2014, drove.io'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'drove'
html_theme = 'sphinx_drove_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ["_themes"]
html_theme_path = [sphinx_drove_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join("_static", "favicon.ico")
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'droveiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'droveio.tex', 'drove.io Documentation',
'Andrés J. Díaz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'droveio', 'drove.io Documentation',
['Andrés J. Díaz'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'droveio', 'drove.io Documentation',
'Andrés J. Díaz', 'droveio', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
noam09/deluge-telegramer
|
refs/heads/master
|
telegramer/include/libfuturize/fixes/fix_division.py
|
11
|
"""
UNFINISHED
For the ``future`` package.
Adds this import line:
from __future__ import division
at the top so the code runs identically on Py3 and Py2.6/2.7
"""
from libpasteurize.fixes.fix_division import FixDivision
|
wiltonlazary/arangodb
|
refs/heads/devel
|
3rdParty/V8/V8-5.0.71.39/build/gyp/test/win/gyptest-link-pdb.py
|
181
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that the 'ProgramDatabaseFile' attribute in VCLinker is extracted
properly.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('program-database.gyp', chdir=CHDIR)
test.build('program-database.gyp', test.ALL, chdir=CHDIR)
def FindFile(pdb):
full_path = test.built_file_path(pdb, chdir=CHDIR)
return os.path.isfile(full_path)
# Verify the specified PDB is created when ProgramDatabaseFile
# is provided.
if not FindFile('name_outdir.pdb'):
test.fail_test()
if not FindFile('name_proddir.pdb'):
test.fail_test()
test.pass_test()
|
40223226/2015cdbg8
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/decimal.py
|
623
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
import copy as _copy
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
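# e.g. on a 64-bit build, MIN_ETINY = -1999999999999999997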
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
    If an exception derives from another exception besides this (such as
    Underflow (Inexact, Rounded, Subnormal)), that indicates that it is only
    called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
#brython fixme
pass
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
    This occurs and signals invalid-operation if a string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
#brython fix me
pass
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
#brython fix me
pass
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
    The subnormal signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) yielded a subnormal result.
"""
#brython fix me
pass
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
#brython fix me
pass
class FloatOperation(DecimalException, TypeError):
"""Enable stricter semantics for mixing floats and Decimals.
If the signal is not trapped (default), mixing floats and Decimals is
permitted in the Decimal() constructor, context.create_decimal() and
all comparison operators. Both conversion and comparisons are exact.
Any occurrence of a mixed operation is silently recorded by setting
FloatOperation in the context flags. Explicit conversions with
Decimal.from_float() or context.create_decimal_from_float() do not
set the flag.
Otherwise (the signal is trapped), only equality comparisons and explicit
conversions are silent. All other mixed operations raise FloatOperation.
"""
#brython fix me
pass
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() functions manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.current_thread(), '__decimal_context__'):
del threading.current_thread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.current_thread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.current_thread().__decimal_context__
except AttributeError:
context = Context()
threading.current_thread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
value=value.strip().lower()
if value.startswith("-"):
self._sign = 1
value=value[1:]
else:
self._sign = 0
if value in ('', 'nan'):
self._is_special = True
self._int = ''
#if m.group('signal'): #figure out what a signaling NaN is later
# self._exp = 'N'
#else:
# self._exp = 'n'
self._exp='n'
return self
if value in ('inf', 'infinity'):
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
import _jsre as re
        _m=re.match("^\d*\.?\d*(e[+-]?\d+)?$", value)  # also accept signed exponents such as '1e-5'
if not _m:
self._is_special = True
self._int = ''
self._exp='n'
return self
if '.' in value:
intpart, fracpart=value.split('.')
if 'e' in fracpart:
fracpart, exp=fracpart.split('e')
exp=int(exp)
else:
exp=0
#self._int = str(int(intpart+fracpart))
self._int = intpart+fracpart
self._exp = exp - len(fracpart)
self._is_special = False
return self
else:
#is this a pure int?
self._is_special = False
if 'e' in value:
self._int, _exp=value.split('e')
self._exp=int(_exp)
#print(self._int, self._exp)
else:
self._int = value
self._exp = 0
return self
#m = _parser(value.strip())
#if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
#if m.group('sign') == "-":
# self._sign = 1
#else:
# self._sign = 0
#intpart = m.group('int')
#if intpart is not None:
# # finite number
# fracpart = m.group('frac') or ''
# exp = int(m.group('exp') or '0')
# self._int = str(int(intpart+fracpart))
# self._exp = exp - len(fracpart)
# self._is_special = False
#else:
# diag = m.group('diag')
# if diag is not None:
# # NaN
# self._int = str(int(diag or '0')).lstrip('0')
# if m.group('signal'):
# self._exp = 'N'
# else:
# self._exp = 'n'
# else:
# # infinity
# self._int = '0'
# self._exp = 'F'
# self._is_special = True
#return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# In order to make sure that the hash of a Decimal instance
# agrees with the hash of a numerically equal integer, float
# or Fraction, we follow the rules for numeric hashes outlined
# in the documentation. (See library docs, 'Built-in Types').
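        # e.g. hash(Decimal('1.5')) == hash(1.5) and
        # hash(Decimal(100)) == hash(100) must both hold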
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
return _PyHASH_NAN
else:
if self._sign:
return -_PyHASH_INF
else:
return _PyHASH_INF
if self._exp >= 0:
exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
else:
exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
ans = hash_ if self >= 0 else -hash_
return -2 if ans == -1 else ans
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
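            # e.g. Decimal('1.23E+5') has _int='123', _exp=3, so
            # leftdigits=6 and dotplace=3, giving the form '123E+3'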
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
        Rounds the number (if more than precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
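            # e.g. with ROUND_FLOOR, Decimal('1') + Decimal('-1') gives
            # Decimal('-0') rather than Decimal('0')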
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
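                    # a trailing digit is forced away from 0 and 5 so the
                    # inexact quotient can never be mistaken for an exact
                    # value or an exact halfway case when _fix rounds it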
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
        Remainder nearest to 0: abs(remainder_near) <= abs(other)/2
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
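        # e.g. self=10, other=6: divmod gives q=1, r=4; since 2*4+1 > 6
        # we take r=-2, q=2, the remainder nearest to zero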
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
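# Illustrative sketch: conversion to int truncates toward zero, so it
# agrees with math.trunc():
#
#   >>> int(Decimal('3.7')), int(Decimal('-3.7'))
#   (3, -3)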
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if clamp==0, and between Etiny and Etop if clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context.clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if clamp == 1 and self has too few digits
if context.clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
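# Illustrative sketch of the -1/0/1 contract above: rounding the
# coefficient '12500' to 2 digits under ROUND_HALF_EVEN returns -1
# (the dropped '500' is an exact half and '12' already ends in an even
# digit, so the caller truncates); '13500' returns 1, so the caller
# increments '13' to '14'; and '12000' returns 0 because the dropped
# digits are all zeros and the value is unchanged.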
def __round__(self, n=None):
"""Round self to the nearest integer, or to a given precision.
If only one argument is supplied, round a finite Decimal
instance self to the nearest integer. If self is infinite or
a NaN then a Python exception is raised. If self is finite
and lies exactly halfway between two integers then it is
rounded to the integer with even last digit.
>>> round(Decimal('123.456'))
123
>>> round(Decimal('-456.789'))
-457
>>> round(Decimal('-3.0'))
-3
>>> round(Decimal('2.5'))
2
>>> round(Decimal('3.5'))
4
>>> round(Decimal('Inf'))
Traceback (most recent call last):
...
OverflowError: cannot round an infinity
>>> round(Decimal('NaN'))
Traceback (most recent call last):
...
ValueError: cannot round a NaN
If a second argument n is supplied, self is rounded to n
decimal places using the rounding mode for the current
context.
For an integer n, round(self, -n) is exactly equivalent to
self.quantize(Decimal('1En')).
>>> round(Decimal('123.456'), 0)
Decimal('123')
>>> round(Decimal('123.456'), 2)
Decimal('123.46')
>>> round(Decimal('123.456'), -2)
Decimal('1E+2')
>>> round(Decimal('-Infinity'), 37)
Decimal('NaN')
>>> round(Decimal('sNaN123'), 0)
Decimal('NaN123')
"""
if n is not None:
# two-argument form: use the equivalent quantize call
if not isinstance(n, int):
raise TypeError('Second argument to round should be integral')
exp = _dec_from_triple(0, '1', -n)
return self.quantize(exp)
# one-argument form
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
third = _convert_other(third, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
return product.__add__(third, context)
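# Illustrative sketch, assuming the default context: the intermediate
# product is exact, with only one rounding at the end:
#
#   >>> Decimal(2).fma(3, 5)
#   Decimal('11')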
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
other = _convert_other(other)
if other is NotImplemented:
return other
modulo = _convert_other(modulo)
if modulo is NotImplemented:
return modulo
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then the first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in range(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
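# Illustrative sketch, assuming the default context: the three-argument
# form of pow() requires integral operands and computes the result
# exactly:
#
#   >>> pow(Decimal(3), Decimal(4), Decimal(5))   # 81 % 5
#   Decimal('1')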
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1 << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
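# Illustrative sketch, assuming the default context:
#
#   >>> Decimal('120.00').normalize()
#   Decimal('1.2E+2')
#   >>> Decimal('0.00').normalize()
#   Decimal('0')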
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
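# Illustrative sketch, assuming the default context: quantize is the
# idiomatic way to round to a fixed number of decimal places:
#
#   >>> Decimal('1.41421356').quantize(Decimal('1.000'))
#   Decimal('1.414')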
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we also provide the old one for compatibility
to_integral = to_integral_value
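# Illustrative sketch, assuming the default context (ROUND_HALF_EVEN):
# halfway cases round to the even neighbour, and no flags are raised:
#
#   >>> Decimal('2.5').to_integral_value()
#   Decimal('2')
#   >>> Decimal('3.5').to_integral_value()
#   Decimal('4')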
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = (len(self._int) + 1) >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = (n + q) >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
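# Illustrative sketch, assuming the default context (prec=28): exact
# roots use the ideal exponent, inexact ones are correctly rounded:
#
#   >>> Decimal('100').sqrt()
#   Decimal('10')
#   >>> Decimal(2).sqrt()
#   Decimal('1.414213562373095048801688724')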
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except for NaN handling: if exactly one
operand is a quiet NaN, the other operand is returned; any other
NaN combination returns NaN (signaling for sNaNs). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except for NaN handling: if exactly one
operand is a quiet NaN, the other operand is returned; any other
NaN combination returns NaN (signaling for sNaNs). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If self is a NaN or Infinity, self._exp is a string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other, context=None):
"""Compares self to other using the abstract representations.
This is not like the standard compare, which compares numerical
values. Note that a total ordering is defined for all possible abstract
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
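# Illustrative sketch: numerically equal values with different
# exponents are distinguished by the total ordering:
#
#   >>> Decimal('12.0').compare_total(Decimal('12'))
#   Decimal('-1')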
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
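# Illustrative sketch, assuming the default context (prec=28): exact
# powers of ten take the fast path above; other values are correctly
# rounded:
#
#   >>> Decimal('100').log10()
#   Decimal('2')
#   >>> Decimal(2).log10()
#   Decimal('0.3010299956639811952137388947')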
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
To be logical, it must be a finite number with a sign of 0, an
exponent of 0, and a coefficient whose digits are all either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
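# Pad each coefficient string with leading zeros, or truncate it,
# so that both are exactly context.prec digits long.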
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# apply the operation, and strip leading zeros
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# apply the operation, and strip leading zeros
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# apply the operation, and strip leading zeros
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
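# Illustrative sketch, assuming the default context: logical operands
# are Decimals whose digits are all 0s and 1s:
#
#   >>> Decimal('1100').logical_and(Decimal('1010'))
#   Decimal('1000')
#   >>> Decimal('1100').logical_xor(Decimal('1010'))
#   Decimal('110')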
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
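# Illustrative sketch, assuming a context with prec == 9: the
# coefficient is padded to 9 digits before rotating, so
#
#   >>> Decimal('123456789').rotate(2)
#   Decimal('345678912')
#   >>> Decimal('123456789').rotate(-2)
#   Decimal('891234567')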
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
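# Illustrative sketch, assuming a context with prec == 9: unlike
# rotate, shifted-out digits are lost; zeros come in from the right
# for a positive shift, and digits are dropped for a negative one:
#
#   >>> Decimal('123456789').shift(2)
#   Decimal('345678900')
#   >>> Decimal('123456789').shift(-2)
#   Decimal('1234567')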
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# PEP 3101 support. The _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
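    # A few illustrative __format__ results (a sketch, assuming the default
    # context with ROUND_HALF_EVEN rounding):
    #
    #   format(Decimal('1234.5678'), '.2f')  ->  '1234.57'
    #   format(Decimal('1234.5678'), '.3e')  ->  '1.235e+3'
    #   format(Decimal('0.25'), '.1%')       ->  '25.0%'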
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
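# Typical use of _ContextManager is indirect, through the module's
# localcontext() helper, e.g.:
#
#     with localcontext() as ctx:
#         ctx.prec = 50
#         ...          # computations carried out at 50-digit precision
#     # the previously active context is restored on exit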
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None, traps=None,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
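        # (DefaultContext is undefined only while DefaultContext itself is
        # being constructed, and in that case every argument is supplied
        # explicitly, so dc is never consulted.)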
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self.clamp = clamp if clamp is not None else dc.clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
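            # Iterating over _signals + traps (rather than _signals alone)
            # lets any non-signal entries survive into the dict, where the
            # validation in _set_signal_dict will reject them.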
self.traps = dict((s, int(s in traps)) for s in _signals + traps)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals + flags)
else:
self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
if not isinstance(d, dict):
raise TypeError("%s must be a signal dict" % d)
for key in d:
if not key in _signals:
raise KeyError("%s is not a valid signal dict" % d)
for key in _signals:
if not key in d:
raise KeyError("%s is not a valid signal dict" % d)
return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
if not value in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
    def _raise_error(self, condition, explanation=None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
trap_enabler is set, it reraises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
        self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
        a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand after adding the second value to its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
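        For example:
        >>> ExtendedContext.to_eng_string(Decimal('123E+1'))
        '1.23E+3'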
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
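        For example:
        >>> ExtendedContext.to_sci_string(Decimal('123E+1'))
        '1.23E+3'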
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
    # The method name changed, but we also provide the old one for compatibility.
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
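# Worked example (a sketch; prec left at its default of 0):
#
#     op1, op2 = _WorkRep(Decimal('1.23')), _WorkRep(Decimal('4.5'))
#     _normalize(op1, op2)   # op1 is (0, 123, -2); op2 becomes (0, 450, -2)
#
# Both operands now share exp == -2, so their ints can be added directly.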
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
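        # Newton's method step; -(-n//a) is ceil(n/a), so this computes
        # (a + ceil(n/a)) // 2 in pure integer arithmetic.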
        b, a = a, (a - (-n // a)) >> 1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
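    # round half to even: bump q when the remainder exceeds b/2, or
    # equals b/2 with q odd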
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and abs(y) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest((M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in range(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
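# Rough sanity check (editor's sketch; values assumed, not from the source):
# with M = 10**4 and x = 2*10**4, _ilog(x, M) approximates 10**4 * ln(2),
# i.e. a value near 6931, within 22 of the truth per the docstring's bound.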
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
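# Rough sanity check (editor's sketch; values assumed, not from the source):
# _dlog10(2, 0, 4) approximates 10**4 * log10(2) ~= 3010, within 1.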
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
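# Rough sanity check (editor's sketch; values assumed, not from the source):
# _dlog(2, 0, 4) approximates 10**4 * ln(2) ~= 6931, within 1.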
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((x<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = M<<R
for i in range(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in range(R-1, -1, -1):
Mshift = M<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
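# Rough sanity check (editor's sketch; values assumed, not from the source):
# _iexp(10**4, 10**4) approximates 10**4 * exp(1) ~= 27183, and by the
# docstring the absolute error is at most 60 for 0 <= x/M <= 2.4.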
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
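# Rough sanity check (editor's sketch; values assumed, not from the source):
# _dexp(1, 0, 5) approximates exp(1) = 2.71828... and so returns a pair
# close to (27183, -4), i.e. d*10**f with 10**4 <= d <= 10**5.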
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
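# Rough sanity check (editor's sketch; values assumed, not from the source):
# _dpower(2, 0, 1, 1, 4) computes 2**10 = 1024 to 4 digits, returning a pair
# close to (1024, 0); the error in the coefficient is at most 1.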
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
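# Hand-checked example (editor's sketch, not from the original source):
# _log10_lb(456) == 100*3 - correction['4'] == 260, a valid lower bound
# since 100*log10(456) ~= 265.9.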
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, int):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
def _convert_for_comparison(self, other, equality_op=False):
"""Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
if isinstance(other, Decimal):
return self, other
# Comparison with a Rational instance (also includes integers):
# self op n/d <=> self*d op n (for n and d integers, d positive).
# A NaN or infinity can be left unchanged without affecting the
# comparison result.
if isinstance(other, _numbers.Rational):
if not self._is_special:
self = _dec_from_triple(self._sign,
str(int(self._int) * other.denominator),
self._exp)
return self, Decimal(other.numerator)
# Comparisons with float and complex types. == and != comparisons
# with complex numbers should succeed, returning either True or False
# as appropriate. Other comparisons return NotImplemented.
if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
other = other.real
if isinstance(other, float):
context = getcontext()
if equality_op:
context.flags[FloatOperation] = 1
else:
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are enabled")
return self, Decimal.from_float(other)
return NotImplemented, NotImplemented
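# Worked illustration of the Rational trick above (editor's sketch; the
# values are assumed, not from the original source): comparing
# Decimal('0.5') with fractions.Fraction(1, 3) rescales self to
# Decimal('1.5') (coefficient 5 times denominator 3, same exponent) and
# other to Decimal(1) (the numerator), and 1.5 > 1 gives the same answer
# as 0.5 > 1/3.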
##### Setup Specific Contexts ############################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
DefaultContext = Context(
prec=17, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=308,
Emin=-324,
capitals=1,
clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
import _jsre as re
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
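# These two matchers classify the digits discarded during rounding
# (editor's note; the examples below are hand-checked, not from the source):
#   _all_zeros('000')   -> truthy  (discarded digits are exactly zero)
#   _exact_half('5000') -> truthy  (discarded digits are exactly one half)
#   _exact_half('5001') -> None    (strictly more than one half)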
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
#_parse_format_specifier_regex = re.compile(r"""\A
#(?:
# (?P<fill>.)?
# (?P<align>[<>=^])
#)?
#(?P<sign>[-+ ])?
#(?P<alt>\#)?
#(?P<zeropad>0)?
#(?P<minimumwidth>(?!0)\d+)?
#(?P<thousands_sep>,)?
#(?:\.(?P<precision>0|(?!0)\d+))?
#(?P<type>[eEfFgGn%])?
#\Z
#""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gGn':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
return format_dict
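# Illustration (editor's sketch): in CPython, _parse_format_specifier('.3f')
# would yield, among the defaults, {'precision': 3, 'type': 'f',
# 'align': '>', 'fill': ' ', 'sign': '-', 'minimumwidth': 0,
# 'zeropad': False, 'thousands_sep': '', 'decimal_point': '.'}. Note that
# in this port the _parse_format_specifier_regex definition above is
# commented out, so calling this function would raise a NameError as the
# module stands.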
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
    # (2) nonempty list of positive integers + [0], or
    # (3) list of positive integers + [locale.CHAR_MAX]
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
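# Hand-checked examples (editor's sketch, not from the original source):
#   _group_lengths([3, 0])    -> 3, 3, 3, ...  (groups of three forever)
#   _group_lengths([3, 2, 0]) -> 3, 2, 2, ...  (one three, then twos forever)
#   _group_lengths([])        -> []            (no grouping at all)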
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart or spec['alt']:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys
try:
import _decimal
except ImportError:
pass
else:
s1 = set(dir())
s2 = set(dir(_decimal))
for name in s1 - s2:
del globals()[name]
del s1, s2, name
from _decimal import *
if __name__ == '__main__':
import doctest, decimal
doctest.testmod(decimal)
|
dominikl/openmicroscopy
|
refs/heads/develop
|
components/tools/test_setup.py
|
14
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyTest command class
setup.py files in each package should include a cmdclass mapping
from "test" to PyTest.
Copyright 2007-2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import sys
from setuptools.command.test import test as TestCommand
LIB = os.path.join("..", "..", "tests", "python")
sys.path.insert(0, LIB)
class PyTest(TestCommand):
user_options = [
('test-path=', 't', "base dir for test collection"),
('test-pythonpath=', 'p', "prepend 'pythonpath' to PYTHONPATH"),
('test-ice-config=', 'i',
"use specified 'ice config' file instead of default"),
('test-string=', 'k', "only run tests including 'string'"),
('test-marker=', 'm', "only run tests including 'marker'"),
('test-no-capture', 's', "don't suppress test output"),
('test-failfast', 'x', "Exit on first error"),
('test-verbose', 'v', "more verbose output"),
('test-quiet', 'q', "less verbose output"),
('junitxml=', None, "create junit-xml style report file at 'path'"),
('pdb', None, "fallback to pdb on error"),
        ('markers', None, "list available markers"),
]
def initialize_options(self):
TestCommand.initialize_options(self)
self.test_pythonpath = None
self.test_ice_config = None
self.test_string = None
self.test_marker = None
self.test_path = None
self.test_no_capture = False
self.test_failfast = False
self.test_quiet = False
self.test_verbose = False
self.junitxml = None
self.pdb = False
self.markers = False
def finalize_options(self):
TestCommand.finalize_options(self)
if self.test_path is None:
self.test_path = 'test'
self.test_args = [self.test_path]
if self.test_string is not None:
self.test_args.extend(['-k', self.test_string])
if self.test_marker is not None:
self.test_args.extend(['-m', self.test_marker])
if self.test_no_capture:
self.test_args.extend(['-s'])
if self.test_failfast:
self.test_args.extend(['-x'])
if self.test_verbose:
self.test_args.extend(['-v'])
if self.test_quiet:
self.test_args.extend(['-q'])
if self.junitxml is not None:
self.test_args.extend(['--junitxml', self.junitxml])
if self.pdb:
self.test_args.extend(['--pdb'])
self.test_suite = True
if self.markers:
self.test_args = "--markers"
if self.test_ice_config is None:
self.test_ice_config = os.path.abspath('ice.config')
if 'ICE_CONFIG' not in os.environ:
os.environ['ICE_CONFIG'] = self.test_ice_config
def run_tests(self):
if self.test_pythonpath is not None:
sys.path.insert(0, self.test_pythonpath)
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
|
momm3/WelcomeBot
|
refs/heads/master
|
welcomebot/Lib/site-packages/urllib3/util/url.py
|
203
|
from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ('http', 'https', None)
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
if path and not path.startswith('/'):
path = '/' + path
if scheme:
scheme = scheme.lower()
if host and scheme in NORMALIZABLE_SCHEMES:
host = host.lower()
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url passed to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
    Scales linearly with the number of delims; not ideal for a large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx + 1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers. No whitespace, no plus or
# minus prefixes, no non-integer digits such as ^2 (superscript).
if not port.isdigit():
raise LocationParseError(url)
try:
port = int(port)
except ValueError:
raise LocationParseError(url)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
|
tuhangdi/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_rjust.py
|
521
|
from django.template.defaultfilters import rjust
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class RjustTests(SimpleTestCase):
@setup({'rjust01': '{% autoescape off %}.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.{% endautoescape %}'})
def test_rjust01(self):
output = self.engine.render_to_string('rjust01', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b. . a&b.")
@setup({'rjust02': '.{{ a|rjust:"5" }}. .{{ b|rjust:"5" }}.'})
def test_rjust02(self):
output = self.engine.render_to_string('rjust02', {"a": "a&b", "b": mark_safe("a&b")})
self.assertEqual(output, ". a&b. . a&b.")
class FunctionTests(SimpleTestCase):
def test_rjust(self):
        self.assertEqual(rjust('test', 10), '      test')
def test_less_than_string_length(self):
self.assertEqual(rjust('test', 3), 'test')
def test_non_string_input(self):
self.assertEqual(rjust(123, 4), ' 123')
|
leviroth/praw
|
refs/heads/master
|
tests/unit/models/reddit/test_redditor.py
|
1
|
import pickle
import pytest
from praw.models import Redditor
from ... import UnitTest
class TestRedditor(UnitTest):
def test_equality(self):
redditor1 = Redditor(self.reddit, _data={"name": "dummy1", "n": 1})
redditor2 = Redditor(self.reddit, _data={"name": "Dummy1", "n": 2})
redditor3 = Redditor(self.reddit, _data={"name": "dummy3", "n": 2})
assert redditor1 == redditor1
assert redditor2 == redditor2
assert redditor3 == redditor3
assert redditor1 == redditor2
assert redditor2 != redditor3
assert redditor1 != redditor3
assert "dummy1" == redditor1
assert redditor2 == "dummy1"
def test_construct_failure(self):
message = "Either `name` or `_data` must be provided."
with pytest.raises(TypeError) as excinfo:
Redditor(self.reddit)
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
Redditor(self.reddit, "dummy", {"id": "dummy"})
assert str(excinfo.value) == message
with pytest.raises(AssertionError):
Redditor(self.reddit, _data=[{"name": "dummy"}])
with pytest.raises(AssertionError):
Redditor(self.reddit, _data={"notname": "dummy"})
def test_fullname(self):
redditor = Redditor(self.reddit, _data={"name": "name", "id": "dummy"})
assert redditor.fullname == "t2_dummy"
    def test_gild__min(self):
with pytest.raises(TypeError) as excinfo:
Redditor(self.reddit, name="RedditorName").gild(0)
assert str(excinfo.value) == "months must be between 1 and 36"
    def test_gild__max(self):
with pytest.raises(TypeError) as excinfo:
Redditor(self.reddit, name="RedditorName").gild(37)
assert str(excinfo.value) == "months must be between 1 and 36"
def test_hash(self):
redditor1 = Redditor(self.reddit, _data={"name": "dummy1", "n": 1})
redditor2 = Redditor(self.reddit, _data={"name": "Dummy1", "n": 2})
redditor3 = Redditor(self.reddit, _data={"name": "dummy3", "n": 2})
assert hash(redditor1) == hash(redditor1)
assert hash(redditor2) == hash(redditor2)
assert hash(redditor3) == hash(redditor3)
assert hash(redditor1) == hash(redditor2)
assert hash(redditor2) != hash(redditor3)
assert hash(redditor1) != hash(redditor3)
def test_pickle(self):
redditor = Redditor(self.reddit, _data={"name": "name", "id": "dummy"})
for level in range(pickle.HIGHEST_PROTOCOL + 1):
other = pickle.loads(pickle.dumps(redditor, protocol=level))
assert redditor == other
def test_repr(self):
redditor = Redditor(self.reddit, name="RedditorName")
assert repr(redditor) == "Redditor(name='RedditorName')"
def test_str(self):
redditor = Redditor(self.reddit, _data={"name": "name", "id": "dummy"})
assert str(redditor) == "name"
class TestRedditorListings(UnitTest):
def test__params_not_modified_in_mixed_listing(self):
params = {"dummy": "value"}
redditor = Redditor(self.reddit, name="spez")
for listing in ["controversial", "hot", "new", "top"]:
generator = getattr(redditor, listing)(params=params)
assert params == {"dummy": "value"}
assert listing == generator.params["sort"]
assert "value" == generator.params["dummy"]
|
alirizakeles/zato
|
refs/heads/dsuch-f-gh723-add-exe-agent
|
code/alembic/versions/0018_ed18fc6a_gh284_smtp.py
|
7
|
"""gh284 smtp
Revision ID: 0018_ed18fc6a
Revises: 0017_7baa0602
Create Date: 2014-07-18 15:07:05
"""
# revision identifiers, used by Alembic.
revision = '0018_ed18fc6a'
down_revision = '0017_7baa0602'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import CreateSequence, DropSequence
# Zato
from zato.common.odb import model
# ################################################################################################################################
def upgrade():
op.execute(CreateSequence(sa.Sequence('email_smtp_seq')))
op.create_table(
model.SMTP.__tablename__,
sa.Column('id', sa.Integer(), sa.Sequence('email_smtp_seq'), primary_key=True),
sa.Column('name', sa.String(200), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('host', sa.String(400), nullable=False),
sa.Column('port', sa.Integer(), nullable=False),
sa.Column('timeout', sa.Integer(), nullable=False),
sa.Column('is_debug', sa.Boolean(), nullable=False),
sa.Column('username', sa.String(400), nullable=True),
sa.Column('password', sa.String(400), nullable=True),
sa.Column('mode', sa.String(20), nullable=False),
sa.Column('ping_address', sa.String(200), nullable=False),
sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
)
op.create_unique_constraint(
'email_smtp_name_cluster_id_key', model.SMTP.__tablename__, ['name', 'cluster_id']
)
def downgrade():
op.drop_table(model.SMTP.__tablename__)
op.execute(DropSequence(sa.Sequence('email_smtp_seq')))
|
Teamxrtc/webrtc-streaming-node
|
refs/heads/master
|
third_party/webrtc/src/chromium/src/tools/gyp/test/rules-rebuild/src/make-sources.py
|
337
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
assert len(sys.argv) == 4, sys.argv
(in_file, c_file, h_file) = sys.argv[1:]
def write_file(filename, contents):
open(filename, 'wb').write(contents)
write_file(c_file, open(in_file, 'rb').read())
write_file(h_file, '#define NAME "%s"\n' % in_file)
sys.exit(0)
|
jluissandovalm/lammps_smd
|
refs/heads/luis
|
lib/smd/scripts/relicense.py
|
315
|
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <mierle@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: mierle@gmail.com (Keir Mierle)
#
# Make the long-awaited conversion to MPL.
lgpl3_header = '''
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
'''
mpl2_header = """
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
import os
import sys
exclusions = set(['relicense.py'])
def update(text):
if text.find(lgpl3_header) == -1:
return text, False
return text.replace(lgpl3_header, mpl2_header), True
rootdir = sys.argv[1]
for root, sub_folders, files in os.walk(rootdir):
for basename in files:
    filename = os.path.join(root, basename)
    if basename in exclusions:
      print 'SKIPPED', filename
      continue
fo = file(filename)
text = fo.read()
fo.close()
text, updated = update(text)
if updated:
fo = file(filename, "w")
fo.write(text)
fo.close()
print 'UPDATED', filename
else:
print ' ', filename
|
atechnicolorskye/Freebase-Query
|
refs/heads/master
|
Freebase_Query.py
|
1
|
"""
FreeBase_Query.py
Description
-----------
The methods Search_Instance, Topic_Instance and Get_Instance enable users to search for a specific instance of a Freebase
relation in Freebase, obtain the description and Freebase relations of an instance, and perform the previous two methods for
multiple queries with the same relations, respectively. Parse replicates the lambda-calculus form of a question, employing
Search_Instance and Topic_Instance to do so.
Usage:
Search_Instance
---------------
Search_Instance('San Francisco', 'Warriors', 'sports', 1) # User may select the number of results wanted from the query
Search_Instance('Titanic', 'James', 'directed') # 1 is the default
Topic_Instance
--------------
Topic_Instance('San Francisco', 'Warriors', '/en/golden_state_warriors', '/m/0jmj7', 'sports')
Topic_Instance('Titanic', 'James', 'en/titanic', '/m/0dr_4', 'directed')
Get_Instance
------------
Get_Instance([['San Francisco', 'Warriors'], ['Los Angeles', 'Lakers']], 'sports')
Get_Instance([['Titanic', 'James'], ['Dark Knight', 'Nolan']], 'directed')
Parse
-----
Parse('what genre is the hound of the baskervilles', 'hound of the baskervilles', '', 'genre')
Parse('what articles are in the july 1967 issue of frontier times', 'frontier times', 'july 1967', 'articles')
Si Kai Lee 11/03/2015
"""
import json
import re
import urllib
def Search_Instance(Q1, Q2, Relation, Number=1):
# Get query
api_key = 'AIzaSyDLAownuEEM-WLsT3MGxFhUKFnaTQh8F4w'
service_url = 'https://www.googleapis.com/freebase/v1/search'
params = {
'query': Q1+'+'+Q2,
'key': api_key
}
url = service_url + '?' + urllib.urlencode(params)
response = json.loads(urllib.urlopen(url).read())
required_response = []
for y in range(Number):
try:
# [name, id, mid, confidence, relation]
z = [response['result'][y]['name'], str(response['result'][y]['id']), str(response['result'][y]['mid']), '('+str(response['result'][y]['score'])+')', Relation]
required_response.append(z)
except IndexError:
return "This search did not yield any valid results. Please check your inputs for errors and try again."
return required_response
def Topic_Instance(Q1, Q2, ID, MID, Relation):
api_key = 'AIzaSyDLAownuEEM-WLsT3MGxFhUKFnaTQh8F4w'
service_url = 'https://www.googleapis.com/freebase/v1/topic'
params = {
'key': api_key,
}
url = service_url + MID + '?' + urllib.urlencode(params)
topic = json.loads(urllib.urlopen(url).read())
print url
try:
# Obtains description of topic
Desc = topic['property']['/common/topic/description']['values'][0]['value']
except KeyError:
return "This search did not yield any valid results. This search did not yield any valid results. Please check your inputs for errors and try again."
# Initiates list collecting possible Freebase relations
RShip = []
# Sets default Freebase relation as the relation found under /common/topic/notable_types
Default = topic['property']['/common/topic/notable_types']['values'][0]['id']
Default_Count = re.findall('\/', Default)
Default_1 = Default
# Collects matches between Relation and Freebase relations, replaces default relation if match is found
for x in topic['property']:
try:
if Relation in x and Relation not in Default:
Default = x
RShip.append(x)
except IndexError:
pass
# for x in topic['property']:
# for v in range(len(topic['property'][x]['values'])):
# try:
# y = topic['property'][x]['values'][v]['text']
# if Q1 in y or Q2 in y:
# if Relation in x and Relation not in Default:
# Default = x
# RShip.append(x)
# except IndexError:
# pass
# Ensures that output Freebase relation is most general
for x in RShip:
# print x
if re.findall('\/', x) < Default_Count:
Default = x
Default_Count = re.findall('\/', Default)
# for x in topic['property']:
# for v in range(len(topic['property'][x]['values'])):
# try:
# y = topic['property'][x]['values'][v]['text']
# if re.findall('\/common|\/wiki|\/type', x) == [] and re.findall('\/common|\/wiki', y) == []:
# if Q1 in y or Q2 in y:
# RShip.append([x, topic['property'][x]['values'][v]['text']])
# except IndexError:
# pass
# Trims output Freebase relation so that the last word is similar to Relation
Default_p = Default.split('/')
for x in range(len(Default_p))[::-1]:
if Relation not in Default_p[x]:
Default_p.pop(x)
else:
break
Default = '/'.join(Default_p)
return [Q1, Q2, ID, Desc, Default, Default_1]
def Get_Instance(Instances, Relation):
Responses_Col = []
# Iterates through the list
for x in range(len(Instances)):
# Checks if more than one instance is required per search
if len(Instances[x]) == 2:
Q1, Q2 = Instances[x]
else:
Q1, Q2, Number = Instances[x]
# Search_Instance
Responses = Search_Instance(Q1, Q2, Relation, Number=1)
# Initiates list collecting Topic_Instance outputs
Responses_i = []
# Iterates through list of responses and run Topic_Instance for each response
for x in range(len(Responses)):
try:
y = Topic_Instance(Q1, Q2, Responses[x][1], Responses[x][2], Responses[x][-1])
Responses_i.append([Responses[x][0], y[2], y[3], y[4], y[5]])
except IndexError:
return "This search did not yield any valid results. Please check your inputs for errors and try again."
Responses_Col.append(Responses_i)
return Responses_Col
# (lambda $0 /people/person (/architecture/architectural_style@architects:t /en/bauhaus:/architecture/architectural_style $0))
def Parse(Input, Q1, Q2, Relation):
Input = Input.split()
# Sorts the different possible questions asked
if Input[0].lower() == 'who':
Parse = ['(lambda $0 /people/person (/', '', ' $0))']
elif Input[0].lower() == 'how':
if Input[1].lower() == 'many':
Parse = ['(count $0 (/', '', ' $0))']
elif Input[1].lower() == 'long':
Parse = ['(lambda $0 /type/int (/', '', ' $0))']
elif Input[0].lower() == 'when':
Parse = ['(lambda $0 /type/datetime (/', '', ' $0))']
elif Input[0].lower() == 'where':
Parse = ['(lambda $0 /location/location (/', '', ' $0))']
else:
Parse = ['(lambda $0 /common/topic (/', '', ' $0))']
Responses = Search_Instance(Q1, Q2, Relation, Number=1)
Topic = Topic_Instance(Q1, Q2, Responses[0][1], Responses[0][2], Responses[0][-1])
if Topic == "This search did not yield any valid results. Please check your inputs for errors and try again.":
return "This search did not yield any valid results. Please check your inputs for errors and try again."
    # Checks if there was a relation match
if Topic[-2] == '':
Delta_End = Topic[-1].split('/')
Delta = Delta_End.pop()
Delta_End.pop(0)
else:
# Provides the phrases before and after @
try:
Delta = list(set(Topic[-2].split('/')) - set(Topic[-1].split('/')))
Delta_End = Topic[-2].split('/')
Delta_End = Delta_End[1:(len(Delta_End)-len(Delta))]
# If Topic[-2] is exactly identical or completely different from Topic[-1], use the last word of Topic[-2] as
# the word after @
if len(Delta) == len(Topic[-2].split('/')) - 1 or len(Delta) == 0:
Delta_End = Topic[-2].split('/')
Delta_End.pop(0)
Delta = Delta_End.pop()
except ValueError:
Delta_End = Topic[-2].split('/')
Delta = Delta_End.pop()
Delta_End.pop(0)
# print Delta, Delta_End
# Concatenates the phrase to be inserted in the lambda calculus
try:
if type(Delta) is list:
Insertion = ('/').join(Delta_End) + '@' + ('@').join(Delta) + ':t ' + Topic[2] + ': '+ ('/').join(Delta_End)
else:
Insertion = ('/').join(Delta_End) + '@' + Delta + ':t ' + Topic[2] + ': '+ ('/').join(Delta_End)
# print Insertion
Parse[1] = Insertion
except TypeError:
return "This search did not yield any valid results. Please check your inputs for errors and try again."
return ('').join(Parse)
# def Search_Relation(Instance, Relation, Number=10):
# api_key = 'AIzaSyDLAownuEEM-WLsT3MGxFhUKFnaTQh8F4w'
# service_url = 'https://www.googleapis.com/freebase/v1/search'
# params = {
# 'query': Instance,
# 'filter': "(all type:\""+Relation.lower()+"\")",
# 'key': api_key
# }
# url = service_url + '?' + urllib.urlencode(params)
# response = json.loads(urllib.urlopen(url).read())
# required_response = []
# for y in range(Number):
# z = [response['result'][y]['name'], str(response['result'][y]['id']), str(response['result'][y]['mid']), '('+str(response['result'][y]['score'])+')']
# required_response.append(z)
# return required_response
# def Topic_Relation(Name, MID, ID='Null'):
# api_key = 'AIzaSyDLAownuEEM-WLsT3MGxFhUKFnaTQh8F4w'
# service_url = 'https://www.googleapis.com/freebase/v1/topic'
# params = {
# 'key': api_key
# }
# url = service_url + MID + '?' + urllib.urlencode(params)
# topic = json.loads(urllib.urlopen(url).read())
# Desc = topic['property']['/common/topic/description']['values'][0]['value']
# RShip = []
# # RShip_1 = []
# # for property in topic['property']:
# # print property + ':'
# # for value in topic['property'][property]['values']:
# # try:
# # RShip.append(value['id'])
# # except KeyError:
# # continue
# for property in topic['property']:
# print property + ':'
# for value in topic['property'][property]['values']:
# try:
# RShip.append(value['id'])
# except KeyError:
# pass
# # print 'Is RShip = RShip_1' + str(RShip == RShip_1)
# RShip_P = [i for i in RShip if not re.findall("\/m.+|\/user.+", i)]
# # print RShip_P
# RShip_C = list(set(RShip_P))
# if ID == 'Null':
# Result = [Name, Desc, RShip_C]
# else:
# Result = [Name, ID, Desc, RShip_C]
# return Result
# def Get_Relation(Instance, Relation, Number=10):
# Responses_Col = []
# for i in Instance:
# Responses = Search_Relation(i, Relation, Number)
# Responses_i = []
# for x in range(len(Responses)):
# y = Topic_Relation(Responses[x][0], Responses[x][2], Responses[x][1])
# Responses_i.append(y)
# Responses_Col.append(Responses_i)
# return Responses_Col
|
Deltares/aqua-monitor
|
refs/heads/master
|
app/pyasn1_modules/__init__.py
|
72
|
# http://www.python.org/dev/peps/pep-0396/
__version__ = '0.0.8'
|
EthanBlackburn/sync-engine
|
refs/heads/master
|
inbox/api/err.py
|
2
|
from flask import jsonify, make_response
class APIException(Exception):
pass
class InputError(APIException):
"""Raised on invalid user input (missing required parameter, value too
long, etc.)"""
status_code = 400
def __init__(self, message):
self.message = message
class NotFoundError(APIException):
"""Raised when a requested resource doesn't exist."""
status_code = 404
def __init__(self, message):
self.message = message
class ConflictError(APIException):
status_code = 409
def __init__(self, message):
self.message = message
def err(http_code, message, **kwargs):
resp = {
'type': 'api_error',
'message': message
}
resp.update(kwargs)
return make_response(jsonify(resp), http_code)
|
hdmetor/scikit-learn
|
refs/heads/master
|
examples/feature_selection/feature_selection_pipeline.py
|
342
|
"""
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that runs successively a univariate
feature selection with anova and then a C-SVM of the selected features.
"""
print(__doc__)
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X, y)
anova_svm.predict(X)
|
k0st1an/beetlejuice
|
refs/heads/master
|
apps/mailer/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 19:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('last_update', models.DateTimeField(auto_now=True)),
('enable', models.BooleanField(default=True)),
('name', models.CharField(max_length=30, unique=True)),
('sender', models.CharField(max_length=100)),
],
options={
'verbose_name_plural': 'Actions',
'verbose_name': 'Action',
},
),
migrations.CreateModel(
name='Recipient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('last_update', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=256)),
('email', models.EmailField(max_length=254)),
],
options={
'verbose_name_plural': 'Recipients',
'verbose_name': 'Recipient',
},
),
migrations.CreateModel(
name='SMTP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('last_update', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=30)),
('host', models.CharField(max_length=254)),
('port', models.IntegerField(blank=True, null=True)),
('ssl_port', models.IntegerField(blank=True, null=True)),
('login', models.CharField(max_length=254)),
('password', models.CharField(max_length=254)),
],
options={
'verbose_name_plural': 'SMTPs',
'verbose_name': 'SMTP',
},
),
migrations.AddField(
model_name='action',
name='recipients',
field=models.ManyToManyField(to='mailer.Recipient'),
),
migrations.AddField(
model_name='action',
name='smtp',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mailer.SMTP'),
),
]
|
enthought/pyside
|
refs/heads/master
|
tests/QtDeclarative/bug_951.py
|
6
|
from PySide.QtDeclarative import QDeclarativeItem, qmlRegisterType, QDeclarativeView
from PySide.QtCore import QUrl
from helper import adjust_filename, TimedQApplication
import unittest
class MyItem(QDeclarativeItem):
COMPONENT_COMPLETE_CALLED = False
def __init__(self,parent=None):
super(MyItem, self).__init__(parent)
self.setObjectName("myitem")
def componentComplete(self):
MyItem.COMPONENT_COMPLETE_CALLED = True
super(MyItem, self).componentComplete()
class TestRegisterQMLType(TimedQApplication):
def setup(self):
TimedQApplication.setup(self, 100 * 3) # 3s
def testSignalEmission(self):
qmlRegisterType(MyItem, "my.item", 1, 0, "MyItem")
view = QDeclarativeView()
view.setSource(QUrl.fromLocalFile(adjust_filename('bug_951.qml', __file__)))
self.app.exec_()
self.assertTrue(MyItem.COMPONENT_COMPLETE_CALLED)
if __name__ == '__main__':
unittest.main()
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/weapon/melee/sword/shared_sword_lightsaber_obi.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/sword/shared_sword_lightsaber_obi.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_obi")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
OpusVL/odoo
|
refs/heads/master
|
addons/account_analytic_plans/report/__init__.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crossovered_analytic
|
fldc/CouchPotatoServer
|
refs/heads/custom
|
libs/subliminal/services/subscenter.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright 2012 Ofir Brukner <ofirbrukner@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie
from ..utils import to_unicode, get_keywords
from bs4 import BeautifulSoup
import bisect
import json
import logging
logger = logging.getLogger(__name__)
class Subscenter(ServiceBase):
server = 'http://www.subscenter.co/he/'
api_based = False
languages = language_set(['he'])
videos = [Episode, Movie]
require_video = False
def _search_url_title(self, title, kind):
"""Search the URL title for the given `title`.
:param str title: title to search for.
:param str kind: kind of the title, ``movie`` or ``series``.
:return: the URL version of the title.
:rtype: str or None
"""
# make the search
logger.info('Searching title name for %r', title)
r = self.session.get(self.server + 'subtitle/search/', params={'q': title}, allow_redirects=False, timeout=10)
r.raise_for_status()
# if redirected, get the url title from the Location header
if r.is_redirect:
parts = r.headers['Location'].split('/')
# check kind
if parts[-3] == kind:
return parts[-2]
return None
# otherwise, get the first valid suggestion
soup = BeautifulSoup(r.content, ['lxml', 'html.parser'])
suggestions = soup.select('#processes div.generalWindowTop a')
logger.debug('Found %d suggestions', len(suggestions))
for suggestion in suggestions:
parts = suggestion.attrs['href'].split('/')
# check kind
if parts[-3] == kind:
return parts[-2]
def list_checked(self, video, languages):
series = None
season = None
episode = None
title = video.title
if isinstance(video, Episode):
series = video.series
season = video.season
episode = video.episode
return self.query(video.path or video.release, languages, get_keywords(video.guess), series, season,
episode, title)
def query(self, filepath, languages=None, keywords=None, series=None, season=None, episode=None, title=None):
logger.debug(u'Getting subtitles for {0} season {1} episode {2} with languages {3}'.format(
series, season, episode, languages))
# Set the correct parameters depending on the kind.
if series and season and episode:
url_series = self._search_url_title(series, 'series')
url = self.server + 'cst/data/series/sb/{}/{}/{}/'.format(url_series, season, episode)
elif title:
url_title = self._search_url_title(title, 'movie')
url = self.server + 'cst/data/movie/sb/{}/'.format(url_title)
else:
raise ServiceError('One or more parameters are missing')
logger.debug('Searching subtitles for title {0}, season {1}, episode {2}'.format(title, season, episode))
response = self.session.get(url)
if response.status_code != 200:
raise ServiceError('Request failed with status code {0}'.format(response.status_code))
# Loop over results.
subtitles = dict()
response_json = json.loads(response.content)
for language_code, language_data in response_json.items():
language_object = self.get_language(language_code)
if language_object in self.languages and language_object in languages:
for quality_data in language_data.values():
for quality, subtitles_data in quality_data.items():
for subtitle_item in subtitles_data.values():
# Read the item.
subtitle_id = subtitle_item['id']
subtitle_key = subtitle_item['key']
release = subtitle_item['subtitle_version']
subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi)
download_link = self.server + 'subtitle/download/{0}/{1}/?v={2}&key={3}'.format(
language_code, subtitle_id, release, subtitle_key)
# Add the release and increment downloaded count if we already have the subtitle.
if subtitle_id in subtitles:
logger.debug('Found additional release {0} for subtitle {1}'.format(
release, subtitle_id))
bisect.insort_left(subtitles[subtitle_id].release, release) # Deterministic order.
continue
# Otherwise create it.
subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(),
download_link, release=to_unicode(release))
logger.debug('Found subtitle %r', subtitle)
subtitles[subtitle_id] = subtitle
return subtitles.values()
def download(self, subtitle):
self.download_zip_file(subtitle.link, subtitle.path)
return subtitle
Service = Subscenter
|
dje42/gdb
|
refs/heads/master
|
gdb/testsuite/gdb.python/source2.py
|
34
|
# This testcase is part of GDB, the GNU debugger.
# Copyright 2008-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
print ('y%ss' % 'e')
|
vlinhart/django_shares_count
|
refs/heads/master
|
shares_count/admin.py
|
1
|
from django.contrib import admin
from shares_count.models import Share
class ShareAdmin(admin.ModelAdmin):
list_display = ('created', 'modified', 'shares', 'content_object')
admin.site.register(Share, ShareAdmin)
|
seanli9jan/tensorflow
|
refs/heads/master
|
tensorflow/compiler/tests/binary_ops_test.py
|
2
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(xla_test.XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
with self.cached_session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllCloseAccordingToType
equality_test(result, expected, rtol=1e-3)
def _testSymmetricBinary(self, op, a, b, expected, equality_test=None):
self._testBinary(op, a, b, expected, equality_test)
self._testBinary(op, b, a, expected, equality_test)
def ListsAreClose(self, result, expected, rtol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllCloseAccordingToType(result[i], expected[i], rtol)
def testFloatOps(self):
for dtype in self.float_types:
# bfloat16 has coarser precision, so use looser deltas for approximate_equal.
if dtype == dtypes.bfloat16.as_numpy_dtype:
a = -1.01
b = 4.1
else:
a = -1.001
b = 4.01
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3, -1.5, -8, 44], dtype=dtype),
np.array([2, -2, 7, -4, 0], dtype=dtype),
expected=np.array(
[1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype))
self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))
self._testBinary(
math_ops.pow,
np.array([1, 2], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([10, 4], dtype=dtype),
np.array([2, 3], dtype=dtype),
expected=np.array([100, 64], dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(2),
np.array([3, 4], dtype=dtype),
expected=np.array([8, 16], dtype=dtype))
self._testBinary(
math_ops.pow,
np.array([[2], [3]], dtype=dtype),
dtype(4),
expected=np.array([[16], [81]], dtype=dtype))
self._testBinary(
math_ops.atan2,
np.array([0, np.sqrt(2), 1, np.sqrt(2), 0], dtype),
np.array([1, np.sqrt(2), 0, -np.sqrt(2), -1], dtype),
expected=np.array(
[0, np.pi / 4, np.pi / 2, np.pi * 3 / 4, np.pi], dtype=dtype))
self._testBinary(
gen_math_ops.reciprocal_grad,
np.array([4, -3, -2, 1], dtype=dtype),
np.array([5, -6, 7, -8], dtype=dtype),
expected=np.array([-80, 54, -28, 8], dtype=dtype))
self._testBinary(
gen_math_ops.sigmoid_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-60, -36, -14, 0], dtype=dtype))
self._testBinary(
gen_math_ops.rsqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-160, -81, -28, -4], dtype=dtype))
self._testBinary(
gen_math_ops.sqrt_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))
self._testBinary(
gen_nn_ops.softplus_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))
self._testBinary(
gen_nn_ops.softsign_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array(
[0.11111111, 0.06122449, 0.03125, 0.01234568], dtype=dtype))
self._testBinary(
gen_math_ops.tanh_grad,
np.array([4, 3, 2, 1], dtype=dtype),
np.array([5, 6, 7, 8], dtype=dtype),
expected=np.array([-75, -48, -21, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.elu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
self._testBinary(
gen_nn_ops.selu_grad,
np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
expected=np.array(
[1.158099340847, 2.7161986816948, 4.67429802254,
4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))
self._testBinary(
gen_nn_ops.relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
self._testBinary(
gen_nn_ops.relu6_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
np.array(
[0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
self._testBinary(
gen_nn_ops.leaky_relu_grad,
np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
expected=np.array([0.2, 0.4, 0.6, 0.8, 1, 6, 7, 8, 9, 10],
dtype=dtype))
self._testBinary(
gen_nn_ops.softmax_cross_entropy_with_logits,
np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),
np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
expected=[
np.array([1.44019, 2.44019], dtype=dtype),
np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
[-0.367941, -0.212856, 0.036883, 0.543914]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
# TODO(b/68813416): Fails with bfloat16.
if dtype != dtypes.bfloat16.as_numpy_dtype:
self._testBinary(
gen_nn_ops.sparse_softmax_cross_entropy_with_logits,
np.array(
[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
[0.9, 1.0, 1.1, 1.2]],
dtype=dtype),
np.array([2, 1, 7], dtype=np.int32),
expected=[
np.array([1.342536, 1.442536, np.nan], dtype=dtype),
np.array(
[[0.213838, 0.236328, -0.738817, 0.288651], [
0.213838, -0.763672, 0.261183, 0.288651
], [np.nan, np.nan, np.nan, np.nan]],
dtype=dtype),
],
equality_test=self.ListsAreClose)
def testIntOps(self):
for dtype in self.signed_int_types:
self._testBinary(
gen_math_ops.truncate_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_and,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b0, 0b101, 0b1000], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_or,
np.array([0b1, 0b101, 0b1000], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b101, 0b1001], dtype=dtype))
self._testSymmetricBinary(
bitwise_ops.bitwise_xor,
np.array([0b1, 0b111, 0b1100], dtype=dtype),
np.array([0b0, 0b101, 0b1001], dtype=dtype),
expected=np.array([0b1, 0b010, 0b0101], dtype=dtype))
lhs = np.array([0, 5, 3, 14], dtype=dtype)
rhs = np.array([5, 0, 7, 11], dtype=dtype)
self._testBinary(
bitwise_ops.left_shift, lhs, rhs,
expected=np.left_shift(lhs, rhs))
self._testBinary(
bitwise_ops.right_shift, lhs, rhs,
expected=np.right_shift(lhs, rhs))
if dtype in [np.int8, np.int16, np.int32, np.int64]:
lhs = np.array([-1, -5, -3, -14, -2], dtype=dtype)
rhs = np.array([5, 0, 1, 11, 36], dtype=dtype)
# HLO has saturating shift behavior.
bits = np.ceil(
np.log(np.iinfo(dtype).max - np.iinfo(dtype).min) / np.log(2))
expected = [
np.right_shift(l, r) if r < bits else np.sign(l)
for l, r in zip(lhs, rhs)
]
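# Illustrative check (added note): for int32, bits is 32, so the last pair
# above (lhs=-2, rhs=36) saturates and the expected value is np.sign(-2) == -1
# instead of the result of an out-of-range shift.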
self._testBinary(bitwise_ops.right_shift, lhs, rhs, expected=expected)
def testNumericOps(self):
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([11, 22], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([6, 7], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[8], [9]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1, 2, 100], dtype=dtype),
np.array([10, 20, -1], dtype=dtype),
expected=np.array([-9, -18, 101], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([4, 3], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[-6], [-5]], dtype=dtype))
# min/max not supported for complex; uint8/int8 are skipped here as well
if dtype not in self.complex_types | {np.uint8, np.int8}:
self._testBinary(
math_ops.maximum,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([10, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 20], dtype=dtype))
self._testBinary(
math_ops.maximum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[10], [7]], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 2], dtype=dtype))
self._testBinary(
math_ops.minimum,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([1, 5], dtype=dtype))
self._testBinary(
math_ops.minimum,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[7], [2]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([10, 40], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5),
np.array([1, 20], dtype=dtype),
expected=np.array([5, 100], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[10], [2]], dtype=dtype),
dtype(7),
expected=np.array([[70], [14]], dtype=dtype))
# Complex support for squared_difference is incidental, see b/68205550
if dtype not in self.complex_types | {np.uint8, np.int8}:
self._testBinary(
math_ops.squared_difference,
np.array([1, 2], dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([81, 324], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
dtype(5),
np.array([1, 2], dtype=dtype),
expected=np.array([16, 9], dtype=dtype))
self._testBinary(
math_ops.squared_difference,
np.array([[1], [2]], dtype=dtype),
dtype(7),
expected=np.array([[36], [25]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[3, 1], [5, 3]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1, 2], [3, 4]]]], dtype=dtype),
np.array([2, -1], dtype=dtype),
expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
if np.int64 in self.numeric_types:
self._testBinary(
math_ops.add,
np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
expected=np.array([1 << 32, 1 << 36, 1 << 32, 1 << 36],
dtype=np.int64))
def testComplexOps(self):
for dtype in self.complex_types:
ctypes = {np.complex64: np.float32}
self._testBinary(
math_ops.complex,
np.array([[[[-1, 2], [2, 0]]]], dtype=ctypes[dtype]),
np.array([[[[2, -3], [0, 4]]]], dtype=ctypes[dtype]),
expected=np.array([[[[-1 + 2j, 2 - 3j], [2, 4j]]]], dtype=dtype))
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array(
[[[[-1 + 2j, 2.00009999 - 3j], [2 - 3j, 3 + 4.01j]]]],
dtype=dtype),
np.array(
[[[[-1.001 + 2j, 2 - 3j], [2 - 3.00009j, 3 + 4j]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3j, -1.5j, -8, 2 + 3j, 2 + 4j], dtype=dtype),
np.array([2, -2, 7j, -4j, 4 - 6j, 1 + 2j], dtype=dtype),
expected=np.array(
[1.5, -1.5j, -0.2142857, -2j, (2 + 3j) / (4 - 6j), 2],
dtype=dtype))
# Test inf/nan scenarios.
self._testBinary(
gen_math_ops.real_div,
np.array([4 + 3j, 4, 3j, -4, -4j, 2 - 3j], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0], dtype=dtype),
expected=np.array(
[
dtype(1 + 1j) / 0,
dtype(1) / 0,
dtype(1j) / 0,
dtype(-1) / 0,
dtype(-1j) / 0,
dtype(1 - 1j) / 0
],
dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(3 + 2j),
dtype(4 - 5j),
expected=np.power(dtype(3 + 2j), dtype(4 - 5j)))
self._testBinary( # empty rhs
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary( # to zero power
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[1, 2], dtype=dtype),
expected=np.ones(shape=[1, 2], dtype=dtype))
lhs = np.array([1 - 2j, 4 + 3j, 2 - 3j, 3, 2j, 1, 4], dtype=dtype)
rhs = np.array([2, 3j, 3 + 4j, 2 + 3j, 3 - 2j, 2, 3 + 3j], dtype=dtype)
scalar = dtype(2 + 2j)
self._testBinary(math_ops.pow, lhs, rhs, expected=np.power(lhs, rhs))
self._testBinary(
math_ops.pow, scalar, rhs, expected=np.power(scalar, rhs))
self._testBinary(math_ops.pow, lhs, scalar, np.power(lhs, scalar))
lhs = np.array([4 + 2j, -3 - 1j, 2j, 1], dtype=dtype)
rhs = np.array([5, -6j, 7 - 3j, -8j], dtype=dtype)
self._testBinary(
gen_math_ops.reciprocal_grad, lhs, rhs, expected=-rhs * lhs * lhs)
self._testBinary(
gen_math_ops.sigmoid_grad, lhs, rhs, expected=rhs * lhs * (1 - lhs))
self._testBinary(
gen_math_ops.rsqrt_grad, lhs, rhs, expected=lhs**3 * rhs / -2)
self._testBinary(
gen_math_ops.sqrt_grad, lhs, rhs, expected=rhs / (2 * lhs))
self._testBinary(
gen_math_ops.tanh_grad, lhs, rhs, expected=rhs * (1 - lhs * lhs))
def testComplexMath(self):
for dtype in self.complex_types:
self._testBinary(
math_ops.add,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([11 - 1j, 22 + 24j], dtype=dtype))
self._testBinary(
math_ops.add,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([6 - 5j, 7 - 3j], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[8 + 3j], [9 + 6j]], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array([-9 + 7j, -18 - 10j], dtype=dtype))
self._testBinary(
math_ops.subtract,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array([4 - 9j, 3 - 11j], dtype=dtype))
self._testBinary(
math_ops.subtract,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array([[-6 - 7j], [-5 - 4j]], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([1 + 3j, 2 + 7j], dtype=dtype),
np.array([10 - 4j, 20 + 17j], dtype=dtype),
expected=np.array(
[(1 + 3j) * (10 - 4j), (2 + 7j) * (20 + 17j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
dtype(5 - 7j),
np.array([1 + 2j, 2 + 4j], dtype=dtype),
expected=np.array(
[(5 - 7j) * (1 + 2j), (5 - 7j) * (2 + 4j)], dtype=dtype))
self._testBinary(
math_ops.multiply,
np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
dtype(7 + 5j),
expected=np.array(
[[(7 + 5j) * (1 - 2j)], [(7 + 5j) * (2 + 1j)]], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([8 - 1j, 2 + 16j], dtype=dtype),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(8 - 1j) / (2 + 4j), (2 + 16j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(1 + 2j),
np.array([2 + 4j, 4 - 8j], dtype=dtype),
expected=np.array(
[(1 + 2j) / (2 + 4j), (1 + 2j) / (4 - 8j)], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([2 + 4j, 4 - 8j], dtype=dtype),
dtype(1 + 2j),
expected=np.array(
[(2 + 4j) / (1 + 2j), (4 - 8j) / (1 + 2j)], dtype=dtype))
# TODO(b/68205550): math_ops.squared_difference shouldn't be supported.
self._testBinary(
nn_ops.bias_add,
np.array([[1 + 2j, 2 + 7j], [3 - 5j, 4 + 2j]], dtype=dtype),
np.array([2 + 6j, -1 - 3j], dtype=dtype),
expected=np.array([[3 + 8j, 1 + 4j], [5 + 1j, 3 - 1j]], dtype=dtype))
self._testBinary(
nn_ops.bias_add,
np.array([[[[1 + 4j, 2 - 1j], [3 + 7j, 4]]]], dtype=dtype),
np.array([2 + 1j, -1 + 2j], dtype=dtype),
expected=np.array(
[[[[3 + 5j, 1 + 1j], [5 + 8j, 3 + 2j]]]], dtype=dtype))
def _testDivision(self, dtype):
"""Test cases for division operators."""
self._testBinary(
math_ops.div,
np.array([10, 20], dtype=dtype),
np.array([10, 2], dtype=dtype),
expected=np.array([1, 10], dtype=dtype))
self._testBinary(
math_ops.div,
dtype(40),
np.array([2, 20], dtype=dtype),
expected=np.array([20, 2], dtype=dtype))
self._testBinary(
math_ops.div,
np.array([[10], [4]], dtype=dtype),
dtype(2),
expected=np.array([[5], [2]], dtype=dtype))
if dtype in [np.float32, np.float64]:
nums = np.arange(-10, 10, .25, dtype=dtype).reshape(80, 1)
divs = np.arange(-3, 3, .25, dtype=dtype).reshape(1, 24)
np_result = np.true_divide(nums, divs)
np_result[:, divs[0] == 0] = 0
self._testBinary(gen_math_ops.div_no_nan, nums, divs, expected=np_result)
if dtype not in self.complex_types: # floordiv unsupported for complex.
self._testBinary(
gen_math_ops.floor_div,
np.array([3, 3, -1, -9, -8], dtype=dtype),
np.array([2, -2, 7, 2, -4], dtype=dtype),
expected=np.array([1, -2, -1, -5, 2], dtype=dtype))
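# Note (added): contrast with truncate_div in testIntOps: for 3 / -2,
# floor_div rounds toward negative infinity (expected -2 above) while
# truncate_div rounds toward zero (expected -1).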
def testIntDivision(self):
for dtype in self.signed_int_types:
self._testDivision(dtype)
def testFloatDivision(self):
for dtype in self.float_types | self.complex_types:
self._testDivision(dtype)
def _testRemainder(self, dtype):
"""Test cases for remainder operators."""
self._testBinary(
gen_math_ops.floor_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, -1, 6, 0], dtype=dtype))
self._testBinary(
gen_math_ops.truncate_mod,
np.array([3, 3, -1, -8], dtype=dtype),
np.array([2, -2, 7, -4], dtype=dtype),
expected=np.array([1, 1, -1, 0], dtype=dtype))
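# Note (added): the expected arrays above show the differing sign
# conventions: floor_mod takes the sign of the divisor (3 mod -2 == -1),
# while truncate_mod takes the sign of the dividend (3 mod -2 == 1).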
def testIntRemainder(self):
for dtype in self.signed_int_types - {np.int8}:
self._testRemainder(dtype)
def testFloatRemainder(self):
for dtype in self.float_types:
self._testRemainder(dtype)
def testLogicalOps(self):
self._testBinary(
math_ops.logical_and,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[False, False], [False, True]], dtype=np.bool))
self._testBinary(
math_ops.logical_or,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[True, True], [False, True]], dtype=np.bool))
def testComparisons(self):
self._testBinary(
math_ops.equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, False], dtype=np.bool))
self._testBinary(
math_ops.equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [False]], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, True], dtype=np.bool))
self._testBinary(
math_ops.not_equal,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [True]], dtype=np.bool))
for greater_op in [math_ops.greater, (lambda x, y: x > y)]:
self._testBinary(
greater_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
greater_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
greater_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [False], [False]], dtype=np.bool))
for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:
self._testBinary(
greater_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
greater_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[True], [True], [False]], dtype=np.bool))
for less_op in [math_ops.less, (lambda x, y: x < y)]:
self._testBinary(
less_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, False, False], dtype=np.bool))
self._testBinary(
less_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, False, True], dtype=np.bool))
self._testBinary(
less_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [False], [True]], dtype=np.bool))
if np.int64 in self.numeric_types:
self._testBinary(
less_op,
np.array([[10], [7], [2], [-1]], dtype=np.int64),
np.int64(7),
expected=np.array(
[[False], [False], [True], [True]], dtype=np.bool))
for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
self._testBinary(
less_equal_op,
np.array([1, 5, 20], dtype=np.float32),
np.array([10, 5, 2], dtype=np.float32),
expected=np.array([True, True, False], dtype=np.bool))
self._testBinary(
less_equal_op,
np.float32(5),
np.array([1, 5, 20], dtype=np.float32),
expected=np.array([False, True, True], dtype=np.bool))
self._testBinary(
less_equal_op,
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [True], [True]], dtype=np.bool))
def testS64Comparisons(self):
for op in [(lambda x, y: x < y), (lambda x, y: x <= y),
(lambda x, y: x >= y), (lambda x, y: x > y)]:
lhs = np.array(
[
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000000),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(0x0000000200000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x7ffffffefff00010),
np.int64(0x7ffffffefff00010),
np.int64(-1),
np.int64(-1)
],
dtype=np.int64)
rhs = np.array(
[
np.int64(0x000000007FFFFFFE),
np.int64(0x000000007FFFFFFF),
np.int64(0x000000007FFFFFFF),
np.int64(0x0000000080000000),
np.int64(0x0000000080000001),
np.int64(0x00000000FFFF0000),
np.int64(0x00000000FFFF0001),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(0x00000000FFFFFFFF),
np.int64(0x0000000100000001),
np.int64(0x0000000100000002),
np.int64(0x0000000100000003),
np.int64(0x0000000200000001),
np.int64(0x0000000200000002),
np.int64(0x0000000200000003),
np.int64(0x0000000300000001),
np.int64(0x0000000300000002),
np.int64(0x0000000300000003),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFE),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000002),
np.int64(-0x7FFFFFFF00000001),
np.int64(0x00000000FFFFFFFF),
np.int64(-0x7FFFFFFF00000001),
np.int64(-2),
np.int64(-1)
],
dtype=np.int64)
expected = np.array([op(l, r) for l, r in zip(lhs, rhs)], dtype=np.bool)
self._testBinary(op, lhs, rhs, expected=expected)
def testBroadcasting(self):
"""Tests broadcasting behavior of an operator."""
for dtype in self.numeric_types:
self._testBinary(
math_ops.add,
np.array(3, dtype=dtype),
np.array([10, 20], dtype=dtype),
expected=np.array([13, 23], dtype=dtype))
self._testBinary(
math_ops.add,
np.array([10, 20], dtype=dtype),
np.array(4, dtype=dtype),
expected=np.array([14, 24], dtype=dtype))
# [1,3] x [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([[10, 20, 30]], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
# [3] * [4,1] => [4,3]
self._testBinary(
math_ops.add,
np.array([10, 20, 30], dtype=dtype),
np.array([[1], [2], [3], [4]], dtype=dtype),
expected=np.array(
[[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
dtype=dtype))
def testFill(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.fill,
np.array([], dtype=np.int32),
dtype(-42),
expected=dtype(-42))
self._testBinary(
array_ops.fill,
np.array([1, 2], dtype=np.int32),
dtype(7),
expected=np.array([[7, 7]], dtype=dtype))
self._testBinary(
array_ops.fill,
np.array([3, 2], dtype=np.int32),
dtype(50),
expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))
# Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.
def _testMatMul(self, op):
for dtype in self.float_types:
self._testBinary(
op,
np.array([[-0.25]], dtype=dtype),
np.array([[8]], dtype=dtype),
expected=np.array([[-2]], dtype=dtype))
self._testBinary(
op,
np.array([[100, 10, 0.5]], dtype=dtype),
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
expected=np.array([[123, 354]], dtype=dtype))
self._testBinary(
op,
np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
np.array([[100], [10]], dtype=dtype),
expected=np.array([[130], [250], [680]], dtype=dtype))
self._testBinary(
op,
np.array([[1000, 100], [10, 1]], dtype=dtype),
np.array([[1, 2], [3, 4]], dtype=dtype),
expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))
self._testBinary(
op,
np.array([], dtype=dtype).reshape((2, 0)),
np.array([], dtype=dtype).reshape((0, 3)),
expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))
def testMatMul(self):
self._testMatMul(math_ops.matmul)
# TODO(phawkins): failing on GPU, no registered kernel.
def DISABLED_testSparseMatMul(self):
# Binary wrappers for sparse_matmul with different hints
def SparseMatmulWrapperTF(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True)
def SparseMatmulWrapperFT(a, b):
return math_ops.sparse_matmul(a, b, b_is_sparse=True)
def SparseMatmulWrapperTT(a, b):
return math_ops.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)
self._testMatMul(math_ops.sparse_matmul)
self._testMatMul(SparseMatmulWrapperTF)
self._testMatMul(SparseMatmulWrapperFT)
self._testMatMul(SparseMatmulWrapperTT)
def testBatchMatMul(self):
# Same tests as for tf.matmul above.
self._testMatMul(math_ops.matmul)
# Tests with batches of matrices.
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]]], dtype=np.float32),
np.array([[[8]]], dtype=np.float32),
expected=np.array([[[-2]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([[[-0.25]], [[4]]], dtype=np.float32),
np.array([[[8]], [[2]]], dtype=np.float32),
expected=np.array([[[-2]], [[8]]], dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array(
[[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
[[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
dtype=np.float32),
np.array(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],
[[55, 66], [77, 88]]]],
dtype=np.float32),
expected=np.array(
[[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
[[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((2, 2, 0)),
np.array([], dtype=np.float32).reshape((2, 0, 3)),
expected=np.array(
[[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float32))
self._testBinary(
math_ops.matmul,
np.array([], dtype=np.float32).reshape((0, 2, 4)),
np.array([], dtype=np.float32).reshape((0, 4, 3)),
expected=np.array([], dtype=np.float32).reshape(0, 2, 3))
# Regression test for b/31472796.
if hasattr(np, "matmul"):
x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))
self._testBinary(
lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
x, x,
expected=np.matmul(x, x.transpose([0, 1, 3, 2])))
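# Note (added): for real-valued inputs adjoint_b reduces to transposing the
# last two dimensions, which is why the expected value uses
# x.transpose([0, 1, 3, 2]).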
def testExpandDims(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.expand_dims,
dtype(7),
np.int32(0),
expected=np.array([7], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([42], dtype=dtype),
np.int32(0),
expected=np.array([[42]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([], dtype=dtype),
np.int32(0),
expected=np.array([[]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(0),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(1),
expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(2),
expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
self._testBinary(
array_ops.expand_dims,
np.array([[[1, 2], [3, 4]]], dtype=dtype),
np.int32(3),
expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.pad,
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[1, 2], [2, 1]], dtype=np.int32),
expected=np.array(
[[0, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 3, 0],
[0, 0, 4, 5, 6, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
dtype=dtype))
self._testBinary(
lambda x, y: array_ops.pad(x, y, constant_values=7),
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array(
[[0, 3], [2, 1]], dtype=np.int32),
expected=np.array(
[[7, 7, 1, 2, 3, 7],
[7, 7, 4, 5, 6, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7],
[7, 7, 7, 7, 7, 7]],
dtype=dtype))
def testSymmetricMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "SYMMETRIC")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
2,
2,
], [3, 3]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[6, 5, 4, 4, 5, 6, 6, 5, 4], #
[3, 2, 1, 1, 2, 3, 3, 2, 1], #
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
def testReflectMirrorPad(self):
mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "REFLECT")
for dtype in self.numeric_types:
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
],
dtype=dtype),
np.array([[
1,
1,
], [2, 2]], dtype=np.int32),
expected=np.array(
[
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1], #
[6, 5, 4, 5, 6, 5, 4], #
[3, 2, 1, 2, 3, 2, 1]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
np.array([[0, 0], [0, 0]], dtype=np.int32),
expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9]
],
dtype=dtype),
np.array([[2, 2], [0, 0]], dtype=np.int32),
expected=np.array(
[
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3], #
[4, 5, 6], #
[7, 8, 9], #
[4, 5, 6], #
[1, 2, 3]
],
dtype=dtype))
self._testBinary(
mirror_pad,
np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype),
np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),
expected=np.array(
[
[
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
[5, 4, 5, 6, 5], #
[2, 1, 2, 3, 2], #
],
[
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
[11, 10, 11, 12, 11], #
[8, 7, 8, 9, 8], #
]
],
dtype=dtype))
def testReshape(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.reshape,
np.array([], dtype=dtype),
np.array([0, 4], dtype=np.int32),
expected=np.zeros(shape=[0, 4], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 6], dtype=np.int32),
expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([6, -1], dtype=np.int32),
expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([2, -1], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
self._testBinary(
array_ops.reshape,
np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
np.array([-1, 3], dtype=np.int32),
expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
def testSplit(self):
for dtype in self.numeric_types:
for axis in [0, -3]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1], [2]]], dtype=dtype),
np.array([[[3], [4]]], dtype=dtype),
np.array([[[5], [6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
for axis in [1, -2]:
self._testBinary(
lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
np.int32(axis),
np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
dtype=dtype),
expected=[
np.array([[[1]], [[3]], [[5]]], dtype=dtype),
np.array([[[2]], [[4]], [[6]]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def splitvOp(x, y): # pylint: disable=invalid-name
return array_ops.split(value=y, num_or_size_splits=[2, 3], axis=x)
for axis in [1, -1]:
self._testBinary(
splitvOp,
np.int32(axis),
np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=dtype),
expected=[
np.array([[0, 1], [5, 6]], dtype=dtype),
np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype),
],
equality_test=self.ListsAreClose)
def testTile(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.tile,
np.array([[6], [3], [4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([6, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6, 3, 4]], dtype=dtype),
np.array([2, 0], dtype=np.int32),
expected=np.empty([2, 0], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[6]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[6, 6]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1], [2]], dtype=dtype),
np.array([1, 2], dtype=np.int32),
expected=np.array([[1, 1], [2, 2]], dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([3, 2], dtype=np.int32),
expected=np.array(
[[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[3, 4]],
dtype=dtype))
self._testBinary(
array_ops.tile,
np.array([[1, 2]], dtype=dtype),
np.array([3, 1], dtype=np.int32),
expected=np.array(
[[1, 2],
[1, 2],
[1, 2]],
dtype=dtype))
def testTranspose(self):
for dtype in self.numeric_types:
self._testBinary(
array_ops.transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1, 2], [3, 4]], dtype=dtype))
self._testBinary(
array_ops.transpose,
np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1, 3], [2, 4]], dtype=dtype))
def testConjugateTranspose(self):
for dtype in self.complex_types:
self._testBinary(
array_ops.conjugate_transpose,
np.zeros(shape=[1, 0, 4], dtype=dtype),
np.array([1, 2, 0], dtype=np.int32),
expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([0, 1], dtype=np.int32),
expected=np.array([[1 + 1j, 2 - 2j], [3 + 3j, 4 - 4j]], dtype=dtype))
self._testBinary(
array_ops.conjugate_transpose,
np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
np.array([1, 0], dtype=np.int32),
expected=np.array([[1 + 1j, 3 + 3j], [2 - 2j, 4 - 4j]], dtype=dtype))
def testCross(self):
for dtype in self.float_types:
self._testBinary(
gen_math_ops.cross,
np.zeros((4, 3), dtype=dtype),
np.zeros((4, 3), dtype=dtype),
expected=np.zeros((4, 3), dtype=dtype))
self._testBinary(
gen_math_ops.cross,
np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype),
expected=np.array([-3, 6, -3], dtype=dtype))
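# Hand check (added): [1, 2, 3] x [4, 5, 6] =
# (2*6 - 3*5, 3*4 - 1*6, 1*5 - 2*4) = (-3, 6, -3), matching expected above.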
self._testBinary(
gen_math_ops.cross,
np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),
np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),
expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))
def testBroadcastArgs(self):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 5], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 3, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 3, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([2, 1, 5], dtype=np.int32),
np.array([3, 1], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([3, 1], dtype=np.int32),
np.array([2, 1, 5], dtype=np.int32),
expected=np.array([2, 3, 5], dtype=np.int32))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Incompatible shapes"):
self._testBinary(array_ops.broadcast_dynamic_shape,
np.array([1, 2, 3], dtype=np.int32),
np.array([4, 5, 6], dtype=np.int32),
expected=None)
def testMatrixSetDiag(self):
for dtype in self.numeric_types:
# Square
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
dtype=dtype),
np.array([1.0, 2.0, 3.0], dtype=dtype),
expected=np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0], [1.0, 1.0, 3.0]],
dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0], [2.0, 0.0, 6.0]]],
dtype=dtype),
np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]], dtype=dtype),
expected=np.array(
[[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0], [1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0], [2.0, 0.0, -6.0]]],
dtype=dtype))
# Rectangular
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]], dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]], dtype=dtype),
np.array([3.0, 4.0], dtype=dtype),
expected=np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]], dtype=dtype))
self._testBinary(
array_ops.matrix_set_diag,
np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]], dtype=dtype),
np.array([[-1.0, -2.0], [-4.0, -5.0]],
dtype=dtype),
expected=np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]],
dtype=dtype))
def testBroadcastTo(self):
for dtype in self.all_types:
x = np.random.randint(0, high=100, size=[2, 3])
self._testBinary(
array_ops.broadcast_to,
x,
np.array([2, 3], dtype=np.int32),
expected=x)
self._testBinary(
array_ops.broadcast_to,
x,
np.array([6, 6], dtype=np.int32),
expected=np.tile(x, [3, 2]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 4, 3], dtype=np.int32),
expected=np.tile(x, [7, 2, 1]))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 0, 3], dtype=np.int32),
expected=np.zeros([7, 0, 3], dtype=dtype))
self._testBinary(
array_ops.broadcast_to,
x,
np.array([7, 1, 2, 9], dtype=np.int32),
expected=np.tile(x, [7, 1, 1, 3]))
self._testBinary(
array_ops.broadcast_to,
np.zeros([2, 0], dtype=dtype),
np.array([4, 0], dtype=np.int32),
expected=np.zeros([4, 0], dtype=dtype))
x = np.arange(3).reshape((3, 1, 1, 1)).astype(dtype)
self._testBinary(
array_ops.broadcast_to,
x,
np.array((3, 7, 8, 9), dtype=np.int32),
expected=np.tile(x, (1, 7, 8, 9)))
if __name__ == "__main__":
googletest.main()
|
timj/scons
|
refs/heads/master
|
test/Exit.py
|
1
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the explicit Exit() function.
"""
import os.path
import TestSCons
test = TestSCons.TestSCons()
test.subdir('subdir')
subdir_foo_in = os.path.join('subdir', 'foo.in')
subdir_foo_out = os.path.join('subdir', 'foo.out')
test.write('SConstruct', """\
print("SConstruct, Exit()")
Exit()
""")
test.run(stdout = """\
scons: Reading SConscript files ...
SConstruct, Exit()
""")
test.write('SConstruct', """\
env = Environment()
print("SConstruct, env.Exit()")
env.Exit()
""")
test.run(stdout = """\
scons: Reading SConscript files ...
SConstruct, env.Exit()
""")
test.write('SConstruct', """\
print("SConstruct")
Exit(7)
""")
test.run(status = 7, stdout = """\
scons: Reading SConscript files ...
SConstruct
""")
test.write('SConstruct', """\
print("SConstruct")
SConscript('subdir/SConscript')
""")
test.write(['subdir', 'SConscript'], """\
print("subdir/SConscript")
Exit()
""")
test.run(stdout = """\
scons: Reading SConscript files ...
SConstruct
subdir/SConscript
""")
test.write(['subdir', 'SConscript'], """\
print("subdir/SConscript")
Exit(17)
""")
test.run(status = 17, stdout = """\
scons: Reading SConscript files ...
SConstruct
subdir/SConscript
""")
test.write('SConstruct', """\
SConscript('subdir/SConscript')
""")
test.write(['subdir', 'SConscript'], """\
def exit_builder(env, source, target):
target = str(target[0])
f = open(target, "wb")
for src in source:
f.write(open(str(src), "rb").read())
f.close()
Exit(27)
env = Environment(BUILDERS = {'my_exit' : Builder(action=exit_builder)})
env.my_exit('foo.out', 'foo.in')
""")
test.write(['subdir', 'foo.in'], "subdir/foo.in\n")
test.run(status = 27,
stdout = test.wrap_stdout("""\
exit_builder(["%s"], ["%s"])
""" % (subdir_foo_out, subdir_foo_in), error=1),
stderr = """\
scons: *** [%s] Explicit exit, status 27
""" % (subdir_foo_out))
test.must_match(['subdir', 'foo.out'], "subdir/foo.in\n")
test.write('SConstruct', """\
def exit_scanner(node, env, target):
Exit(37)
exitscan = Scanner(function = exit_scanner, skeys = ['.k'])
def cat(env, source, target):
target = str(target[0])
outf = open(target, 'wb')
for src in source:
outf.write(open(str(src), "rb").read())
outf.close()
env = Environment(BUILDERS={'Cat':Builder(action=cat)})
env.Append(SCANNERS = [exitscan])
env.Cat('foo', 'foo.k')
""")
test.write('foo.k', "foo.k\n")
test.run(status = 37,
stdout = test.wrap_stdout("", error=1),
stderr = "scons: *** [foo] Explicit exit, status 37\n")
#
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
EnderCheng/pyspider
|
refs/heads/master
|
tests/test_run.py
|
3
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2014-11-21 22:32:35
import os
import six
import json
import shutil
import unittest2 as unittest
from pyspider import run
from pyspider.libs.utils import ObjectDict
class TestRun(unittest.TestCase):
@classmethod
def setUpClass(cls):
shutil.rmtree('./data/tests', ignore_errors=True)
os.makedirs('./data/tests')
@classmethod
def tearDownClass(cls):
shutil.rmtree('./data/tests', ignore_errors=True)
def test_10_cli(self):
ctx = run.cli.make_context('test', [], None, obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
self.assertEqual(ctx.obj.debug, False)
for db in ('taskdb', 'projectdb', 'resultdb'):
self.assertIsNotNone(getattr(ctx.obj, db))
for name in ('newtask_queue', 'status_queue', 'scheduler2fetcher',
'fetcher2processor', 'processor2result'):
self.assertIsNotNone(getattr(ctx.obj, name))
self.assertEqual(len(ctx.obj.instances), 0)
def test_20_cli_config(self):
with open('./data/tests/config.json', 'w') as fp:
json.dump({
'debug': True,
'taskdb': 'mysql+taskdb://localhost:23456/taskdb',
'amqp-url': 'amqp://guest:guest@localhost:23456/%%2F'
}, fp)
ctx = run.cli.make_context('test',
['--config', './data/tests/config.json'],
None, obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
self.assertEqual(ctx.obj.debug, True)
import mysql.connector
with self.assertRaises(mysql.connector.InterfaceError):
ctx.obj.taskdb
with self.assertRaisesRegexp(Exception, 'Connection refused'):
ctx.obj.newtask_queue
def test_30_cli_command_line(self):
ctx = run.cli.make_context(
'test',
['--projectdb', 'mongodb+projectdb://localhost:23456/projectdb'],
None,
obj=ObjectDict(testing_mode=True)
)
ctx = run.cli.invoke(ctx)
from pymongo.errors import ConnectionFailure
with self.assertRaises(ConnectionFailure):
ctx.obj.projectdb
def test_40_cli_env(self):
try:
os.environ['RESULTDB'] = 'sqlite+resultdb://'
ctx = run.cli.make_context('test', [], None,
obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
from pyspider.database.sqlite import resultdb
self.assertIsInstance(ctx.obj.resultdb, resultdb.ResultDB)
finally:
del os.environ['RESULTDB']
@unittest.skipIf(os.environ.get('IGNORE_RABBITMQ'), 'no rabbitmq server for test.')
def test_50_docker_rabbitmq(self):
try:
os.environ['RABBITMQ_NAME'] = 'rabbitmq'
os.environ['RABBITMQ_PORT_5672_TCP_ADDR'] = 'localhost'
os.environ['RABBITMQ_PORT_5672_TCP_PORT'] = '5672'
ctx = run.cli.make_context('test', [], None,
obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
queue = ctx.obj.newtask_queue
queue.put('abc')
queue.delete()
except Exception as e:
self.assertIsNone(e)
finally:
del os.environ['RABBITMQ_NAME']
del os.environ['RABBITMQ_PORT_5672_TCP_ADDR']
del os.environ['RABBITMQ_PORT_5672_TCP_PORT']
@unittest.skipIf(os.environ.get('IGNORE_MONGODB'), 'no mongodb server for test.')
def test_60_docker_mongodb(self):
try:
os.environ['MONGODB_NAME'] = 'mongodb'
os.environ['MONGODB_PORT_27017_TCP_ADDR'] = 'localhost'
os.environ['MONGODB_PORT_27017_TCP_PORT'] = '27017'
ctx = run.cli.make_context('test', [], None,
obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
ctx.obj.resultdb
except Exception as e:
self.assertIsNone(e)
finally:
del os.environ['MONGODB_NAME']
del os.environ['MONGODB_PORT_27017_TCP_ADDR']
del os.environ['MONGODB_PORT_27017_TCP_PORT']
@unittest.skipIf(os.environ.get('IGNORE_MYSQL'), 'no mysql server for test.')
def test_70_docker_mysql(self):
try:
os.environ['MYSQL_NAME'] = 'mysql'
os.environ['MYSQL_PORT_3306_TCP_ADDR'] = 'localhost'
os.environ['MYSQL_PORT_3306_TCP_PORT'] = '3306'
ctx = run.cli.make_context('test', [], None,
obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
ctx.obj.resultdb
except Exception as e:
self.assertIsNone(e)
finally:
del os.environ['MYSQL_NAME']
del os.environ['MYSQL_PORT_3306_TCP_ADDR']
del os.environ['MYSQL_PORT_3306_TCP_PORT']
def test_80_docker_phantomjs(self):
try:
os.environ['PHANTOMJS_NAME'] = 'phantomjs'
os.environ['PHANTOMJS_PORT'] = 'tcp://binux:25678'
ctx = run.cli.make_context('test', [], None,
obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
self.assertEqual(ctx.obj.phantomjs_proxy, 'binux:25678')
except Exception as e:
self.assertIsNone(e)
finally:
del os.environ['PHANTOMJS_NAME']
del os.environ['PHANTOMJS_PORT']
def test_90_docker_scheduler(self):
try:
os.environ['SCHEDULER_NAME'] = 'scheduler'
os.environ['SCHEDULER_PORT_23333_TCP'] = 'tcp://binux:25678'
ctx = run.cli.make_context('test', [], None,
obj=ObjectDict(testing_mode=True))
ctx = run.cli.invoke(ctx)
webui = run.cli.get_command(ctx, 'webui')
webui_ctx = webui.make_context('webui', [], ctx)
app = webui.invoke(webui_ctx)
rpc = app.config['scheduler_rpc']
self.assertEqual(rpc._ServerProxy__host, 'binux:25678')
except Exception as e:
self.assertIsNone(e)
finally:
del os.environ['SCHEDULER_NAME']
del os.environ['SCHEDULER_PORT_23333_TCP']
def test_a10_all(self):
pass
|
leejw51/BumblebeeNet
|
refs/heads/master
|
Test/MeanSquared.py
|
1
|
import numpy as np
def mean_squared_error(y, t):
return 0.5 * np.sum((y - t) ** 2)
t = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
y = np.array([0.1, 0.05, 0.6, 0, 0.05, 0.1, 0, 0.1, 0, 0])
print(t)
print(y)
m = mean_squared_error(y, t)
print("t shape=", t.shape)
print("y shape=", y.shape)
print(m)
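# Hand check (added, not in the original script): y - t is
# [0.1, 0.05, -0.4, 0, 0.05, 0.1, 0, 0.1, 0, 0]; the sum of squares is
# 0.195, so the printed mean squared error is 0.5 * 0.195 = 0.0975.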
|
ludwiktrammer/odoo
|
refs/heads/9.0
|
openerp/addons/test_assetsbundle/tests/test_assetsbundle.py
|
26
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.tests import HttpCase
from openerp.tests.common import TransactionCase
from openerp.addons.base.ir.ir_qweb import AssetsBundle
from openerp.modules.module import get_resource_path
from mock import patch
from os import utime
import time
class TestJavascriptAssetsBundle(TransactionCase):
def setUp(self):
super(TestJavascriptAssetsBundle, self).setUp()
self.jsbundle_xmlid = 'test_assetsbundle.bundle1'
self.cssbundle_xmlid = 'test_assetsbundle.bundle2'
def _any_ira_for_bundle(self, type):
""" Returns all ir.attachments associated to a bundle, regardless of the verion.
"""
bundle = self.jsbundle_xmlid if type == 'js' else self.cssbundle_xmlid
return self.registry['ir.attachment'].search(self.cr, self.uid,[
('url', '=like', '/web/content/%-%/{0}%.{1}'.format(bundle, type))
])
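# Illustrative note (added, hypothetical id/version values): the '=like'
# pattern above matches attachment URLs of the form
# '/web/content/<id>-<version>/test_assetsbundle.bundle1.js'.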
def test_01_generation(self):
""" Checks that a bundle creates an ir.attachment record when its `js` method is called
for the first time.
"""
self.bundle = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry)
# there shouldn't be any attachment associated to this bundle
self.assertEquals(len(self._any_ira_for_bundle('js')), 0)
self.assertEquals(len(self.bundle.get_attachments('js')), 0)
# trigger the first generation and, thus, the first save in database
self.bundle.js()
# there should be one attachment associated to this bundle
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
self.assertEquals(len(self.bundle.get_attachments('js')), 1)
def test_02_access(self):
""" Checks that the bundle's cache is working, i.e. that the bundle creates only one
ir.attachment record when rendered multiple times.
"""
bundle0 = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry)
bundle0.js()
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
version0 = bundle0.version
ira0 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('js')[0])
date0 = ira0.create_date
bundle1 = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry)
bundle1.js()
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
version1 = bundle1.version
ira1 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('js')[0])
date1 = ira1.create_date
self.assertEquals(version0, version1)
self.assertEquals(date0, date1)
def test_03_date_invalidation(self):
""" Checks that a bundle is invalidated when one of its assets' modification date is changed.
"""
bundle0 = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry)
bundle0.js()
last_modified0 = bundle0.last_modified
version0 = bundle0.version
path = get_resource_path('test_assetsbundle', 'static', 'src', 'js', 'test_jsfile1.js')
utime(path, None) # touch
bundle1 = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry)
bundle1.js()
last_modified1 = bundle1.last_modified
version1 = bundle1.version
self.assertNotEquals(last_modified0, last_modified1)
self.assertNotEquals(version0, version1)
# check if the previous attachment is correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
def test_04_content_invalidation(self):
""" Checks that a bundle is invalidated when its content is modified by adding a file to
source.
"""
bundle0 = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry)
bundle0.js()
html0 = bundle0.html
version0 = bundle0.version
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
view_arch = """
<data>
<xpath expr="." position="inside">
<script type="text/javascript" src="/test_assetsbundle/static/src/js/test_jsfile4.js"/>
</xpath>
</data>
"""
bundle_id = self.browse_ref(self.jsbundle_xmlid).id
newid = self.registry['ir.ui.view'].create(self.cr, self.uid, {
'name': 'test bundle inheritance',
'type': 'qweb',
'arch': view_arch,
'inherit_id': bundle_id,
})
bundle1 = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={'check_view_ids': [newid]}, registry=self.registry)
bundle1.js()
html1 = bundle1.html
version1 = bundle1.version
self.assertNotEquals(html0, html1)
self.assertNotEquals(version0, version1)
        # check if the previous attachments are correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('js')), 1)
def test_05_debug(self):
""" Checks that a bundle rendered in debug mode outputs non-minified assets.
"""
debug_bundle = AssetsBundle(self.jsbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry)
content = debug_bundle.to_html(debug=True)
# find back one of the original asset file
self.assertIn('/test_assetsbundle/static/src/js/test_jsfile1.js', content)
# there shouldn't be any assets created in debug mode
self.assertEquals(len(self._any_ira_for_bundle('js')), 0)
def test_06_paginated_css_generation1(self):
""" Checks that a bundle creates enough ir.attachment records when its `css` method is called
for the first time while the number of css rules exceed the limit.
"""
# note: changing the max_css_rules of a bundle does not invalidate its attachments
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
self.bundle.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
self.assertEquals(len(self.bundle.get_attachments('css')), 3)
def test_07_paginated_css_generation2(self):
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=2)
self.bundle.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 2)
self.assertEquals(len(self.bundle.get_attachments('css')), 2)
def test_08_paginated_css_generation3(self):
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=3)
self.bundle.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 1)
self.assertEquals(len(self.bundle.get_attachments('css')), 1)
def test_09_paginated_css_access(self):
""" Checks that the bundle's cache is working, i.e. that a bundle creates only enough
ir.attachment records when rendered multiple times.
"""
bundle0 = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
bundle0.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
version0 = bundle0.version
ira0 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[0])
date0 = ira0.create_date
ira1 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[1])
date1 = ira1.create_date
ira2 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[2])
date2 = ira2.create_date
bundle1 = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
bundle1.css()
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
version1 = bundle1.version
        ira3 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[0])
        date3 = ira3.create_date
        ira4 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[1])
        date4 = ira4.create_date
        ira5 = self.registry['ir.attachment'].browse(self.cr, self.uid, self._any_ira_for_bundle('css')[2])
        date5 = ira5.create_date
self.assertEquals(version0, version1)
self.assertEquals(date0, date3)
self.assertEquals(date1, date4)
self.assertEquals(date2, date5)
def test_10_paginated_css_date_invalidation(self):
""" Checks that a bundle is invalidated when one of its assets' modification date is changed.
"""
bundle0 = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
bundle0.css()
last_modified0 = bundle0.last_modified
version0 = bundle0.version
path = get_resource_path('test_assetsbundle', 'static', 'src', 'css', 'test_cssfile1.css')
utime(path, None) # touch
bundle1 = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
bundle1.css()
last_modified1 = bundle1.last_modified
version1 = bundle1.version
self.assertNotEquals(last_modified0, last_modified1)
self.assertNotEquals(version0, version1)
# check if the previous attachment is correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
def test_11_paginated_css_content_invalidation(self):
""" Checks that a bundle is invalidated when its content is modified by adding a file to
source.
"""
bundle0 = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
bundle0.css()
html0 = bundle0.html
version0 = bundle0.version
self.assertEquals(len(self._any_ira_for_bundle('css')), 3)
view_arch = """
<data>
<xpath expr="." position="inside">
<link rel="stylesheet" href="/test_assetsbundle/static/src/css/test_cssfile2.css"/>
</xpath>
</data>
"""
bundle_id = self.browse_ref(self.cssbundle_xmlid).id
newid = self.registry['ir.ui.view'].create(self.cr, self.uid, {
'name': 'test bundle inheritance',
'type': 'qweb',
'arch': view_arch,
'inherit_id': bundle_id,
})
bundle1 = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={'check_view_ids': [newid]}, registry=self.registry, max_css_rules=1)
bundle1.css()
html1 = bundle1.html
version1 = bundle1.version
self.assertNotEquals(html0, html1)
self.assertNotEquals(version0, version1)
        # check if the previous attachments are correctly cleaned
self.assertEquals(len(self._any_ira_for_bundle('css')), 4)
def test_12_paginated_css_debug(self):
""" Check that a bundle in debug mode outputs non-minified assets.
"""
debug_bundle = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
content = debug_bundle.to_html(debug=True)
# find back one of the original asset file
self.assertIn('/test_assetsbundle/static/src/css/test_cssfile1.css', content)
# there shouldn't be any assets created in debug mode
self.assertEquals(len(self._any_ira_for_bundle('css')), 0)
def test_13_paginated_css_order(self):
        # self.cssbundle_xmlid contains 3 rules
self.bundle = AssetsBundle(self.cssbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry, max_css_rules=1)
stylesheets = self.bundle.css()
self.assertTrue(stylesheets[0].url.endswith('.0.css'))
self.assertTrue(stylesheets[1].url.endswith('.1.css'))
self.assertTrue(stylesheets[2].url.endswith('.2.css'))
class TestAssetsBundleInBrowser(HttpCase):
def test_01_js_interpretation(self):
""" Checks that the javascript of a bundle is correctly interpreted.
"""
self.phantom_js(
"/test_assetsbundle/js",
"a + b + c === 6 ? console.log('ok') : console.log('error')",
login="admin"
)
def test_02_js_interpretation_inline(self):
""" Checks that the javascript of a bundle is correctly interpretet when mixed with inline.
"""
with self.registry.cursor() as test_cursor:
view_arch = """
<data>
<xpath expr="." position="inside">
<script type="text/javascript">
var d = 4;
</script>
</xpath>
</data>
"""
self.registry['ir.ui.view'].create(test_cursor, self.uid, {
'name': 'test bundle inheritance inline js',
'type': 'qweb',
'arch': view_arch,
'inherit_id': self.browse_ref('test_assetsbundle.bundle1').id,
})
self.phantom_js(
"/test_assetsbundle/js",
"a + b + c + d === 10 ? console.log('ok') : console.log('error')",
login="admin",
)
class TestAssetsBundleWithIRAMock(TransactionCase):
def setUp(self):
super(TestAssetsBundleWithIRAMock, self).setUp()
self.lessbundle_xmlid = 'test_assetsbundle.bundle3'
self.patcher1 = patch('openerp.addons.base.ir.ir_attachment.ir_attachment.create', wraps=self.registry['ir.attachment'].create)
self.patcher2 = patch('openerp.addons.base.ir.ir_attachment.ir_attachment.unlink', wraps=self.registry['ir.attachment'].unlink)
self.mock_ira_create = self.patcher1.start()
self.mock_ira_unlink = self.patcher2.start()
def _bundle(self, should_create, should_unlink):
self.mock_ira_create.reset_mock()
self.mock_ira_unlink.reset_mock()
AssetsBundle(self.lessbundle_xmlid, cr=self.cr, uid=self.uid, context={}, registry=self.registry).to_html(debug=True)
self.assertEquals(self.mock_ira_create.call_count, int(should_create))
self.assertEquals(self.mock_ira_unlink.call_count, int(should_unlink))
def test_01_debug_mode_assets(self):
""" Checks that the ir.attachments records created for compiled less assets in debug mode
are correctly invalidated.
"""
# Compile for the first time
self._bundle(True, False)
# Compile a second time, without changes
self._bundle(False, False)
# Touch the file and compile a third time
path = get_resource_path('test_assetsbundle', 'static', 'src', 'less', 'test_lessfile1.less')
t = time.time() + 5
utime(path, (t, t)) # touch
self._bundle(True, True)
        # Because we are in the same transaction since the beginning of the test, the first asset
        # created and the second one have the same write_date, even though the file's last
        # modified date has really changed. If we do not bump the write_date to a later date,
        # we cannot reproduce the case where this bundle is compiled again without changing
        # anything.
self.cr.execute("update ir_attachment set write_date=clock_timestamp() + interval '10 seconds' where id = (select max(id) from ir_attachment)")
# Compile a fourth time, without changes
self._bundle(False, False)
def tearDown(self):
self.patcher2.stop()
self.patcher1.stop()
super(TestAssetsBundleWithIRAMock, self).tearDown()
|
jrhege/restaq
|
refs/heads/master
|
restaq/models.py
|
1
|
""" The models module contains database-agnostic descriptors of collections,
collection components, and database operations
"""
import weakref
import exc
import expressions
from helpers import Lookup
class DatabaseOperation(object):
""" Generic operation to be performed against the database """
def __init__(self, func_name, *args, **kwargs):
""" Initialize a new DatabaseOperation
:param func_name: Name of function for Database to execute
:param args: Args for database operation function
:param kwargs: Keyword args for database operation function
"""
self.func_name = func_name
self.args = args
self.kwargs = kwargs
class CollectionComponent(object):
""" Represents a generic component (key, field, index, etc) of a collection """
def __init__(self, name):
""" Initialize a generic collection component representation
:param name: Name of the component
"""
self.name = name
self._collection = None
def associate_with_collection(self, collection):
""" Associate this index with a collection
:param collection: Collection object to associate it with
"""
self._collection = weakref.proxy(collection)
class Index(CollectionComponent):
""" Represents a index-style object on a collection """
# Prefix to apply to an auto-generated name
generated_name_prefix = u"IDX_"
def __init__(self, field_names, name=None):
""" Initialize an index representation
:param name: Name of the index
:param field_names: Iterable of names of the fields that make up the index
"""
# Holds names of key index fields
self.field_names = self._field_names_to_tuple(field_names)
# Holds reference to the function that reads an item's index key
self._key_generating_func = self._choose_key_generating_func()
super(Index, self).__init__(
name=name or self._generate_name()
)
def _generate_name(self):
""" Generate a default name for this index
:return: Name for this index
"""
# Combine field names into an underscore delimited string
field_names = u"_".join(self.field_names).upper()
return u"{prefix}{field_names}".format(
prefix=self.generated_name_prefix,
field_names=field_names
)
def _choose_key_generating_func(self):
""" Select an appropriate function for generating item keys
:return: Function
"""
# Function chosen depends on how many fields
# make up the index
if len(self.field_names) == 1:
return self._generate_scalar_key
else:
return self._generate_compound_key
def item_index_key(self, item):
""" Read the index key of an item
:param item: Item to generate a key for
:return: Key for the item as scalar or tuple
"""
return self._key_generating_func(
item=item,
field_names=self.field_names
)
@staticmethod
def _generate_scalar_key(item, field_names):
""" Generate a simple (non-compound) key
:param item: Item to generate key for
:param field_names: Tuple of length 1 containing name of field to use for the key
:return: Unique key for the item
"""
return item[field_names[0]]
@staticmethod
def _generate_compound_key(item, field_names):
""" Generate a multi-part key
:param item: Item to generate key for
:param field_names: Tuple of field names to read for key
:return: Item key as a tuple
"""
return tuple(
item[key_part]
for key_part
in field_names
)
@staticmethod
def _field_names_to_tuple(field_names):
""" Convert an iterable or string of field name(s) to a tuple """
if isinstance(field_names, basestring):
return field_names,
else:
return tuple(field_names)
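# Illustrative key generation (hypothetical field names; a sketch only):
#   idx = Index(field_names=("last_name", "first_name"))
#   idx.name                                               # u"IDX_LAST_NAME_FIRST_NAME"
#   idx.item_index_key({"last_name": u"Doe", "first_name": u"Jane"})  # (u"Doe", u"Jane")
#   Index("email").item_index_key({"email": u"a@b.c"})     # scalar key u"a@b.c"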
class PrimaryKey(Index):
""" Represents a primary key (unique record identifier) """
# Prefix to apply to an auto-generated name
generated_name_prefix = u"PK_"
class Field(CollectionComponent):
""" Represents a field in a collection item """
def __init__(self, name, data_types=None, required=False, nullable=True):
""" Initialize a field representation
:param name: Name of the field
:param data_types: Set of Python data types of the field
:param required: If True, inserts will require this field be provided
:param nullable: If True, inserts and updates will accept Nones for values
"""
super(Field, self).__init__(
name=name
)
self.data_types = data_types or set()
self.required = required
self.nullable = nullable
def __repr__(self):
return u"{class_name}(name={name}, data_types={data_types})".format(
class_name=type(self).__name__,
name=repr(self.name),
data_types=repr(self.data_types)
)
class Collection(object):
def __init__(self, name, *args):
""" Initialize a collection (table) representation
:param name: Name of the collection in the database
:param args: Associated field, index, and key objects
"""
self.name = name
self.primary_key = None
# Holds fields by name
self._fields_by_name = {}
# Indexes can be looked up either by name
# or by the names of fields that make them up
self._indexes_by_field = {}
self._indexes_by_name = {}
# Iterate through args, saving each associated
# object in the appropriate place
self._associate_all(args)
# Initialize a lookup for fields
self.fields = Lookup(self._fields_by_name)
# Initialize a lookup for indexes
self.indexes = Lookup(self._indexes_by_name)
def __repr__(self):
return u"{class_name}(name={name})".format(
class_name=type(self).__name__,
name=repr(self.name)
)
@classmethod
def create_collection_from_items(cls, items, collection_name, primary_key=None):
""" Create a new collection from an iterable of items, then upload them
:param items: Iterable of items providing collection data and structure
:param collection_name: Name of collection to create
:param primary_key: Tuple of field name(s) to use as primary key
:return: DatabaseOperation creating collection
"""
return DatabaseOperation(
func_name=u"create_collection_from_items",
items=items,
collection_name=collection_name,
primary_key=primary_key
)
def create_collection(self):
""" Generate operation creating this collection in the database
:return: DatabaseOperation creating collection
"""
return DatabaseOperation(
func_name=u"create_collection",
collection=self
)
def update_item(self, key, values, output_fields=None):
""" Generate operation updating the attributes of an item
:param key: Key of item to update
:param values: Dict of new values to apply
:param output_fields: Optional list of fields to return
:return: DatabaseOperation for updating item
"""
return DatabaseOperation(
func_name=u"update_item",
key=key,
values=values,
output_fields=output_fields,
collection=self
)
def delete_item(self, key, output_fields=None):
""" Generate operation to delete an item by its key
:param key: Key of item to delete
:param output_fields: Optional fields to output
:return: DatabaseOperation for deleting item
"""
return DatabaseOperation(
func_name=u"delete_item",
key=key,
output_fields=output_fields,
collection=self
)
def select_item(self, key, output_fields=None):
""" Generate operation to retrieve one item from collection by its primary key
:param key: Key to search for
:param output_fields: Fields to return
:return: DatabaseOperation for fetching item
"""
# None for output fields returns all
if output_fields is None:
output_fields = self.fields
return DatabaseOperation(
func_name=u"select_item",
key=key,
output_fields=output_fields,
collection=self
)
def insert_item(self, item):
""" Generate operation to insert an item into collection
:param item: Item to insert
:return: DatabaseOperation for inserting item
"""
return DatabaseOperation(
func_name=u"insert_item",
item=item,
collection=self
)
def insert(self, items):
""" Generate operation to insert items in iterable into collection
:param items: Iterable of items to insert into collection
:return: DatabaseOperation for inserting items
"""
return DatabaseOperation(
func_name=u"insert",
items=items,
collection=self
)
def count(self, filters=None):
""" Generate operation to count number of items meeting some criteria
:param filters: Filters to apply pre-count
:return: DatabaseOperation that counts items meeting filters
"""
return DatabaseOperation(
func_name=u"count",
filters=filters,
collection=self
)
def select(self, filters=None, output_fields=None, output_sort=None, limit=None, distinct=False):
""" Generate operation to query and return items
:param filters: Filters to apply
:param output_fields: Fields to output
:param output_sort: Sort to apply
:param limit: Maximum number of records to return
:param distinct: If True, only distinct records are returned
:return: DatabaseOperation that queries and returns items
"""
# None for output fields returns all
if output_fields is None:
output_fields = self.fields
return DatabaseOperation(
func_name=u"select",
filters=filters,
output_fields=output_fields,
output_sort=output_sort,
limit=limit,
distinct=distinct,
collection=self
)
def delete(self, filters=None, output_fields=None, output_sort=None, limit=None, distinct=False):
""" Generate operation to delete items matching filters
:param filters: Filters items must match
:param output_fields: Fields to include in returned items
:param output_sort: Desired output sort for deleted items
:param limit: Maximum number of deleted items to RETURN
:param distinct: If True, only distinct items are RETURNED
:return: DatabaseOperation that deletes matching items
"""
return DatabaseOperation(
func_name=u"delete",
filters=filters,
output_fields=output_fields,
output_sort=output_sort,
limit=limit,
distinct=distinct,
collection=self
)
def update(self, values, filters=None, output_fields=None, output_sort=None, limit=None, distinct=False):
""" Generate operation to update items matching filters
:param values: Dict of new values to apply
:param filters: Filters items must match
:param output_fields: Fields to include in returned items
:param output_sort: Desired output sort for updated items
:param limit: Maximum number of updated items to RETURN
:param distinct: If True, only distinct items are RETURNED
:return: DatabaseOperation that updates matching items
"""
return DatabaseOperation(
func_name=u"update",
values=values,
filters=filters,
output_fields=output_fields,
output_sort=output_sort,
limit=limit,
distinct=distinct,
collection=self
)
def item_primary_key(self, item):
""" Returns the primary key of an item
:param item: Item to return primary key for
:return: Primary key of item as key or tuple
"""
return self.primary_key.item_index_key(
item=item
)
def _associate_all(self, collection_components):
""" Associate child components with this collection
:param collection_components: An iterable of Index, Field, or similar objects
"""
for collection_component in collection_components:
# Tell component to reference me (via weakref)
try:
collection_component.associate_with_collection(self)
except AttributeError:
self._raise_not_component_error(collection_component)
# Handle if it's a field or index-style object
if isinstance(collection_component, Field):
self._associate_field(collection_component)
elif isinstance(collection_component, Index):
self._associate_index(collection_component)
def _associate_index(self, index):
""" Save association between an index object and this collection
:param index: Index to associate with this collection
"""
# Save index in a name lookup
self._indexes_by_name[index.name] = index
# Save index in a field name lookup
self._indexes_by_field[index.field_names] = index
# If this is a primary key, also associate it
# as such
if isinstance(index, PrimaryKey):
self._associate_primary_key(index)
def _associate_primary_key(self, primary_key):
""" Save association between a primary key and this collection
:param primary_key: Primary key to associate with this collection
"""
# Handle if a primary key has already been defined
if self.primary_key is not None:
self._raise_duplicate_pk_error()
# Save primary key association
self.primary_key = primary_key
@staticmethod
def _raise_not_component_error(component):
""" Raise a NotCollectionComponentError """
msg = u"{component_type} is not a valid component type".format(
component_type=type(component).__name__
)
raise exc.NotCollectionComponentError(msg)
def _raise_duplicate_pk_error(self):
""" Raise a MultiplePrimaryKeyError """
msg = u"Primary key already declared on {collection_name}".format(
collection_name=self.name
)
raise exc.MultiplePrimaryKeyError(msg)
def _associate_field(self, field):
""" Save association between a field and this collection
:param field: Field to associate with this collection
"""
# Get the name of the field
field_name = field.name
self._fields_by_name[field_name] = expressions.FieldExpression(
field=field,
alias=field_name
)
@property
def f(self):
""" Get the fields in this collection
:return: Lookup of fields in collection
"""
return self.fields
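# Example wiring (hypothetical collection; a sketch, not a definitive API):
#   users = Collection(
#       u"users",
#       Field(u"id", required=True, nullable=False),
#       Field(u"email"),
#       PrimaryKey(u"id"),
#   )
#   op = users.select_item(key=1)
#   op.func_name   # u"select_item" -- dispatched later by a Database object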
|
loop1024/pymo-global
|
refs/heads/master
|
android/pgs4a-0.9.6/python-install/lib/python2.7/distutils/msvc9compiler.py
|
32
|
"""distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes
__revision__ = "$Id$"
import os
import subprocess
import sys
import re
from distutils.errors import (DistutilsExecError, DistutilsPlatformError,
CompileError, LibError, LinkError)
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
import _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegError = _winreg.error
HKEYS = (_winreg.HKEY_USERS,
_winreg.HKEY_CURRENT_USER,
_winreg.HKEY_LOCAL_MACHINE,
_winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Wow6432Node\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (e.g., 'x86_amd64' is
# the param to cross-compile on x86 targeting amd64.)
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
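    # e.g. Reg.get_value(r"Software\Microsoft\VisualStudio\9.0\Setup\VC",
    # "productdir") scans HKEY_USERS, HKCU, HKLM and HKCR in that order and
    # returns the first value found, raising KeyError otherwise.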
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
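    # e.g. MacroExpander(9.0).sub(r"$(VCInstallDir)\bin") expands the macro to
    # the productdir value read from Software\Microsoft\VisualStudio\9.0\Setup\VC
    # (registry permitting).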
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
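# For example, a Python built with VS 2008 carries "MSC v.1500" in sys.version:
# majorVersion = 15 - 6 = 9, minorVersion = 0 / 10.0 = 0.0, so this returns 9.0.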
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
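# e.g. normalize_and_reduce_paths(['C:\\VC\\bin', 'C:\\VC\\bin\\', 'C:\\SDK'])
# yields ['C:\\VC\\bin', 'C:\\SDK'] on Windows: normalized, deduplicated,
# original order kept.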
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
    First it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
# trying Express edition
if productdir is None:
vsbase = VSEXPRESS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
log.debug("Unable to find productdir in registry")
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
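# e.g. query_vcvarsall(9.0, 'x86') returns a dict with exactly the keys
# 'include', 'lib', 'libpath' and 'path', taken from the environment that
# vcvarsall.bat sets up (a ValueError is raised if any of them is missing).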
# More globals
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
# MACROS = MacroExpander(VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# multi-init means we would need to check platform same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
            # compile use 'x86' (i.e., it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
# take care to only use strings in the environment.
self.__paths = vc_env['path'].encode('mbcs').split(os.pathsep)
os.environ['lib'] = vc_env['lib'].encode('mbcs')
os.environ['include'] = vc_env['include'].encode('mbcs')
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
# Embedded manifests are recommended - see MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can embed it later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
if target_desc == CCompiler.EXECUTABLE:
mfid = 1
else:
mfid = 2
self._remove_visual_c_ref(temp_manifest)
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
temp_manifest, out_arg])
except DistutilsExecError, msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
finally:
manifest_f.close()
except IOError:
pass
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
|
otherness-space/myProject
|
refs/heads/master
|
my_project_001/lib/python2.7/site-packages/pip/status_codes.py
|
1002
|
from __future__ import absolute_import
SUCCESS = 0
ERROR = 1
UNKNOWN_ERROR = 2
VIRTUALENV_NOT_FOUND = 3
PREVIOUS_BUILD_DIR_ERROR = 4
NO_MATCHES_FOUND = 23
|
rohitwaghchaure/alec_frappe5_erpnext
|
refs/heads/develop
|
erpnext/hr/doctype/expense_claim/test_expense_claim.py
|
27
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_records = frappe.get_test_records('Expense Claim')
class TestExpenseClaim(unittest.TestCase):
def test_total_expense_claim_for_project(self):
frappe.db.sql("""delete from `tabTask` where project = "_Test Project 1" """)
frappe.db.sql("""delete from `tabProject` where name = "_Test Project 1" """)
frappe.get_doc({
"project_name": "_Test Project 1",
"doctype": "Project",
"tasks" :
[{ "title": "_Test Project Task 1", "status": "Open" }]
}).save()
task_name = frappe.db.get_value("Task",{"project": "_Test Project 1"})
expense_claim = frappe.get_doc({
"doctype": "Expense Claim",
"employee": "_T-Employee-0001",
"approval_status": "Approved",
"project": "_Test Project 1",
"task": task_name,
"expenses":
[{ "expense_type": "Food", "claim_amount": 300, "sanctioned_amount": 200 }]
})
expense_claim.submit()
self.assertEqual(frappe.db.get_value("Task", task_name, "total_expense_claim"), 200)
self.assertEqual(frappe.db.get_value("Project", "_Test Project 1", "total_expense_claim"), 200)
expense_claim2 = frappe.get_doc({
"doctype": "Expense Claim",
"employee": "_T-Employee-0001",
"approval_status": "Approved",
"project": "_Test Project 1",
"task": task_name,
"expenses":
[{ "expense_type": "Food", "claim_amount": 600, "sanctioned_amount": 500 }]
})
expense_claim2.submit()
self.assertEqual(frappe.db.get_value("Task", task_name, "total_expense_claim"), 700)
self.assertEqual(frappe.db.get_value("Project", "_Test Project 1", "total_expense_claim"), 700)
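        # Cancelling the second claim should roll both totals back to the
        # first claim's sanctioned_amount (200).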
expense_claim2.cancel()
self.assertEqual(frappe.db.get_value("Task", task_name, "total_expense_claim"), 200)
self.assertEqual(frappe.db.get_value("Project", "_Test Project 1", "total_expense_claim"), 200)
|
croxis/allegiance2
|
refs/heads/master
|
server.py
|
1
|
### Python 3 look-ahead imports ###
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from direct.directnotify.DirectNotify import DirectNotify
import spacedrive
from networking.server_net import NetworkSystem
__author__ = 'croxis'
def setup(name='Server Name'):
log = DirectNotify().newCategory("Allegiance-Server-" + name)
log.debug("Loading space drive")
spacedrive.init(run_server=True,
run_client=False,
log_level='debug',
window_title='Allegiance 2')
log.info("Setting up server network")
spacedrive.init_server_net(NetworkSystem, port=47624)
def start():
spacedrive.run()
|
naousse/odoo
|
refs/heads/8.0
|
addons/l10n_cl/__init__.py
|
2120
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wangxuan007/flasky
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py
|
20
|
# engine/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
from .interfaces import (
Connectable,
Dialect,
ExecutionContext,
ExceptionContext,
# backwards compat
Compiled,
TypeCompiler
)
from .base import (
Connection,
Engine,
NestedTransaction,
RootTransaction,
Transaction,
TwoPhaseTransaction,
)
from .result import (
BaseRowProxy,
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
FullyBufferedResultProxy,
ResultProxy,
RowProxy,
)
from .util import (
connection_memoize
)
from . import util, strategies
# backwards compat
from ..sql import ddl
default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
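    For example, ``"postgresql+psycopg2://scott:tiger@localhost/test"`` names
    the ``postgresql`` dialect and the ``psycopg2`` DBAPI explicitly.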
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
.. versionchanged:: 0.8
By default, result row names match case-sensitively.
In version 0.7 and prior, all matches were case-insensitive.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a repr() of their parameter lists to the engines
logger, which defaults to sys.stdout. The ``echo`` attribute of
``Engine`` can be modified at any time to turn logging on and
off. If set to the string ``"debug"``, result rows will be
printed to the standard output as well. This flag ultimately
controls a Python logger; see :ref:`dbengine_logging` for
information on how to configure logging directly.
:param echo_pool=False: if True, the connection pool will log
all checkouts/checkins to the logging stream, which defaults to
sys.stdout. This flag ultimately controls a Python logger; see
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
which in Python 2 appear as strings of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a per-:class:`.Connection`
basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
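For example::

    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        isolation_level="READ_COMMITTED")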
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`Postgresql Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. This is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
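For example, assuming a DBAPI that accepts the ``"named"`` style::

    engine = create_engine("sqlite://", paramstyle="named")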
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
inside the connection pool. This is used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ
<http://www.sqlalchemy.org/trac/wiki/FAQ#HowcanIgettheCREATETABLEDROPTABLEoutputasastring>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
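A sketch of the ``mock`` strategy, following the FAQ recipe linked
above::

    def dump(sql, *multiparams, **params):
        print(sql.compile(dialect=engine.dialect))

    engine = create_engine("postgresql://", strategy="mock",
                           executor=dump)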
"""
strategy = kwargs.pop('strategy', default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file.
The keys of interest to ``engine_from_config()`` should be prefixed, e.g.
``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument
indicates the prefix to be searched for. Each matching key (after the
prefix is stripped) is treated as though it were the corresponding keyword
argument to a :func:`.create_engine` call.
The only required key is (assuming the default prefix) ``sqlalchemy.url``,
which provides the :ref:`database URL <database_urls>`.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. The set of arguments
is extensible per-dialect using the ``engine_config_types`` accessor.
:param configuration: A dictionary (typically produced from a config file,
but this is not a requirement). Items whose keys start with the value
of 'prefix' will have that prefix stripped, and will then be passed to
:func:`create_engine`.
:param prefix: Prefix to match and then strip from keys
in 'configuration'.
:param kwargs: Each keyword argument to ``engine_from_config()`` itself
overrides the corresponding item taken from the 'configuration'
dictionary. Keyword arguments should *not* be prefixed.
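A brief sketch with an illustrative configuration dictionary::

    config = {
        'sqlalchemy.url': 'postgresql://scott:tiger@localhost/test',
        'sqlalchemy.echo': 'true',
    }
    engine = engine_from_config(config, prefix='sqlalchemy.')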
"""
options = dict((key[len(prefix):], configuration[key])
for key in configuration
if key.startswith(prefix))
options['_coerce_config'] = True
options.update(kwargs)
url = options.pop('url')
return create_engine(url, **options)
__all__ = (
'create_engine',
'engine_from_config',
)
|
slohse/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_acl.py
|
52
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_acl
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages access list entries for ACLs.
description:
- Manages access list entries for ACLs.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the ACE if it exists.
- C(state=delete_acl) deletes the ACL if it exists.
- For idempotency, use port numbers for the src/dest port
  params like I(src_port1) and names for the well-defined protocols
for the I(proto) param.
- This module is idempotent in the sense that if the ACE presented in
  the task is identical to the one on the switch, no changes will be made.
  If there is any difference, what is in Ansible will be pushed (configured
  options will be overridden). This improves security, but keep in mind
  that the existing ACE is removed and then re-added, so after any change
  the new ACE will contain exactly the parameters sent to the module.
options:
seq:
description:
- Sequence number of the entry (ACE).
name:
description:
- Case sensitive name of the access list (ACL).
required: true
action:
description:
- Action of the ACE.
choices: ['permit', 'deny', 'remark']
remark:
description:
- If action is set to remark, this is the description.
proto:
description:
- Port number or protocol (as supported by the switch).
src:
description:
- Source ip and mask using IP/MASK notation and
supports keyword 'any'.
src_port_op:
description:
- Source port operands such as eq, neq, gt, lt, range.
choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
src_port1:
description:
- Port/protocol and also first (lower) port when using range
operand.
src_port2:
description:
- Second (end) port when using range operand.
dest:
description:
- Destination ip and mask using IP/MASK notation and supports the
keyword 'any'.
dest_port_op:
description:
- Destination port operands such as eq, neq, gt, lt, range.
choices: ['any', 'eq', 'gt', 'lt', 'neq', 'range']
dest_port1:
description:
- Port/protocol and also first (lower) port when using range
operand.
dest_port2:
description:
- Second (end) port when using range operand.
log:
description:
- Log matches against this entry.
choices: ['enable']
urg:
description:
- Match on the URG bit.
choices: ['enable']
ack:
description:
- Match on the ACK bit.
choices: ['enable']
psh:
description:
- Match on the PSH bit.
choices: ['enable']
rst:
description:
- Match on the RST bit.
choices: ['enable']
syn:
description:
- Match on the SYN bit.
choices: ['enable']
fin:
description:
- Match on the FIN bit.
choices: ['enable']
established:
description:
- Match established connections.
choices: ['enable']
fragments:
description:
- Check non-initial fragments.
choices: ['enable']
time_range:
description:
- Name of time-range to apply.
precedence:
description:
- Match packets with given precedence.
choices: ['critical', 'flash', 'flash-override', 'immediate',
'internet', 'network', 'priority', 'routine']
dscp:
description:
- Match packets with given dscp value.
choices: ['af11', 'af12', 'af13', 'af21', 'af22', 'af23','af31','af32',
'af33', 'af41', 'af42', 'af43', 'cs1', 'cs2', 'cs3', 'cs4',
'cs5', 'cs6', 'cs7', 'default', 'ef']
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent','delete_acl']
'''
EXAMPLES = '''
# configure ACL ANSIBLE
- nxos_acl:
name: ANSIBLE
seq: 10
action: permit
proto: tcp
src: 192.0.2.1/24
dest: any
state: present
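# remove the same entry again (hypothetical follow-up task, assuming
# the ACE above was previously configured)
- nxos_acl:
    name: ANSIBLE
    seq: 10
    state: absent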
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip access-list ANSIBLE", "10 permit tcp 192.0.2.1/24 any"]
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
return body
def get_acl(module, acl_name, seq_number):
command = 'show ip access-list'
new_acl = []
saveme = {}
acl_body = {}
body = execute_show_command(command, module)[0]
if body:
all_acl_body = body['TABLE_ip_ipv6_mac']['ROW_ip_ipv6_mac']
else:
# no access-lists configured on the device
return {}, []
if isinstance(all_acl_body, dict):
# Only 1 ACL configured.
if all_acl_body.get('acl_name') == acl_name:
acl_body = all_acl_body
else:
for acl in all_acl_body:
if acl.get('acl_name') == acl_name:
acl_body = acl
break
try:
acl_entries = acl_body['TABLE_seqno']['ROW_seqno']
acl_name = acl_body.get('acl_name')
except KeyError: # could be raised if no ACEs are configured for an ACL
return {}, [{'acl': 'no_entries'}]
if isinstance(acl_entries, dict):
acl_entries = [acl_entries]
for each in acl_entries:
temp = {}
options = {}
remark = each.get('remark')
temp['name'] = acl_name
temp['seq'] = str(each.get('seqno'))
if remark:
temp['remark'] = remark
temp['action'] = 'remark'
else:
temp['action'] = each.get('permitdeny')
temp['proto'] = str(each.get('proto', each.get('proto_str', each.get('ip'))))
temp['src'] = each.get('src_any', each.get('src_ip_prefix'))
temp['src_port_op'] = each.get('src_port_op')
temp['src_port1'] = each.get('src_port1_num')
temp['src_port2'] = each.get('src_port2_num')
temp['dest'] = each.get('dest_any', each.get('dest_ip_prefix'))
temp['dest_port_op'] = each.get('dest_port_op')
temp['dest_port1'] = each.get('dest_port1_num')
temp['dest_port2'] = each.get('dest_port2_num')
options['log'] = each.get('log')
options['urg'] = each.get('urg')
options['ack'] = each.get('ack')
options['psh'] = each.get('psh')
options['rst'] = each.get('rst')
options['syn'] = each.get('syn')
options['fin'] = each.get('fin')
options['established'] = each.get('established')
options['dscp'] = each.get('dscp_str')
options['precedence'] = each.get('precedence_str')
options['fragments'] = each.get('fragments')
options['time_range'] = each.get('timerange')
keep = {}
for key, value in temp.items():
if value:
keep[key] = value
options_no_null = {}
for key, value in options.items():
if value is not None:
options_no_null[key] = value
keep['options'] = options_no_null
if keep.get('seq') == seq_number:
saveme = dict(keep)
new_acl.append(keep)
return saveme, new_acl
def _acl_operand(operand, srcp1, srcp2):
    sub_entry = ' ' + operand
    if operand == 'range':
        sub_entry += ' ' + srcp1 + ' ' + srcp2
    else:
        sub_entry += ' ' + srcp1
    return sub_entry
def config_core_acl(proposed):
seq = proposed.get('seq')
action = proposed.get('action')
remark = proposed.get('remark')
proto = proposed.get('proto')
src = proposed.get('src')
src_port_op = proposed.get('src_port_op')
src_port1 = proposed.get('src_port1')
src_port2 = proposed.get('src_port2')
dest = proposed.get('dest')
dest_port_op = proposed.get('dest_port_op')
dest_port1 = proposed.get('dest_port1')
dest_port2 = proposed.get('dest_port2')
ace_start_entries = [action, proto, src]
if not remark:
ace = seq + ' ' + ' '.join(ace_start_entries)
if src_port_op:
ace += _acl_operand(src_port_op, src_port1, src_port2)
ace += ' ' + dest
if dest_port_op:
ace += _acl_operand(dest_port_op, dest_port1, dest_port2)
else:
ace = seq + ' remark ' + remark
return ace
def config_acl_options(options):
    ENABLE_ONLY = ['psh', 'urg', 'log', 'ack', 'syn',
                   'established', 'rst', 'fin', 'fragments']
OTHER = ['dscp', 'precedence', 'time-range']
# packet-length is the only option not currently supported
if options.get('time_range'):
options['time-range'] = options.get('time_range')
options.pop('time_range')
command = ''
for option, value in options.items():
if option in ENABLE_ONLY:
if value == 'enable':
command += ' ' + option
elif option in OTHER:
command += ' ' + option + ' ' + value
if command:
command = command.strip()
return command
def flatten_list(command_lists):
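    # Flattens one level of nesting, e.g. [['a', 'b'], 'c'] -> ['a', 'b', 'c'].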
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def main():
argument_spec = dict(
seq=dict(required=False, type='str'),
name=dict(required=True, type='str'),
action=dict(required=False, choices=['remark', 'permit', 'deny']),
remark=dict(required=False, type='str'),
proto=dict(required=False, type='str'),
src=dict(required=False, type='str'),
src_port_op=dict(required=False),
src_port1=dict(required=False, type='str'),
src_port2=dict(required=False, type='str'),
dest=dict(required=False, type='str'),
dest_port_op=dict(required=False),
dest_port1=dict(required=False, type='str'),
dest_port2=dict(required=False, type='str'),
log=dict(required=False, choices=['enable']),
urg=dict(required=False, choices=['enable']),
ack=dict(required=False, choices=['enable']),
psh=dict(required=False, choices=['enable']),
rst=dict(required=False, choices=['enable']),
syn=dict(required=False, choices=['enable']),
fragments=dict(required=False, choices=['enable']),
fin=dict(required=False, choices=['enable']),
established=dict(required=False, choices=['enable']),
time_range=dict(required=False),
precedence=dict(required=False, choices=['critical', 'flash',
'flash-override',
'immediate', 'internet',
'network', 'priority',
'routine']),
dscp=dict(required=False, choices=['af11', 'af12', 'af13', 'af21',
'af22', 'af23', 'af31', 'af32',
'af33', 'af41', 'af42', 'af43',
'cs1', 'cs2', 'cs3', 'cs4',
'cs5', 'cs6', 'cs7', 'default',
'ef']),
state=dict(choices=['absent', 'present', 'delete_acl'], default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = dict(changed=False, warnings=warnings)
state = module.params['state']
action = module.params['action']
remark = module.params['remark']
dscp = module.params['dscp']
precedence = module.params['precedence']
    seq = module.params['seq']
    name = module.params['name']
if action == 'remark' and not remark:
        module.fail_json(msg='when action is remark, remark param is also required')
REQUIRED = ['seq', 'name', 'action', 'proto', 'src', 'dest']
ABSENT = ['name', 'seq']
if state == 'present':
if action and remark and seq:
pass
else:
for each in REQUIRED:
if module.params[each] is None:
module.fail_json(msg="req'd params when state is present:",
params=REQUIRED)
elif state == 'absent':
for each in ABSENT:
if module.params[each] is None:
                module.fail_json(msg='required params when state is absent',
params=ABSENT)
elif state == 'delete_acl':
if module.params['name'] is None:
module.fail_json(msg="param name req'd when state is delete_acl")
if dscp and precedence:
        module.fail_json(msg='only one of the params dscp/precedence '
                             'is allowed')
OPTIONS_NAMES = ['log', 'urg', 'ack', 'psh', 'rst', 'syn', 'fin',
'established', 'dscp', 'precedence', 'fragments',
'time_range']
CORE = ['seq', 'name', 'action', 'proto', 'src', 'src_port_op',
'src_port1', 'src_port2', 'dest', 'dest_port_op',
'dest_port1', 'dest_port2', 'remark']
proposed_core = dict((param, value) for (param, value) in
module.params.items()
if param in CORE and value is not None)
proposed_options = dict((param, value) for (param, value) in
module.params.items()
if param in OPTIONS_NAMES and value is not None)
proposed = {}
proposed.update(proposed_core)
proposed.update(proposed_options)
existing_options = {}
    # fetch existing state: existing_core=dict, acl=list of ACE dicts
existing_core, acl = get_acl(module, name, seq)
if existing_core:
existing_options = existing_core.get('options')
existing_core.pop('options')
commands = []
delta_core = {}
delta_options = {}
if not existing_core.get('remark'):
dcore = dict(
set(proposed_core.items()).difference(
existing_core.items())
)
if not dcore:
# check the diff in the other way just in case
dcore = dict(
set(existing_core.items()).difference(
proposed_core.items())
)
delta_core = dcore
if delta_core:
delta_options = proposed_options
else:
doptions = dict(
set(proposed_options.items()).difference(
existing_options.items())
)
# check the diff in the other way just in case
if not doptions:
doptions = dict(
set(existing_options.items()).difference(
proposed_options.items())
)
delta_options = doptions
else:
delta_core = dict(
set(proposed_core.items()).difference(
existing_core.items())
)
if state == 'present':
if delta_core or delta_options:
if existing_core: # if the ace exists already
commands.append(['no {0}'.format(seq)])
if delta_options:
myacl_str = config_core_acl(proposed_core)
myacl_str += ' ' + config_acl_options(proposed_options)
else:
myacl_str = config_core_acl(proposed_core)
command = [myacl_str]
commands.append(command)
elif state == 'absent':
if existing_core:
commands.append(['no {0}'.format(seq)])
elif state == 'delete_acl':
if acl[0].get('acl') != 'no_entries':
commands.append(['no ip access-list {0}'.format(name)])
cmds = []
if commands:
preface = []
if state in ['present', 'absent']:
preface = ['ip access-list {0}'.format(name)]
commands.insert(0, preface)
cmds = flatten_list(commands)
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
results['changed'] = True
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
|
UNINETT/nav
|
refs/heads/master
|
python/nav/eventengine/plugins/maintenancestate.py
|
2
|
#
# Copyright (C) 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""maintenance handler plugin"""
from nav.eventengine.plugin import EventHandler
from nav.eventengine.alerts import AlertGenerator
class MaintenanceStateHandler(EventHandler):
"""Accepts maintenanceState events"""
handled_types = ('maintenanceState', )
def handle(self):
event = self.event
if event.state == event.STATE_STATELESS:
self._logger.info('Ignoring stateless maintenanceState event')
else:
self._post_alert(event)
event.delete()
def _post_alert(self, event):
alert = AlertGenerator(event)
alert.alert_type = (
'onMaintenance' if event.state == event.STATE_START
else 'offMaintenance')
alert.history_vars = dict(alert)
if alert.is_event_duplicate():
self._logger.info('Ignoring duplicate event')
else:
alert.post()
|
wglas85/math
|
refs/heads/master
|
tests/test/mathx/tegral_tests.py
|
1
|
'''
Created on 19.10.2015
@author: michi
'''
import logging
import unittest
from mathx.tegral import tegral
import math
handler = logging.StreamHandler(open('/dev/stderr', 'w'))
formatter = logging.Formatter( '%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.DEBUG)
class Test(unittest.TestCase):
def test_integrate(self):
func = lambda x: math.sqrt(1.0-x*x)
res = tegral(func,-1,1,tol=1.0e-15)
self.assertAlmostEqual(math.pi*0.5,res,places=13)
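    def test_integrate_exp(self):
        # Hypothetical additional check, assuming the same tegral()
        # signature as above: the integral of e**x over [0, 1] is e - 1.
        res = tegral(math.exp, 0.0, 1.0, tol=1.0e-15)
        self.assertAlmostEqual(math.e - 1.0, res, places=13)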
if __name__ == "__main__":
unittest.main()
|
SPIhub/hummingbird
|
refs/heads/master
|
tests/test_examples.py
|
2
|
from test_basics import run_hummingbird
from test_imports import test_import_backend_lcls
import os, sys, warnings
__thisdir__ = os.path.dirname(os.path.realpath(__file__))
# Testing basic examples
def test_dummy_example():
run_hummingbird(conf=__thisdir__ + '/../examples/basic/dummy.py')
def test_simulation_example():
run_hummingbird(conf=__thisdir__ + '/../examples/basic/simulation.py')
def test_detector_example():
run_hummingbird(conf=__thisdir__ + '/../examples/basic/detector.py')
def test_hitfinding_example():
run_hummingbird(conf=__thisdir__ + '/../examples/basic/hitfinding.py')
def test_correlation_example():
run_hummingbird(conf=__thisdir__ + '/../examples/basic/correlation.py')
def test_lcls_mimi_dark_example():
if test_import_backend_lcls():
run_hummingbird(conf=__thisdir__ + '/../examples/lcls/mimi_dark.py')
else:
warnings.warn(UserWarning("The LCLS backend is not available and can therefore not be tested..."))
def test_lcls_mimi_hits_example():
if test_import_backend_lcls():
run_hummingbird(conf=__thisdir__ + '/../examples/lcls/mimi_hits.py')
else:
warnings.warn(UserWarning("The LCLS backend is not available and can therefore not be tested..."))
|
egenerat/gae-django
|
refs/heads/master
|
django/contrib/formtools/models.py
|
13
|
""" models.py (even empty) currently required by the runtests.py to enable unit tests """
|
meetsandeepan/meetsandeepan.github.io
|
refs/heads/master
|
node_modules/pygmentize-bundled/vendor/pygments/pygments/__init__.py
|
269
|
# -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.6'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None):
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
#print formatter, 'using', formatter.encoding
realoutfile = formatter.encoding and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
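# A minimal usage sketch (assuming the stock PythonLexer and
# HtmlFormatter shipped with this package):
#
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   html = highlight('print "hello"', PythonLexer(), HtmlFormatter())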
if __name__ == '__main__':
from pygments.cmdline import main
sys.exit(main(sys.argv))
|
go-bears/rally
|
refs/heads/master
|
rally/plugins/openstack/context/network/existing_network.py
|
3
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import consts
from rally import osclients
from rally.plugins.openstack.wrappers import network as network_wrapper
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="existing_network", order=349)
class ExistingNetwork(context.Context):
"""This context supports using existing networks in Rally.
This context should be used on a deployment with existing users.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False
}
@utils.log_task_wrapper(LOG.info, _("Enter context: `existing_network`"))
def setup(self):
for user, tenant_id in utils.iterate_per_tenants(
self.context.get("users", [])):
net_wrapper = network_wrapper.wrap(
osclients.Clients(user["endpoint"]),
self.config)
self.context["tenants"][tenant_id]["networks"] = (
net_wrapper.list_networks())
@utils.log_task_wrapper(LOG.info, _("Exit context: `existing_network`"))
def cleanup(self):
"""Networks were not created by Rally, so nothing to do."""
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/333_test_urllib.py
|
24
|
"""Regresssion tests for urllib"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from test import support
import os
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(fakedata)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(IOError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
# 3.3 - URLError is not captured, explicit IOError is raised.
with self.assertRaises(IOError):
urlopen('file://localhost/a/file/which/doesnot/exists.py')
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
# 3.3 - IOError instead of URLError
with self.assertRaises(IOError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
# 3.3 - IOError instead of URLError
with self.assertRaises(IOError):
urlopen(test_ftp_url)
def test_ftp_nonexisting(self):
# 3.3 - IOError instead of URLError
with self.assertRaises(IOError):
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get a email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/',
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/')
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded by with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
        self.assertEqual(expected_path, result,
                         "url2pathname() failed; %s != %s" %
                         (result, expected_path))
def test_quoting(self):
        # Test that automatic quoting and unquoting work for pathname2url()
        # and url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
                             'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
self.assertEqual(('user', 'ab'),urllib.parse.splitpasswd('user:ab'))
self.assertEqual(('user', 'a\nb'),urllib.parse.splitpasswd('user:a\nb'))
self.assertEqual(('user', 'a\tb'),urllib.parse.splitpasswd('user:a\tb'))
self.assertEqual(('user', 'a\rb'),urllib.parse.splitpasswd('user:a\rb'))
self.assertEqual(('user', 'a\fb'),urllib.parse.splitpasswd('user:a\fb'))
self.assertEqual(('user', 'a\vb'),urllib.parse.splitpasswd('user:a\vb'))
self.assertEqual(('user', 'a:b'),urllib.parse.splitpasswd('user:a:b'))
self.assertEqual(('user', 'a b'),urllib.parse.splitpasswd('user:a b'))
self.assertEqual(('user 2', 'ab'),urllib.parse.splitpasswd('user 2:ab'))
self.assertEqual(('user+1', 'a+b'),urllib.parse.splitpasswd('user+1:a+b'))
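    # Illustrative only (not part of the original suite): splitpasswd()
    # splits on the first colon only, so passwords may contain colons.
    #
    #   >>> import urllib.parse
    #   >>> urllib.parse.splitpasswd('user:a:b')
    #   ('user', 'a:b')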
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box,
# and the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
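    # Illustrative only (not part of the original suite): the default-method
    # rule tested above, in doctest form -- GET without data, POST with data.
    #
    #   >>> import urllib.request
    #   >>> urllib.request.Request("http://www.python.org").get_method()
    #   'GET'
    #   >>> urllib.request.Request("http://www.python.org", {}).get_method()
    #   'POST'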
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
|
tensorflow/tpu
|
refs/heads/master
|
models/official/detection/dataloader/maskrcnn_parser_with_copy_paste.py
|
1
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data parser and processing for Mask R-CNN."""
import tensorflow.compat.v1 as tf
from dataloader import anchor
from dataloader import mode_keys as ModeKeys
from dataloader import tf_example_decoder
from utils import box_utils
from utils import dataloader_utils
from utils import input_utils
class Parser(object):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
rpn_match_threshold=0.7,
rpn_unmatched_threshold=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5,
aug_rand_hflip=False,
aug_scale_min=1.0,
aug_scale_max=1.0,
skip_crowd_during_training=True,
max_num_instances=100,
include_mask=False,
mask_crop_size=112,
use_bfloat16=True,
mode=None,
copy_paste_aug=True,
copy_paste_occluded_obj_threshold=300,
copy_paste_box_update_threshold=10):
"""Initializes parameters for parsing annotations in the dataset.
Args:
      output_size: `Tensor` or `list` for [height, width] of the output image.
        The output_size should be divisible by the largest feature stride
        2^max_level.
min_level: `int` number of minimum level of the output feature pyramid.
max_level: `int` number of maximum level of the output feature pyramid.
      num_scales: `int` number representing intermediate scales added
        on each level. For instance, num_scales=2 adds one additional
        intermediate anchor scale, i.e. scales [2^0, 2^0.5] on each level.
      aspect_ratios: `list` of float numbers representing the aspect ratio
        anchors added on each level. Each number indicates the ratio of width
        to height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three
        anchors on each scale level.
anchor_size: `float` number representing the scale of size of the base
anchor to the feature stride 2^level.
      rpn_match_threshold: `float` IoU threshold at or above which an anchor
        is matched to a groundtruth box as a positive example.
      rpn_unmatched_threshold: `float` IoU threshold below which an anchor is
        treated as background.
      rpn_batch_size_per_im: `int` number of anchors sampled per image for
        computing the RPN loss.
      rpn_fg_fraction: `float` fraction of the sampled anchors that should be
        foreground (positive).
aug_rand_hflip: `bool`, if True, augment training with random
horizontal flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
      skip_crowd_during_training: `bool`, if True, skip annotations labeled
        with `is_crowd` equal to 1.
      max_num_instances: `int` maximum number of instances in an image. The
        groundtruth data will be padded to `max_num_instances`.
      include_mask: a bool to indicate whether to parse the mask groundtruth.
      mask_crop_size: the size to which each groundtruth mask is cropped.
use_bfloat16: `bool`, if True, cast output image to tf.bfloat16.
mode: a ModeKeys. Specifies if this is training, evaluation, prediction
or prediction with groundtruths in the outputs.
copy_paste_aug: `bool`, if True, apply copy-paste augmentation.
      copy_paste_occluded_obj_threshold: `float`, objects which are occluded
        by newly pasted objects and whose area is less than this number are
        considered fully occluded and will be removed from the ground-truth.
      copy_paste_box_update_threshold: `float`, minimum difference between
        the original coordinates and the new coordinates computed from the
        updated masks for an object to be considered occluded (by pasted
        objects) and have its coordinates updated.
"""
self._mode = mode
self._max_num_instances = max_num_instances
self._skip_crowd_during_training = skip_crowd_during_training
self._is_training = (mode == ModeKeys.TRAIN)
self._example_decoder = tf_example_decoder.TfExampleDecoder(
include_mask=include_mask)
# Anchor.
self._output_size = output_size
self._min_level = min_level
self._max_level = max_level
self._num_scales = num_scales
self._aspect_ratios = aspect_ratios
self._anchor_size = anchor_size
# Target assigning.
self._rpn_match_threshold = rpn_match_threshold
self._rpn_unmatched_threshold = rpn_unmatched_threshold
self._rpn_batch_size_per_im = rpn_batch_size_per_im
self._rpn_fg_fraction = rpn_fg_fraction
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
# Mask.
self._include_mask = include_mask
self._mask_crop_size = mask_crop_size
self._copy_paste_aug = copy_paste_aug
self._copy_paste_occluded_obj_threshold = copy_paste_occluded_obj_threshold
self._copy_paste_box_update_threshold = copy_paste_box_update_threshold
# Device.
self._use_bfloat16 = use_bfloat16
    # Data is parsed depending on the mode (ModeKeys).
if mode == ModeKeys.TRAIN:
self._parse_fn = self._parse_train_data
elif mode == ModeKeys.EVAL:
self._parse_fn = self._parse_eval_data
elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT:
self._parse_fn = self._parse_predict_data
else:
raise ValueError('mode is not defined.')
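  # Illustrative construction sketch (the values below are placeholders for
  # the example, not defaults from any published config):
  #
  #   parser = Parser(
  #       output_size=[1024, 1024], min_level=2, max_level=6, num_scales=1,
  #       aspect_ratios=[1.0, 2.0, 0.5], anchor_size=8, include_mask=True,
  #       copy_paste_aug=False, mode=ModeKeys.TRAIN)
  #   dataset = dataset.map(parser)  # elements are serialized tf.Examples
  #
  # With copy_paste_aug=True, map over paired elements instead so that each
  # call receives the pre-processed pasting data as the second argument.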
def __call__(self, value, value2=None):
"""Parses data to an image and associated training labels.
Args:
value: a string tensor holding a serialized tf.Example proto.
value2: if not None, a decoded tensor dictionary containing pre-processed
data of pasting objects for Copy-Paste augmentation.
Returns:
image, labels: if mode == ModeKeys.TRAIN. see _parse_train_data.
{'images': image, 'labels': labels}: if mode == ModeKeys.PREDICT
or ModeKeys.PREDICT_WITH_GT.
"""
with tf.name_scope('parser'):
data = self._example_decoder.decode(value)
if value2:
return self._parse_fn(data, value2)
else:
return self._parse_fn(data)
def _transform_mask(self, image_shape, scale, offset, mask):
"""Transform input mask according to the image info (scale, offset)."""
image_scaled_shape = tf.round(
tf.cast(image_shape, tf.float32) * scale)
image_scaled_shape = tf.cast(image_scaled_shape, tf.int32)
offset = tf.cast(offset, tf.int32)
mask_shape = tf.shape(mask)
mask = tf.image.pad_to_bounding_box(
mask, offset[0], offset[1],
tf.maximum(image_scaled_shape[0], mask_shape[0]) + offset[0],
tf.maximum(image_scaled_shape[1], mask_shape[1]) + offset[1])
mask = mask[0:image_scaled_shape[0], 0:image_scaled_shape[1], :]
mask = tf.image.resize(mask, image_shape)
return mask
def _get_occluded_bbox(self, updated_bbox, bbox):
    # Finds bbox coordinates which are occluded by the new pasted objects.
    # If the difference between the bounding box coordinates of the updated
    # masks and the original bounding box is larger than a threshold, then
    # those coordinates are considered occluded.
    return tf.greater(tf.abs(updated_bbox - tf.cast(bbox, updated_bbox.dtype)),
                      self._copy_paste_box_update_threshold)
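  # Numeric illustration (assumed values): with the default threshold of 10
  # pixels, a coordinate that moves from 50.0 to 63.5 after occluded regions
  # are carved out of the mask is flagged (|63.5 - 50.0| = 13.5 > 10), while
  # a 4-pixel shift leaves that coordinate unflagged.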
def _get_visible_masks_indices(self, masks, boxes_, cropped_boxes):
"""Returns indices of not fully occluded objects."""
    occluded_objects = tf.reduce_any(
        self._get_occluded_bbox(boxes_, cropped_boxes), axis=-1)
areas = tf.reduce_sum(masks, axis=[1, 2])
    # Among the occluded objects, finds those whose mask area is less than
    # copy_paste_occluded_obj_threshold. These objects are considered fully
    # occluded and will be removed from the ground-truth.
indices = tf.where(
tf.math.logical_or(
tf.greater(areas, self._copy_paste_occluded_obj_threshold),
tf.math.logical_not(occluded_objects)))
indices = tf.reshape(indices, [-1])
return indices
def _compute_boxes_using_masks(self, masks, image_shape, image_info,
image_scale, offset):
"""Computes boundig boxes using masks."""
masks = tf.cast(masks, tf.int8)
x = tf.reduce_max(masks, axis=1)
xmin = tf.cast(tf.argmax(x, 1), tf.int16)
xmax = tf.cast(image_shape[1], tf.int16) - tf.cast(
tf.argmax(tf.reverse(x, [1]), 1), tf.int16)
y = tf.reduce_max(masks, axis=2)
ymin = tf.cast(tf.argmax(y, 1), tf.int16)
ymax = tf.cast(image_shape[0], tf.int16) - tf.cast(
tf.argmax(tf.reverse(y, [1]), 1), tf.int16)
bbox = tf.stack([ymin, xmin, ymax, xmax], -1)
# Clips boxes.
bbox = tf.cast(bbox, tf.float32)
bbox = input_utils.resize_and_crop_boxes(
bbox, image_scale, image_info[1, :], offset)
bbox += tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
bbox /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
return bbox
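  # Sketch of the argmax trick above on a toy input (assumed values): for a
  # single 4x4 binary mask with ones in rows 1-2 and columns 0-1,
  #   tf.reduce_max(mask, axis=1) -> column occupancy [1, 1, 0, 0]
  #   tf.argmax(..., 1)           -> xmin = 0
  #   W - argmax(reversed, 1)     -> xmax = 4 - 2 = 2
  # and likewise ymin = 1, ymax = 3, giving the tight box
  # [ymin, xmin, ymax, xmax] = [1, 0, 3, 2] before rescaling back to the
  # original image space.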
def _parse_train_data(self, data, data2=None):
"""Parses data for training.
Args:
data: the decoded tensor dictionary from TfExampleDecoder.
data2: if not None, a decoded tensor dictionary containing pre-processed
data of pasting objects for Copy-Paste augmentation.
Returns:
      image: image tensor that is preprocessed to have normalized value and
dimension [output_size[0], output_size[1], 3]
labels: a dictionary of tensors used for training. The following describes
{key: value} pairs in the dictionary.
image_info: a 2D `Tensor` that encodes the information of the image and
the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each level.
rpn_score_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location]. The height_l and
width_l represent the dimension of class logits at l-th level.
rpn_box_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, anchors_per_location * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
        gt_boxes: Groundtruth bounding box annotations. The box is represented
          in [y1, x1, y2, x2] format. The coordinates are w.r.t. the scaled
          image that is fed to the network. The tensor is padded with -1 to
          the fixed dimension [self._max_num_instances, 4].
        gt_classes: Groundtruth classes annotations. The tensor is padded
          with -1 to the fixed dimension [self._max_num_instances].
        gt_masks: Groundtruth masks cropped by the bounding box and
          resized to a fixed size determined by mask_crop_size.
"""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
if self._include_mask:
masks = data['groundtruth_instance_masks']
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training and self._is_training:
      num_groundtruths = tf.shape(classes)[0]
      with tf.control_dependencies([num_groundtruths, is_crowds]):
        indices = tf.cond(
            tf.greater(tf.size(is_crowds), 0),
            lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
            lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
if self._include_mask:
masks = tf.gather(masks, indices)
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Flips image randomly during training.
if self._aug_rand_hflip:
if self._include_mask:
image, boxes, masks = input_utils.random_horizontal_flip(
image, boxes, masks)
else:
image, boxes = input_utils.random_horizontal_flip(
image, boxes)
# Converts boxes from normalized coordinates to pixel coordinates.
# Now the coordinates of boxes are w.r.t. the original image.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
image_height, image_width, _ = image.get_shape().as_list()
# Resizes and crops boxes.
# Now the coordinates of boxes are w.r.t the scaled image.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
if self._copy_paste_aug:
# Pastes objects and creates a new composed image.
compose_mask = tf.cast(data2['pasted_objects_mask'],
image.dtype) * tf.ones_like(image)
# Note - original paper would apply gaussian blur here, e.g.:
# compose_mask = simclr_data_util.gaussian_blur(compose_mask, 5, 5)
# This is currently disabled in OSS.
image = image * (1 - compose_mask) + data2['image'] * compose_mask
if self._include_mask:
masks = tf.gather(masks, indices)
if self._copy_paste_aug:
pasted_objects_mask = self._transform_mask(
image_shape, image_scale, offset,
tf.cast(data2['pasted_objects_mask'], tf.int8))
pasted_objects_mask = tf.cast(pasted_objects_mask, tf.int8)
pasted_objects_mask = tf.expand_dims(
tf.squeeze(pasted_objects_mask, -1), 0) * tf.ones(
tf.shape(masks), dtype=pasted_objects_mask.dtype)
# Updates masks according to the occlusion from the pasted objects.
masks = tf.where(
tf.equal(pasted_objects_mask, 1), tf.zeros_like(masks), masks)
    # Transfers boxes back to the original image space; normalization happens below.
cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
if self._copy_paste_aug:
# Computes bounding boxes of objects using updated masks.
boxes_ = self._compute_boxes_using_masks(
masks, image_shape, image_info, image_scale, offset)
# Filters out objects that are fully occluded in the new image.
indices = self._get_visible_masks_indices(
masks, boxes_, cropped_boxes)
boxes_ = tf.gather(boxes_, indices)
boxes = tf.gather(boxes, indices)
cropped_boxes = tf.gather(cropped_boxes, indices)
masks = tf.gather(masks, indices)
classes = tf.gather(classes, indices)
      # Updates bounding boxes of objects which are occluded by the newly
      # pasted objects.
def update_bboxes(boxes_, cropped_boxes):
occluded_bbox = self._get_occluded_bbox(boxes_, cropped_boxes)
cropped_boxes = tf.where(occluded_bbox,
tf.cast(boxes_, cropped_boxes.dtype),
cropped_boxes)
boxes = input_utils.resize_and_crop_boxes(cropped_boxes, image_scale,
image_info[1, :], offset)
return boxes, cropped_boxes
boxes, cropped_boxes = update_bboxes(boxes_, cropped_boxes)
cropped_boxes = box_utils.normalize_boxes(cropped_boxes, image_shape)
num_masks = tf.shape(masks)[0]
masks = tf.image.crop_and_resize(
tf.expand_dims(masks, axis=-1),
cropped_boxes,
box_indices=tf.range(num_masks, dtype=tf.int32),
crop_size=[self._mask_crop_size, self._mask_crop_size],
method='bilinear')
masks = tf.squeeze(masks, axis=-1)
if self._copy_paste_aug:
# Adding the masks, boxes and classes of the pasted objects.
if self._include_mask:
masks = tf.concat([masks, data2['masks']], axis=0)
boxes = tf.concat([boxes, data2['boxes']], axis=0)
classes = tf.concat([classes, data2['classes']], axis=0)
# Assigns anchor targets.
# Note that after the target assignment, box targets are absolute pixel
# offsets w.r.t. the scaled image.
input_anchor = anchor.Anchor(
self._min_level,
self._max_level,
self._num_scales,
self._aspect_ratios,
self._anchor_size,
(image_height, image_width))
anchor_labeler = anchor.RpnAnchorLabeler(
input_anchor,
self._rpn_match_threshold,
self._rpn_unmatched_threshold,
self._rpn_batch_size_per_im,
self._rpn_fg_fraction)
rpn_score_targets, rpn_box_targets = anchor_labeler.label_anchors(
boxes, tf.cast(tf.expand_dims(classes, axis=-1), dtype=tf.float32))
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Packs labels for model_fn outputs.
labels = {
'anchor_boxes': input_anchor.multilevel_boxes,
'image_info': image_info,
'rpn_score_targets': rpn_score_targets,
'rpn_box_targets': rpn_box_targets,
}
labels['gt_boxes'] = input_utils.clip_or_pad_to_fixed_size(
boxes, self._max_num_instances, -1)
labels['gt_classes'] = input_utils.clip_or_pad_to_fixed_size(
classes, self._max_num_instances, -1)
if self._include_mask:
labels['gt_masks'] = input_utils.clip_or_pad_to_fixed_size(
masks, self._max_num_instances, -1)
return image, labels
def _parse_eval_data(self, data):
"""Parses data for evaluation.
Args:
data: the decoded tensor dictionary from TfExampleDecoder.
Returns:
      image: image tensor that is preprocessed to have normalized value and
dimension [output_size[0], output_size[1], 3]
labels: a dictionary of tensors used for training. The following describes
{key: value} pairs in the dictionary.
image_info: a 2D `Tensor` that encodes the information of the image and
the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each level.
groundtruths:
source_id: Groundtruth source id.
height: Original image height.
width: Original image width.
          boxes: Groundtruth bounding box annotations. The box is represented
            in [y1, x1, y2, x2] format. The coordinates are w.r.t. the scaled
            image that is fed to the network. The tensor is padded with -1 to
            the fixed dimension [self._max_num_instances, 4].
          classes: Groundtruth classes annotations. The tensor is padded
            with -1 to the fixed dimension [self._max_num_instances].
          areas: Box area or mask area, depending on whether the mask is
            present.
is_crowds: Whether the ground truth label is a crowd label.
num_groundtruths: Number of ground truths in the image.
"""
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# Assigns anchor targets.
input_anchor = anchor.Anchor(
self._min_level,
self._max_level,
self._num_scales,
self._aspect_ratios,
self._anchor_size,
(image_height, image_width))
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Sets up groundtruth data for evaluation.
groundtruths = {
'source_id':
data['source_id'],
'height':
data['height'],
'width':
data['width'],
'num_groundtruths':
tf.shape(data['groundtruth_classes']),
'boxes':
box_utils.denormalize_boxes(data['groundtruth_boxes'], image_shape),
'classes':
data['groundtruth_classes'],
'areas':
data['groundtruth_area'],
'is_crowds':
tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
# TODO(b/143766089): Add ground truth masks for segmentation metrics.
groundtruths['source_id'] = dataloader_utils.process_source_id(
groundtruths['source_id'])
groundtruths = dataloader_utils.clip_or_pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
# Packs labels for model_fn outputs.
labels = {
'anchor_boxes': input_anchor.multilevel_boxes,
'image_info': image_info,
'groundtruths': groundtruths,
}
return image, labels
def _parse_predict_data(self, data):
"""Parses data for prediction.
Args:
data: the decoded tensor dictionary from TfExampleDecoder.
Returns:
A dictionary of {'images': image, 'labels': labels} where
        images: image tensor that is preprocessed to have normalized value and
dimension [output_size[0], output_size[1], 3]
labels: a dictionary of tensors used for training. The following
describes {key: value} pairs in the dictionary.
source_ids: Source image id. Default value -1 if the source id is
empty in the groundtruth annotation.
image_info: a 2D `Tensor` that encodes the information of the image
and the applied preprocessing. It is in the format of
            [[original_height, original_width], [scaled_height, scaled_width],
            [y_scale, x_scale], [y_offset, x_offset]].
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, 4] representing anchor boxes at each
level.
"""
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Compute Anchor boxes.
input_anchor = anchor.Anchor(
self._min_level,
self._max_level,
self._num_scales,
self._aspect_ratios,
self._anchor_size,
(image_height, image_width))
labels = {
'source_id': dataloader_utils.process_source_id(data['source_id']),
'anchor_boxes': input_anchor.multilevel_boxes,
'image_info': image_info,
}
if self._mode == ModeKeys.PREDICT_WITH_GT:
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(
data['groundtruth_boxes'], image_shape)
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_detections': tf.shape(data['groundtruth_classes']),
'boxes': boxes,
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = dataloader_utils.process_source_id(
groundtruths['source_id'])
groundtruths = dataloader_utils.clip_or_pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
labels['groundtruths'] = groundtruths
return {
'images': image,
'labels': labels,
}
|
eriser/picasso-graphic
|
refs/heads/master
|
tools/gyp/pylib/gyp/easy_xml_test.py
|
2698
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
    original = '<test>\'"\r&\nfoo'
    converted = '<test>\'"&#xD;&amp;&#xA;foo'
    converted_apos = converted.replace("'", "&apos;")
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
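  # Note: the expected strings above embed the XML character references
  # "&#xD;" (carriage return), "&amp;" (ampersand) and "&#xA;" (newline) as
  # literal text; apostrophes are additionally escaped only in element
  # content, which is why converted_apos is used for the text node.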
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
|
racker/service-registry
|
refs/heads/master
|
node_modules/rproxy/node_modules/cluster2/node_modules/npm/node_modules/node-gyp/gyp/test/variants/gyptest-variants.py
|
240
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify handling of build variants.
TODO: Right now, only the SCons generator supports this, so the
test case is SCons-specific. In particular, it relies on SCons'
ability to rebuild in response to changes on the command line. It
may be simpler to just drop this feature if the other generators
can't be made to behave the same way.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['scons'])
test.run_gyp('variants.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('variants.gyp', chdir='relocate/src')
test.run_built_executable('variants',
chdir='relocate/src',
stdout="Hello, world!\n")
test.sleep()
test.build('variants.gyp', 'VARIANT1=1', chdir='relocate/src')
test.run_built_executable('variants',
chdir='relocate/src',
stdout="Hello from VARIANT1\n")
test.sleep()
test.build('variants.gyp', 'VARIANT2=1', chdir='relocate/src')
test.run_built_executable('variants',
chdir='relocate/src',
stdout="Hello from VARIANT2\n")
test.pass_test()
|
EliotBerriot/django
|
refs/heads/master
|
django/contrib/admin/templatetags/admin_list.py
|
173
|
from __future__ import unicode_literals
import datetime
import warnings
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.utils import formats
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html('<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy "smart" pagination with ellipses.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
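# Worked example of the windowing above (illustrative; numbers assumed):
# num_pages=20 and page_num=9 (zero-based) with ON_EACH_SIDE=3, ON_ENDS=2
# produce
#     page_range = [0, 1, '.', 6, 7, 8, 9, 10, 11, 12, '.', 18, 19]
# which the template renders one-based as: 1 2 ... 7 8 9 10 11 12 13 ... 19 20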
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.svg' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if allow_tags:
warnings.warn(
"Deprecated allow_tags attribute used on field {}. "
"Use django.utils.safestring.format_html(), "
"format_html_join(), or mark_safe() instead.".format(field_name),
RemovedInDjango20Warning
)
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if force_text(result_repr) == '':
result_repr = mark_safe(' ')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
link_or_text = format_html(
'<a href="{}"{}>{}</a>',
url,
format_html(
' onclick="opener.dismissRelatedLookupPopup(window, '
                        '&#39;{}&#39;); return false;"', result_id
) if cl.is_popup else '',
result_repr)
yield format_html('<{}{}>{}</{}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
            # By default the fields come from ModelAdmin.list_editable, but if
            # we pull the fields out of the form instead of list_editable,
            # custom admins can provide fields on a per-request basis.
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field(field_name)
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda filters: cl.get_query_string(filters, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
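# Illustrative example (hypothetical ModelAdmin): with date_hierarchy set to
# 'pub_date' and a querystring of ?pub_date__year=2015, the year branch above
# filters the queryset to 2015, renders one choice per month that actually
# contains rows, and offers a "back" link to the all-dates view.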
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
})
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
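# Illustrative note: the actions bar can be rendered both above and below the
# results table; each render bumps action_index, and the template can emit it
# (e.g. as a hidden "index" field) so the view knows which of the duplicated
# action forms was submitted.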
|
cloudtools/troposphere
|
refs/heads/master
|
tests/test_apigateway.py
|
1
|
import unittest
from troposphere import Join
from troposphere.apigateway import GatewayResponse, Model
class TestModel(unittest.TestCase):
def test_schema(self):
# Check with no schema
model = Model(
"schema",
RestApiId="apiid",
)
model.validate()
# Check valid json schema string
model = Model(
"schema",
RestApiId="apiid",
Schema='{"a": "b"}',
)
model.validate()
# Check invalid json schema string
model = Model(
"schema",
RestApiId="apiid",
Schema='{"a: "b"}',
)
with self.assertRaises(ValueError):
model.validate()
# Check accepting dict and converting to string in validate
d = {"c": "d"}
model = Model("schema", RestApiId="apiid", Schema=d)
model.validate()
self.assertEqual(model.properties["Schema"], '{"c": "d"}')
# Check invalid Schema type
with self.assertRaises(TypeError):
model = Model("schema", RestApiId="apiid", Schema=1)
# Check Schema being an AWSHelperFn
model = Model(
"schema",
RestApiId="apiid",
Schema=Join(":", ['{"a', ': "b"}']),
)
model.validate()
class TestGatewayResponse(unittest.TestCase):
def test_response_type(self):
gateway_response = GatewayResponse(
"GatewayResponse",
ResponseType="DEFAULT_4XX",
RestApiId="apiid",
StatusCode="200",
)
gateway_response.validate()
with self.assertRaises(ValueError):
gateway_response = GatewayResponse(
"GatewayResponse",
ResponseType="INVALID_RESPONSE_TYPE",
RestApiId="apiid",
StatusCode="200",
)
if __name__ == "__main__":
unittest.main()
|
TheWardoctor/Wardoctors-repo
|
refs/heads/master
|
script.module.nanscrapers/lib/nanscrapers/scraperplugins/mfree.py
|
6
|
import re
import urlparse
import requests
from BeautifulSoup import BeautifulSoup
from ..common import clean_title, random_agent
from ..scraper import Scraper
import xbmc
from nanscrapers.modules import cfscrape
class Mfree(Scraper):
domains = ['m4ufree.info']
name = "M4U"
def __init__(self):
self.base_link = 'http://m4ufree.info'
self.include_link = '/include/autocomplete.php?q='
self.movie_search_link = '/tag/%s'
self.tv_search_link = '/tagtvs/%s'
self.scraper = cfscrape.create_scraper()
def scrape_movie(self, title, year, imdb, debrid = False):
try:
headers = {'User-Agent': random_agent()}
q = (title.translate(None, '\/:*?."\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
page = 1
query = urlparse.urljoin(self.base_link, self.movie_search_link % q)
cleaned_title = clean_title(title)
while True:
    # urljoin() accepts no page argument; build the paged URL explicitly
    page_url = query if page == 1 else '%s/%s' % (query, page)
    html = self.scraper.get(page_url, headers=headers, timeout=30).content
containers = re.compile('<a class="top-item".*href="(.*?)"><cite>(.*?)</cite></a>').findall(html)
for href, title in containers:
try:
parsed = re.findall('(.+?) \(?(\d{4})', title)
parsed_title = parsed[0][0]
parsed_years = parsed[0][1]
except:
continue
if cleaned_title == clean_title(parsed_title) and year == parsed_years:
try:
headers = {'User-Agent': random_agent()}
html = self.scraper.get(href, headers=headers, timeout=30).content
parsed_html = BeautifulSoup(html)
quality_title = parsed_html.findAll("h3", attrs={'title': re.compile("Quality of ")})[0]
quality = quality_title.findAll('span')[0].text
match = re.search('href="([^"]+-full-movie-.*?[^"]+)', html)
if match:
url = match.group(1)
return self.sources(url, quality)
except:
pass
page_numbers = re.findall("http://m4ufree.info/tag/%s/(.*)\"" % q, html)
if str(page + 1) not in page_numbers:
break
else:
page += 1
except:
pass
return []
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
headers = {'User-Agent': random_agent()}
q = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
query = urlparse.urljoin(self.base_link, self.tv_search_link % q)
cleaned_title = clean_title(title)
html = BeautifulSoup(self.scraper.get(query, headers=headers, timeout=30).content)
links = html.findAll('a', attrs={'class': 'top-h1'})
show_url = None
for link in links:
link_title = link.text
if cleaned_title == clean_title(link_title):
show_url = link["href"]
break
if show_url:
html = BeautifulSoup(self.scraper.get(show_url, headers=headers, timeout=30).content)
link_container = html.findAll("div", attrs={'class': 'bottom'})[-1]
episode_links = link_container.findAll("a")
episode_format1 = "S%02dE%02d" % (int(season), int(episode))
episode_format2 = "S%02d-E%02d" % (int(season), int(episode))
for episode_link in episode_links:
button = episode_link.contents[0]
episode_text = button.text
if episode_format1 in episode_text or episode_format2 in episode_text:
episode_url = episode_link["href"]
return self.sources(episode_url, "SD")
def sources(self, url, quality):
print 'Check Scrape: URL passing = %s qual = %s' % (url, quality)
sources = []
try:
headers = {'User-Agent': random_agent(),
'X-Requested-With': 'XMLHttpRequest',
'Referer': url,
'Host': 'm4ufree.info'
}
html = requests.get(url, headers=headers, timeout=30).content
#print 'pagegw'+html
servers = re.compile('class="btn-eps.+?link="(.+?)"',re.DOTALL).findall(html)
for server in servers:
try:
server_url = '%s/ajax_new.php' %(self.base_link)
form_data = {'m4u':server}
#print 'check url>>> '+server_url
server_html = requests.post(server_url, data=form_data,headers=headers, timeout=30).content
if '<h1> Plz visit m4ufree.info for this movie </h1>' in server_html:
server_url = '/ajax-tk.php?tk=tuan&v=%s' % server  # server is the captured link string
server_url = urlparse.urljoin(self.base_link, server_url)
server_html = self.scraper.get(server_url, headers=headers, timeout=30).content
links = []
try:
links.extend(re.findall('.*?"file":.*?"(.*?)"', server_html, re.I | re.DOTALL))
except:
pass
try:
links.extend(re.findall(r'sources: \[.*?\{file: "(.*?)"', server_html, re.I | re.DOTALL))
except:
pass
try:
links.extend(re.findall(r'<source.*?src="(.*?)"', server_html, re.I | re.DOTALL))
except:
pass
try:
links.extend(re.findall(r'<iframe.*?src="(.*?)"', server_html, re.I | re.DOTALL))
except:
pass
for link in links:
try:
link_source = link.replace('../view.php?', 'view.php?').replace('./view.php?', 'view.php?')
if not link_source.startswith('http'): link_source = urlparse.urljoin(self.base_link,
link_source)
if "m4u" in link_source:
try:
req = self.scraper.head(link_source, headers=headers)
if req.headers['Location'] != "":
link_source = req.headers['Location']
except:
pass
if 'google' in link_source:
try:
quality = googletag(link_source)[0]['quality']
except:
pass
sources.append(
{'source': 'google video', 'quality': quality, 'scraper': self.name,
'url': link_source,
'direct': True})
elif 'openload.co' in link_source:
sources.append(
{'source': 'openload.co', 'quality': quality, 'scraper': self.name,
'url': link_source,
'direct': False})
else:
loc = urlparse.urlparse(link_source).netloc # get base host (ex. www.google.com)
source_base = str.join(".",loc.split(".")[1:-1])
if "4sync" in source_base:
try:
html = self.scraper.get(link_source).content
link_source = re.findall("<source src=\"(.*?)\"", html)[0]
try:
quality = re.findall("title: \".*?(\d+)p.*?\"", html)[0]
if quality == "720":
quality = "540"
except:
pass
except:
continue
sources.append(
{'source': source_base, 'quality': quality, 'scraper': self.name, 'url': link_source,
'direct': True})
except:
continue
except:
continue
except:
pass
return sources
def googletag(url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try:
quality = quality[0]
except:
return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': '720', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': '480', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': '480', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': '480', 'url': url}]
else:
raise
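# Illustrative example (hypothetical URL): itag 22 falls in the 720p bucket
# above, so:
# googletag('http://example.com/videoplayback?itag=22')
# -> [{'quality': '720', 'url': 'http://example.com/videoplayback?itag=22'}]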
|
h4ck3rm1k3/gcc-python-plugin-1
|
refs/heads/master
|
tests/cpychecker/refcounts/attributes/sets-exception/correct-usage/script.py
|
206
|
# -*- coding: utf-8 -*-
# Copyright 2011 David Malcolm <dmalcolm@redhat.com>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
from libcpychecker import main
main(verify_refcounting=True,
dump_traces=True)
|
dimtion/jml
|
refs/heads/master
|
outputFiles/statistics/archives/ourIA/improved_closest_v2.py/0.8/5/player1.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####################################################################################################################################################################################################################################
######################################################################################################## PRE-DEFINED IMPORTS #######################################################################################################
####################################################################################################################################################################################################################################
# Imports that are necessary for the program architecture to work properly
# Do not edit this code
import ast
import sys
import os
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED CONSTANTS ######################################################################################################
####################################################################################################################################################################################################################################
# Possible characters to send to the maze application
# Any other will be ignored
# Do not edit this code
UP = 'U'
DOWN = 'D'
LEFT = 'L'
RIGHT = 'R'
####################################################################################################################################################################################################################################
# Name of your team
# It will be displayed in the maze
# You have to edit this code
TEAM_NAME = "Improved closest v2"
####################################################################################################################################################################################################################################
########################################################################################################## YOUR VARIABLES ##########################################################################################################
####################################################################################################################################################################################################################################
# Stores all the moves in a list to replay them one by one
allMoves = [UP, RIGHT, DOWN, RIGHT, UP, UP, UP, UP, RIGHT, UP, RIGHT, UP, UP, UP, RIGHT, RIGHT, RIGHT, RIGHT]
####################################################################################################################################################################################################################################
####################################################################################################### PRE-DEFINED FUNCTIONS ######################################################################################################
####################################################################################################################################################################################################################################
# Writes a message to the shell
# Use for debugging your program
# Channels stdout and stdin are captured to enable communication with the maze
# Do not edit this code
def debug (text) :
# Writes to the stderr channel
sys.stderr.write(str(text) + "\n")
sys.stderr.flush()
####################################################################################################################################################################################################################################
# Reads one line of information sent by the maze application
# This function is blocking, and will wait for a line to terminate
# The received information is automatically converted to the correct type
# Do not edit this code
def readFromPipe () :
# Reads from the stdin channel and returns the structure associated to the string
try :
text = sys.stdin.readline()
return ast.literal_eval(text.strip())
except :
os._exit(-1)
####################################################################################################################################################################################################################################
# Sends the text to the maze application
# Do not edit this code
def writeToPipe (text) :
# Writes to the stdout channel
sys.stdout.write(text)
sys.stdout.flush()
####################################################################################################################################################################################################################################
# Reads the initial maze information
# The function processes the text and returns the associated variables
# The dimensions of the maze are positive integers
# Maze map is a dictionary associating to a location its adjacent locations and the associated weights
# The preparation time gives the time during which 'initializationCode' can make computations before the game starts
# The turn time gives the time during which 'determineNextMove' can make computations before returning a decision
# Player locations are tuples (line, column)
# Coins are given as a list of locations where they appear
# A boolean indicates if the game is over
# Do not edit this code
def processInitialInformation () :
# We read from the pipe
data = readFromPipe()
return (data['mazeWidth'], data['mazeHeight'], data['mazeMap'], data['preparationTime'], data['turnTime'], data['playerLocation'], data['opponentLocation'], data['coins'], data['gameIsOver'])
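# Illustrative payload for the read above (hypothetical values; the real maze
# application supplies these):
# {'mazeWidth': 2, 'mazeHeight': 2,
#  'mazeMap': {(0, 0): {(0, 1): 1, (1, 0): 1}, (1, 1): {(1, 0): 1, (0, 1): 1}},
#  'preparationTime': 3000, 'turnTime': 100,
#  'playerLocation': (0, 0), 'opponentLocation': (1, 1),
#  'coins': [(0, 1)], 'gameIsOver': False}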
####################################################################################################################################################################################################################################
# Reads the information after each player moved
# The maze map and allowed times are no longer provided since they do not change
# Do not edit this code
def processNextInformation () :
# We read from the pipe
data = readFromPipe()
return (data['playerLocation'], data['opponentLocation'], data['coins'], data['gameIsOver'])
####################################################################################################################################################################################################################################
########################################################################################################## YOUR FUNCTIONS ##########################################################################################################
####################################################################################################################################################################################################################################
# This is where you should write your code to do things during the initialization delay
# This function should not return anything, but should be used for a short preprocessing
# This function takes as parameters the dimensions and map of the maze, the time allowed for computing, the players' locations in the maze and the remaining coin locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def initializationCode (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
# Nothing to do
pass
####################################################################################################################################################################################################################################
# This is where you should write your code to determine the next direction
# This function should return one of the directions defined in the CONSTANTS section
# This function takes as parameters the dimensions and map of the maze, the time allowed for computing, the players' locations in the maze and the remaining coin locations
# Make sure to have a safety margin for the time to include processing times (communication etc.)
def determineNextMove (mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins) :
# We return the next move as described by the list
global allMoves
nextMove = allMoves[0]
allMoves = allMoves[1:]
return nextMove
####################################################################################################################################################################################################################################
############################################################################################################# MAIN LOOP ############################################################################################################
####################################################################################################################################################################################################################################
# This is the entry point when executing this file
# We first send the name of the team to the maze
# The first message we receive from the maze includes its dimensions and map, the times allowed for the various steps, and the players' and coins' locations
# Then, at every loop iteration, we get the maze status and determine a move
# Do not edit this code
if __name__ == "__main__" :
# We send the team name
writeToPipe(TEAM_NAME + "\n")
# We process the initial information and have a delay to compute things using it
(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = processInitialInformation()
initializationCode(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)
# We decide how to move and wait for the next step
while not gameIsOver :
(playerLocation, opponentLocation, coins, gameIsOver) = processNextInformation()
if gameIsOver :
break
nextMove = determineNextMove(mazeWidth, mazeHeight, mazeMap, turnTime, playerLocation, opponentLocation, coins)
writeToPipe(nextMove)
####################################################################################################################################################################################################################################
####################################################################################################################################################################################################################################
|
GitHublong/hue
|
refs/heads/master
|
desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/config.py
|
56
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import CONFIGNS
from .element import Element
# Autogenerated
def ConfigItem(**args):
return Element(qname = (CONFIGNS, 'config-item'), **args)
def ConfigItemMapEntry(**args):
    return Element(qname = (CONFIGNS, 'config-item-map-entry'), **args)
def ConfigItemMapIndexed(**args):
    return Element(qname = (CONFIGNS, 'config-item-map-indexed'), **args)
def ConfigItemMapNamed(**args):
    return Element(qname = (CONFIGNS, 'config-item-map-named'), **args)
def ConfigItemSet(**args):
return Element(qname = (CONFIGNS, 'config-item-set'), **args)
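# Minimal usage sketch (attribute names assumed from the ODF settings schema):
# item = ConfigItem(name='VisibleAreaTop', type='int', text='0')
# builds a <config:config-item config:name="..." config:type="int"> element.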
|
hanlind/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/300_migration_context.py
|
44
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'migration_context'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
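# Note: Nova keeps archived rows in "shadow_"-prefixed copies of each table,
# so the loop above adds the column to both instance_extra and
# shadow_instance_extra; the hasattr() guard keeps the migration idempotent.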
|
Esri/utilities-solution-data-automation
|
refs/heads/master
|
solutionreporttools/csvexport.py
|
1
|
"""
@author: ArcGIS for Gas Utilities
@contact: ArcGISTeamUtilities@esri.com
@company: Esri
@version: 1.0
@description: Class is used to export a feature into CSV using field alias
@requirements: Python 2.7.x, ArcGIS 10.2
@copyright: Esri, 2015
@original source of script is from http://mappatondo.blogspot.com/2012/10/this-is-my-python-way-to-export-feature.html with modifications
"""
import sys, arcpy, csv
from arcpy import env
class ReportToolsError(Exception):
""" raised when error occurs in utility module functions """
pass
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
class CSVExport:
_tempWorkspace = None
_layers = None
_CSVLocation = None
def __init__(self, CSVLocation="", layer=None, workspace = None):
# Gets the values of where the temp feature class resides and
# the output location of the CSV.
try:
self._tempWorkspace = workspace
self._layer = layer
self._CSVLocation = CSVLocation
except arcpy.ExecuteError:
line, filename, synerror = trace()
raise ReportToolsError({
"function": "create_report_layers_using_config",
"line": line,
"filename": filename,
"synerror": synerror,
"arcpyError": arcpy.GetMessages(2),
}
)
except:
line, filename, synerror = trace()
raise ReportToolsError({
"function": "create_report_layers_using_config",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
def WriteCSV(self):
# This function writes the CSV. It writes the header then the rows. This script omits the SHAPE fields.
try:
env.workspace = self._tempWorkspace
#fc = arcpy.ListFeatureClasses(self._layers)
# for fcs in self._layer:
fcs = self._layer
if arcpy.Exists(fcs):
with open(self._CSVLocation, 'wb') as outFile:
print "%s create" % self._CSVLocation
linewriter = csv.writer(outFile, delimiter = ',')
fcdescribe = arcpy.Describe(fcs)
flds = fcdescribe.Fields
# skip shape fields and derivatives
attrs = ("areaFieldName", "lengthFieldName", "shapeFieldName")
resFields = [getattr(fcdescribe, attr) for attr in attrs
if hasattr(fcdescribe, attr)]
header,fldLst = zip(*((fld.AliasName, fld.name) for fld in flds
if fld.name not in resFields))
linewriter.writerow([h.encode('utf8') if isinstance(h, unicode) else h for h in header])
linewriter.writerows([[r.encode('utf8') if isinstance(r, unicode) else r for r in row]
for row in arcpy.da.SearchCursor(fcs, fldLst)])
print "CSV file complete"
return True
except arcpy.ExecuteError:
line, filename, synerror = trace()
raise ReportToolsError({
"function": "create_report_layers_using_config",
"line": line,
"filename": filename,
"synerror": synerror,
"arcpyError": arcpy.GetMessages(2),
}
)
except:
line, filename, synerror = trace()
raise ReportToolsError({
"function": "create_report_layers_using_config",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
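# Minimal usage sketch (paths and layer name are hypothetical):
# exporter = CSVExport(CSVLocation=r"C:\temp\roads.csv",
#                      layer="Roads", workspace=r"C:\temp\scratch.gdb")
# exporter.WriteCSV()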
|
CloudBoltSoftware/cloudbolt-forge
|
refs/heads/master
|
actions/cloudbolt_plugins/sample_set_group_by_datastore/set_group_by_datastore.py
|
1
|
"""
An example CloudBolt post-sync-vms hook script that sets the group on newly
synced servers that are on the associated datastore.
To use, first create a parameter for the group with a name that matches the
GROUP_DATASTORE_REGEX_CF value below. The term CF used throughout stands for
Custom Field, which is the same as a parameter. The value of the parameter
should be the name of the datastore you want to associate with the group. Note
that it must be a datastore and not a datastore cluster.
"""
import re
from resourcehandlers.vmware.models import VsphereResourceHandler
from orders.models import CustomFieldValue
GROUP_DATASTORE_REGEX_CF = "datastore_regex"
def get_cfv_group_mapping(cf_name):
"""
Given a specific CF, create and return a dictionary mapping of values and the
CloudBolt groups those values are associated with.
"""
mapping = {}
for cfv in CustomFieldValue.objects.filter(field__name=cf_name):
# Assumption: Every group will have a unique datastore regex value
group = cfv.group_set.first()
if group and cfv.value not in mapping:
mapping[cfv.value] = group
return mapping
def get_datastore_for_server(server):
"""
Given a server, return its datastore based on its first disk.
"""
root_disk = server.disks.first()
if root_disk:
return root_disk.cast().datastore
return None
def set_group_by_datastore(server, job, datastore_group_mapping):
"""
Given a server, compare its datastore to the regex values used to map to
groups and for any that match set the server's group accordingly.
"""
datastore = get_datastore_for_server(server)
if not datastore:
return
# Loop through the regex values and try to match
for regex in datastore_group_mapping:
if re.match(regex, datastore):
group = datastore_group_mapping[regex]
# Only need to set the group if it's not already set
if group != server.group:
job.set_progress("Adding server '{}' to group '{}'".format(
server, group))
server.group = group
server.save()
def run(job, logger=None):
datastore_group_mapping = get_cfv_group_mapping(GROUP_DATASTORE_REGEX_CF)
if not datastore_group_mapping:
return "", "", ""
jp = job.job_parameters.cast()
job.set_progress(
"Setting group on newly discovered servers based on their datastore"
)
# For each VMware VM try to set group
for rh in jp.resource_handlers.all():
rh = rh.cast()
if not isinstance(rh, VsphereResourceHandler):
continue
job.set_progress("Adjusting group for servers in {}".format(rh.name))
try:
rh.verify_connection()
except:
job.set_progress("Failed to connect to {}... skipping it".format(rh.name))
continue
for server in rh.server_set.filter(status='ACTIVE'):
set_group_by_datastore(server, job, datastore_group_mapping)
return "", "", ""
|
yl565/statsmodels
|
refs/heads/master
|
statsmodels/tsa/tests/test_ar.py
|
1
|
"""
Test AR Model
"""
import statsmodels.api as sm
from statsmodels.compat.python import range
from statsmodels.tsa.ar_model import AR
from numpy.testing import (assert_almost_equal, assert_allclose, assert_)
from statsmodels.tools.testing import assert_equal
from .results import results_ar
import numpy as np
import numpy.testing as npt
from pandas import Series, Index, DatetimeIndex
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
class CheckARMixin(object):
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)
def test_bse(self):
bse = np.sqrt(np.diag(self.res1.cov_params())) # no dof correction
# for compatibility with Stata
assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)
assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)
def test_pickle(self):
from statsmodels.compat.python import BytesIO
fh = BytesIO()
#test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0,0)
res_unpickled = self.res1.__class__.load(fh)
assert_(type(res_unpickled) is type(self.res1))
class TestAROLSConstant(CheckARMixin):
"""
Test AR fit by OLS with a constant.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
cls.res1 = AR(data.endog).fit(maxlag=9, method='cmle')
cls.res2 = results_ar.ARResultsOLS(constant=True)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start0,
DECIMAL_4)
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start9,
DECIMAL_4)
assert_almost_equal(model.predict(params, start=100),
self.res2.FVOLSnneg1start100, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0, DECIMAL_4)
assert_almost_equal(model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200, DECIMAL_4)
#assert_almost_equal(model.predict(params, n=200,start=-109),
# self.res2.FVOLSn200startneg109, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=424),
self.res2.FVOLSn100start325, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9, DECIMAL_4)
assert_almost_equal(model.predict(params),
self.res2.FVOLSdefault, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312, DECIMAL_4)
class TestAROLSNoConstant(CheckARMixin):
"""f
Test AR fit by OLS without a constant.
"""
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
cls.res1 = AR(data.endog).fit(maxlag=9,method='cmle',trend='nc')
cls.res2 = results_ar.ARResultsOLS(constant=False)
def test_predict(self):
model = self.res1.model
params = self.res1.params
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start0,
DECIMAL_4)
assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start9,
DECIMAL_4)
assert_almost_equal(model.predict(params, start=100),
self.res2.FVOLSnneg1start100, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=200),
self.res2.FVOLSn200start0, DECIMAL_4)
assert_almost_equal(model.predict(params, start=200, end=400),
self.res2.FVOLSn200start200, DECIMAL_4)
#assert_almost_equal(model.predict(params, n=200,start=-109),
# self.res2.FVOLSn200startneg109, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308,end=424),
self.res2.FVOLSn100start325, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=310),
self.res2.FVOLSn301start9, DECIMAL_4)
assert_almost_equal(model.predict(params),
self.res2.FVOLSdefault, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=316),
self.res2.FVOLSn4start312, DECIMAL_4)
assert_almost_equal(model.predict(params, start=308, end=327),
self.res2.FVOLSn15start312, DECIMAL_4)
#class TestARMLEConstant(CheckAR):
class TestARMLEConstant(object):
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
cls.res1 = AR(data.endog).fit(maxlag=9,method="mle", disp=-1)
cls.res2 = results_ar.ARResultsMLE(constant=True)
def test_predict(self):
model = self.res1.model
# for some reason convergence is off in 1 out of 10 runs on
# some platforms. i've never been able to replicate. see #910
params = np.array([ 5.66817602, 1.16071069, -0.39538222,
-0.16634055, 0.15044614, -0.09439266,
0.00906289, 0.05205291, -0.08584362,
0.25239198])
assert_almost_equal(model.predict(params), self.res2.FVMLEdefault,
DECIMAL_4)
assert_almost_equal(model.predict(params, start=9, end=308),
self.res2.FVMLEstart9end308, DECIMAL_4)
assert_almost_equal(model.predict(params, start=100, end=308),
self.res2.FVMLEstart100end308, DECIMAL_4)
assert_almost_equal(model.predict(params, start=0, end=200),
self.res2.FVMLEstart0end200, DECIMAL_4)
# Note: factor 0.5 in below two tests needed to meet precision on OS X.
assert_almost_equal(0.5 * model.predict(params, start=200, end=333),
0.5 * self.res2.FVMLEstart200end334, DECIMAL_4)
assert_almost_equal(0.5 * model.predict(params, start=308, end=333),
0.5 * self.res2.FVMLEstart308end334, DECIMAL_4)
assert_almost_equal(model.predict(params, start=9,end=309),
self.res2.FVMLEstart9end309, DECIMAL_4)
assert_almost_equal(model.predict(params, end=301),
self.res2.FVMLEstart0end301, DECIMAL_4)
assert_almost_equal(model.predict(params, start=4, end=312),
self.res2.FVMLEstart4end312, DECIMAL_4)
assert_almost_equal(model.predict(params, start=2, end=7),
self.res2.FVMLEstart2end7, DECIMAL_4)
def test_dynamic_predict(self):
# for some reason convergence is off in 1 out of 10 runs on
# some platforms. i've never been able to replicate. see #910
params = np.array([ 5.66817602, 1.16071069, -0.39538222,
-0.16634055, 0.15044614, -0.09439266,
0.00906289, 0.05205291, -0.08584362,
0.25239198])
res1 = self.res1
res2 = self.res2
rtol = 8e-6
# assert_raises pre-sample
# 9, 51
start, end = 9, 51
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn[start:end+1], rtol=rtol)
# 9, 308
start, end = 9, 308
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn[start:end+1], rtol=rtol)
# 9, 333
start, end = 9, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn[start:end+1], rtol=rtol)
# 100, 151
start, end = 100, 151
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn2[start:end+1], rtol=rtol)
# 100, 308
start, end = 100, 308
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn2[start:end+1], rtol=rtol)
# 100, 333
start, end = 100, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn2[start:end+1], rtol=rtol)
# 308, 308
start, end = 308, 308
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn3[start:end+1], rtol=rtol)
# 308, 333
start, end = 308, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn3[start:end+1], rtol=rtol)
# 309, 333
start, end = 309, 333
fv = res1.model.predict(params, start, end, dynamic=True)
assert_allclose(fv, res2.fcdyn4[start:end+1], rtol=rtol)
# None, None
start, end = None, None
fv = res1.model.predict(params, dynamic=True)
assert_allclose(fv, res2.fcdyn[9:309], rtol=rtol)
class TestAutolagAR(object):
@classmethod
def setupClass(cls):
data = sm.datasets.sunspots.load()
endog = data.endog
results = []
for lag in range(1,16+1):
endog_tmp = endog[16-lag:]
r = AR(endog_tmp).fit(maxlag=lag)
# See issue #324 for why we're doing these corrections vs. R
# results
k_ar = r.k_ar
k_trend = r.k_trend
log_sigma2 = np.log(r.sigma2)
aic = r.aic
aic = (aic - log_sigma2) * (1 + k_ar)/(1 + k_ar + k_trend)
aic += log_sigma2
hqic = r.hqic
hqic = (hqic - log_sigma2) * (1 + k_ar)/(1 + k_ar + k_trend)
hqic += log_sigma2
bic = r.bic
bic = (bic - log_sigma2) * (1 + k_ar)/(1 + k_ar + k_trend)
bic += log_sigma2
results.append([aic, hqic, bic, r.fpe])
res1 = np.asarray(results).T.reshape(4,-1, order='C')
# aic correction to match R
cls.res1 = res1
cls.res2 = results_ar.ARLagResults("const").ic
def test_ic(self):
npt.assert_almost_equal(self.res1, self.res2, DECIMAL_6)
def test_ar_dates():
# just make sure they work
data = sm.datasets.sunspots.load()
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
endog = Series(data.endog, index=dates)
ar_model = sm.tsa.AR(endog, freq='A').fit(maxlag=9, method='mle', disp=-1)
pred = ar_model.predict(start='2005', end='2015')
predict_dates = sm.tsa.datetools.dates_from_range('2005', '2015')
predict_dates = DatetimeIndex(predict_dates, freq='infer')
assert_equal(ar_model.data.predict_dates, predict_dates)
assert_equal(pred.index, predict_dates)
def test_ar_named_series():
dates = sm.tsa.datetools.dates_from_range("2011m1", length=72)
y = Series(np.random.randn(72), name="foobar", index=dates)
results = sm.tsa.AR(y).fit(2)
assert_(results.params.index.equals(Index(["const", "L1.foobar",
"L2.foobar"])))
def test_ar_start_params():
# fix 236
# smoke test
data = sm.datasets.sunspots.load()
res = AR(data.endog).fit(maxlag=9, start_params=0.1*np.ones(10),
method="mle", disp=-1, maxiter=100)
def test_ar_series():
# smoke test for 773
dta = sm.datasets.macrodata.load_pandas().data["cpi"].diff().dropna()
dates = sm.tsa.datetools.dates_from_range("1959Q1", length=len(dta))
dta.index = dates
ar = AR(dta).fit(maxlags=15)
ar.bse
def test_ar_select_order():
# 2118
np.random.seed(12345)
y = sm.tsa.arma_generate_sample([1, -.75, .3], [1], 100)
ts = Series(y, index=DatetimeIndex(start='1/1/1990', periods=100,
freq='M'))
ar = AR(ts)
res = ar.select_order(maxlag=12, ic='aic')
assert_(res == 2)
# GH 2658
def test_ar_select_order_tstat():
rs = np.random.RandomState(123)
tau = 25
y = rs.randn(tau)
ts = Series(y, index=DatetimeIndex(start='1/1/1990', periods=tau,
freq='M'))
ar = AR(ts)
res = ar.select_order(maxlag=5, ic='t-stat')
assert_equal(res, 0)
#TODO: likelihood for ARX model?
#class TestAutolagARX(object):
# def setup(self):
# data = sm.datasets.macrodata.load()
# endog = data.data.realgdp
# exog = data.data.realint
# results = []
# for lag in range(1, 26):
# endog_tmp = endog[26-lag:]
# exog_tmp = exog[26-lag:]
# r = AR(endog_tmp, exog_tmp).fit(maxlag=lag, trend='ct')
# results.append([r.aic, r.hqic, r.bic, r.fpe])
# self.res1 = np.asarray(results).T.reshape(4,-1, order='C')
|
40023154/2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_sre.py
|
622
|
# NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
import sys
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
"""Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
object. Actual compilation to opcodes happens in sre_compile."""
return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
if (char_ord < 128) or (flags & SRE_FLAG_UNICODE) \
or (flags & SRE_FLAG_LOCALE and char_ord < 256):
#return ord(unichr(char_ord).lower())
return ord(chr(char_ord).lower())
else:
return char_ord
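# A couple of concrete values for the branch above: getlower(ord('A'), 0) == 97
# (ASCII always lowers), while getlower(ord('É'), SRE_FLAG_UNICODE) == 233,
# since the UNICODE flag enables lowering of non-ASCII code points.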
class SRE_Pattern:
def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
self.pattern = pattern
self.flags = flags
self.groups = groups
self.groupindex = groupindex # Maps group names to group indices
self._indexgroup = indexgroup # Maps indices to group names
self._code = code
def match(self, string, pos=0, endpos=sys.maxsize):
"""If zero or more characters at the beginning of string match this
regular expression, return a corresponding MatchObject instance. Return
None if the string does not match the pattern."""
state = _State(string, pos, endpos, self.flags)
if state.match(self._code):
return SRE_Match(self, state)
return None
def search(self, string, pos=0, endpos=sys.maxsize):
"""Scan through string looking for a location where this regular
expression produces a match, and return a corresponding MatchObject
instance. Return None if no position in the string matches the
pattern."""
state = _State(string, pos, endpos, self.flags)
if state.search(self._code):
return SRE_Match(self, state)
else:
return None
def findall(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
matchlist = []
state = _State(string, pos, endpos, self.flags)
while state.start <= state.end:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
match = SRE_Match(self, state)
if self.groups == 0 or self.groups == 1:
item = match.group(self.groups)
else:
item = match.groups("")
matchlist.append(item)
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return matchlist
def _subx(self, template, string, count=0, subn=False):
filter = template
if not callable(template) and "\\" in template:
# handle non-literal strings ; hand it over to the template compiler
#import sre #sre was renamed to re
#fix me brython
#print("possible issue at _sre.py line 116")
import re as sre
filter = sre._subx(self, template)
state = _State(string, 0, sys.maxsize, self.flags)
sublist = []
n = last_pos = 0
while not count or n < count:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if last_pos < state.start:
sublist.append(string[last_pos:state.start])
if not (last_pos == state.start and
last_pos == state.string_position and n > 0):
# the above ignores empty matches on latest position
if callable(filter):
sublist.append(filter(SRE_Match(self, state)))
else:
sublist.append(filter)
last_pos = state.string_position
n += 1
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
if last_pos < state.end:
sublist.append(string[last_pos:state.end])
item = "".join(sublist)
if subn:
return item, n
else:
return item
def sub(self, repl, string, count=0):
"""Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl."""
return self._subx(repl, string, count, False)
def subn(self, repl, string, count=0):
"""Return the tuple (new_string, number_of_subs_made) found by replacing
the leftmost non-overlapping occurrences of pattern with the replacement
repl."""
return self._subx(repl, string, count, True)
def split(self, string, maxsplit=0):
"""Split string by the occurrences of pattern."""
splitlist = []
state = _State(string, 0, sys.maxsize, self.flags)
n = 0
last = state.start
while not maxsplit or n < maxsplit:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if state.start == state.string_position: # zero-width match
if last == state.end: # or end of string
break
state.start += 1
continue
splitlist.append(string[last:state.start])
# add groups (if any)
if self.groups:
match = SRE_Match(self, state)
splitlist.extend(list(match.groups(None)))
n += 1
last = state.start = state.string_position
splitlist.append(string[last:state.end])
return splitlist
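# Illustrative behaviour: for a pattern compiled from r'\d+', split('a1b22c')
# walks the matches '1' and '22' and returns ['a', 'b', 'c']; the zero-width
# check above is what keeps an empty match from looping forever.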
def finditer(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
#scanner = self.scanner(string, pos, endpos)
_list=[]
_m=self.scanner(string, pos, endpos)
_re=SRE_Scanner(self, string, pos, endpos)
_m=_re.search()
while _m:
_list.append(_m)
_m=_re.search()
return _list
#return iter(scanner.search, None)
def scanner(self, string, start=0, end=sys.maxsize):
return SRE_Scanner(self, string, start, end)
def __copy__(self):
raise TypeError("cannot copy this pattern object")
def __deepcopy__(self, memo=None):
raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
"""Undocumented scanner interface of sre."""
def __init__(self, pattern, string, start, end):
self.pattern = pattern
self._state = _State(string, start, end, self.pattern.flags)
def _match_search(self, matcher):
state = self._state
state.reset()
state.string_position = state.start
match = None
if matcher(self.pattern._code):
match = SRE_Match(self.pattern, state)
if match is None or state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return match
def match(self):
return self._match_search(self._state.match)
def search(self):
return self._match_search(self._state.search)
class SRE_Match:
def __init__(self, pattern, state):
self.re = pattern
self.string = state.string
self.pos = state.pos
self.endpos = state.end
self.lastindex = state.lastindex
if self.lastindex < 0:
self.lastindex = None
self.regs = self._create_regs(state)
#statement below is not valid under python3 ( 0 <= None)
#if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
# The above upper-bound check should not be necessary, as the re
# compiler is supposed to always provide an _indexgroup list long
# enough. But the re.Scanner class seems to screw up something
# there, test_scanner in test_re won't work without upper-bound
# checking. XXX investigate this and report bug to CPython.
self.lastgroup = pattern._indexgroup[self.lastindex]
else:
self.lastgroup = None
def _create_regs(self, state):
"""Creates a tuple of index pairs representing matched groups."""
regs = [(state.start, state.string_position)]
for group in range(self.re.groups):
mark_index = 2 * group
if mark_index + 1 < len(state.marks) \
and state.marks[mark_index] is not None \
and state.marks[mark_index + 1] is not None:
regs.append((state.marks[mark_index], state.marks[mark_index + 1]))
else:
regs.append((-1, -1))
return tuple(regs)
def _get_index(self, group):
if isinstance(group, int):
if group >= 0 and group <= self.re.groups:
return group
else:
if group in self.re.groupindex:
return self.re.groupindex[group]
raise IndexError("no such group")
def _get_slice(self, group, default):
group_indices = self.regs[group]
if group_indices[0] >= 0:
return self.string[group_indices[0]:group_indices[1]]
else:
return default
def start(self, group=0):
"""Returns the indices of the start of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][0]
def end(self, group=0):
"""Returns the indices of the end of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][1]
def span(self, group=0):
"""Returns the 2-tuple (m.start(group), m.end(group))."""
return self.start(group), self.end(group)
def expand(self, template):
"""Return the string obtained by doing backslash substitution and
resolving group references on template."""
import sre
return sre._expand(self.re, self, template)
def groups(self, default=None):
"""Returns a tuple containing all the subgroups of the match. The
default argument is used for groups that did not participate in the
match (defaults to None)."""
groups = []
for indices in self.regs[1:]:
if indices[0] >= 0:
groups.append(self.string[indices[0]:indices[1]])
else:
groups.append(default)
return tuple(groups)
def groupdict(self, default=None):
"""Return a dictionary containing all the named subgroups of the match.
The default argument is used for groups that did not participate in the
match (defaults to None)."""
groupdict = {}
for key, value in self.re.groupindex.items():
groupdict[key] = self._get_slice(value, default)
return groupdict
def group(self, *args):
"""Returns one or more subgroups of the match. Each argument is either a
group index or a group name."""
if len(args) == 0:
args = (0,)
grouplist = []
for group in args:
grouplist.append(self._get_slice(self._get_index(group), None))
if len(grouplist) == 1:
return grouplist[0]
else:
return tuple(grouplist)
def __copy__(self):
    raise TypeError("cannot copy this match object")
def __deepcopy__(self, memo=None):
    raise TypeError("cannot copy this match object")
class _State:
def __init__(self, string, start, end, flags):
self.string = string
if start < 0:
start = 0
if end > len(string):
end = len(string)
self.start = start
self.string_position = self.start
self.end = end
self.pos = start
self.flags = flags
self.reset()
def reset(self):
self.marks = []
self.lastindex = -1
self.marks_stack = []
self.context_stack = []
self.repeat = None
def match(self, pattern_codes):
# Optimization: Check string length. pattern_codes[3] contains the
# minimum length for a string to possibly match.
# brython.. the optimization doesn't work
#if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
# if self.end - self.string_position < pattern_codes[3]:
# #_log("reject (got %d chars, need %d)"
# # % (self.end - self.string_position, pattern_codes[3]))
# return False
dispatcher = _OpcodeDispatcher()
self.context_stack.append(_MatchContext(self, pattern_codes))
has_matched = None
while len(self.context_stack) > 0:
context = self.context_stack[-1]
has_matched = dispatcher.match(context)
if has_matched is not None: # don't pop if context isn't done
self.context_stack.pop()
return has_matched
def search(self, pattern_codes):
flags = 0
if pattern_codes[0] == OPCODES["info"]:
# optimization info block
# <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
return self.fast_search(pattern_codes)
flags = pattern_codes[2]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
string_position = self.start
if pattern_codes[0] == OPCODES["literal"]:
# Special case: Pattern starts with a literal character. This is
# used for short prefixes
character = pattern_codes[1]
while True:
while string_position < self.end \
and ord(self.string[string_position]) != character:
string_position += 1
if string_position >= self.end:
return False
self.start = string_position
string_position += 1
self.string_position = string_position
if flags & SRE_INFO_LITERAL:
return True
if self.match(pattern_codes[2:]):
return True
return False
# General case
while string_position <= self.end:
self.reset()
self.start = self.string_position = string_position
if self.match(pattern_codes):
return True
string_position += 1
return False
def fast_search(self, pattern_codes):
"""Skips forward in a string as fast as possible using information from
an optimization info block."""
# pattern starts with a known prefix
# <5=length> <6=skip> <7=prefix data> <overlap data>
flags = pattern_codes[2]
prefix_len = pattern_codes[5]
prefix_skip = pattern_codes[6] # don't really know what this is good for
prefix = pattern_codes[7:7 + prefix_len]
overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
i = 0
string_position = self.string_position
while string_position < self.end:
while True:
if ord(self.string[string_position]) != prefix[i]:
if i == 0:
break
else:
i = overlap[i]
else:
i += 1
if i == prefix_len:
# found a potential match
self.start = string_position + 1 - prefix_len
self.string_position = string_position + 1 \
- prefix_len + prefix_skip
if flags & SRE_INFO_LITERAL:
return True # matched all of pure literal pattern
if self.match(pattern_codes[2 * prefix_skip:]):
return True
i = overlap[i]
break
string_position += 1
return False
def set_mark(self, mark_nr, position):
if mark_nr & 1:
# This id marks the end of a group.
# fix python 3 division incompatability
#self.lastindex = mark_nr / 2 + 1
self.lastindex = mark_nr // 2 + 1
if mark_nr >= len(self.marks):
self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
self.marks[mark_nr] = position
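# Note: marks 2*g and 2*g+1 delimit group g+1 (group 1 uses marks 0 and 1),
# so setting an odd, closing mark such as 3 updates lastindex to
# 3 // 2 + 1 == 2, i.e. group 2 closed most recently.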
def get_marks(self, group_index):
marks_index = 2 * group_index
if len(self.marks) > marks_index + 1:
return self.marks[marks_index], self.marks[marks_index + 1]
else:
return None, None
def marks_push(self):
self.marks_stack.append((self.marks[:], self.lastindex))
def marks_pop(self):
self.marks, self.lastindex = self.marks_stack.pop()
def marks_pop_keep(self):
self.marks, self.lastindex = self.marks_stack[-1]
def marks_pop_discard(self):
self.marks_stack.pop()
def lower(self, char_ord):
return getlower(char_ord, self.flags)
class _MatchContext:
def __init__(self, state, pattern_codes):
self.state = state
self.pattern_codes = pattern_codes
self.string_position = state.string_position
self.code_position = 0
self.has_matched = None
def push_new_context(self, pattern_offset):
"""Creates a new child context of this context and pushes it on the
stack. pattern_offset is the offset off the current code position to
start interpreting from."""
child_context = _MatchContext(self.state,
self.pattern_codes[self.code_position + pattern_offset:])
#print("_sre.py:517:pushing new context") #, child_context.has_matched)
#print(self.state.string_position)
#print(self.pattern_codes[self.code_position + pattern_offset:])
#print(pattern_offset)
self.state.context_stack.append(child_context)
return child_context
def peek_char(self, peek=0):
return self.state.string[self.string_position + peek]
def skip_char(self, skip_count):
self.string_position += skip_count
def remaining_chars(self):
return self.state.end - self.string_position
def peek_code(self, peek=0):
return self.pattern_codes[self.code_position + peek]
def skip_code(self, skip_count):
self.code_position += skip_count
def remaining_codes(self):
return len(self.pattern_codes) - self.code_position
def at_beginning(self):
return self.string_position == 0
def at_end(self):
return self.string_position == self.state.end
def at_linebreak(self):
return not self.at_end() and _is_linebreak(self.peek_char())
def at_boundary(self, word_checker):
if self.at_beginning() and self.at_end():
return False
that = not self.at_beginning() and word_checker(self.peek_char(-1))
this = not self.at_end() and word_checker(self.peek_char())
return this != that
class _RepeatContext(_MatchContext):
def __init__(self, context):
_MatchContext.__init__(self, context.state,
context.pattern_codes[context.code_position:])
self.count = -1
#print('569:repeat', context.state.repeat)
self.previous = context.state.repeat
self.last_position = None
class _Dispatcher:
DISPATCH_TABLE = None
def dispatch(self, code, context):
method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
return method(self, context)
def unknown(self, code, ctx):
raise NotImplementedError()
def build_dispatch_table(cls, code_dict, method_prefix):
if cls.DISPATCH_TABLE is not None:
return
table = {}
for key, value in code_dict.items():
if hasattr(cls, "%s%s" % (method_prefix, key)):
table[value] = getattr(cls, "%s%s" % (method_prefix, key))
cls.DISPATCH_TABLE = table
build_dispatch_table = classmethod(build_dispatch_table)
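# Rough sketch of the resulting table (opcode numbers are whatever the
# OPCODES mapping defines): build_dispatch_table(OPCODES, "op_") binds
# e.g. the numeric value OPCODES["literal"] to the class's op_literal
# function, so dispatch(OPCODES["literal"], ctx) ends up calling
# op_literal(self, ctx).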
class _OpcodeDispatcher(_Dispatcher):
def __init__(self):
self.executing_contexts = {}
self.at_dispatcher = _AtcodeDispatcher()
self.ch_dispatcher = _ChcodeDispatcher()
self.set_dispatcher = _CharsetDispatcher()
def match(self, context):
"""Returns True if the current context matches, False if it doesn't and
None if matching is not finished, i.e. it must be resumed after child
contexts have been matched."""
while context.remaining_codes() > 0 and context.has_matched is None:
opcode = context.peek_code()
if not self.dispatch(opcode, context):
return None
if context.has_matched is None:
context.has_matched = False
return context.has_matched
def dispatch(self, opcode, context):
"""Dispatches a context on a given opcode. Returns True if the context
is done matching, False if it must be resumed when next encountered."""
#if self.executing_contexts.has_key(id(context)):
if id(context) in self.executing_contexts:
generator = self.executing_contexts[id(context)]
del self.executing_contexts[id(context)]
has_finished = next(generator)
else:
method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
has_finished = method(self, context)
if hasattr(has_finished, "__next__"): # avoid using the types module
generator = has_finished
has_finished = next(generator)
if not has_finished:
self.executing_contexts[id(context)] = generator
return has_finished
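# Sketch of the coroutine protocol used by dispatch above (op_example
# is hypothetical): a handler that needs a child match is a generator
# that suspends itself with "yield False",
#
#     def op_example(self, ctx):
#         child = ctx.push_new_context(1)
#         yield False   # suspend; the engine matches the child first
#         ctx.has_matched = child.has_matched
#         yield True    # finished; the generator is discarded
#
# dispatch() stores the live generator in executing_contexts under
# id(context) and resumes it with next() when the context reappears.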
def op_success(self, ctx):
# end of pattern
#self._log(ctx, "SUCCESS")
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
return True
def op_failure(self, ctx):
# immediate failure
#self._log(ctx, "FAILURE")
ctx.has_matched = False
return True
def general_op_literal(self, ctx, compare, decorate=lambda x: x):
#print(ctx.peek_char())
if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
decorate(ctx.peek_code(1))):
ctx.has_matched = False
ctx.skip_code(2)
ctx.skip_char(1)
def op_literal(self, ctx):
# match literal string
# <LITERAL> <code>
#self._log(ctx, "LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq)
return True
def op_not_literal(self, ctx):
# match anything that is not the given literal character
# <NOT_LITERAL> <code>
#self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne)
return True
def op_literal_ignore(self, ctx):
# match literal regardless of case
# <LITERAL_IGNORE> <code>
#self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq, ctx.state.lower)
return True
def op_not_literal_ignore(self, ctx):
# match literal regardless of case
# <LITERAL_IGNORE> <code>
#self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne, ctx.state.lower)
return True
def op_at(self, ctx):
# match at given position
# <AT> <code>
#self._log(ctx, "AT", ctx.peek_code(1))
if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line693, update context.has_matched variable')
return True
ctx.skip_code(2)
return True
def op_category(self, ctx):
# match at given category
# <CATEGORY> <code>
#self._log(ctx, "CATEGORY", ctx.peek_code(1))
if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line703, update context.has_matched variable')
return True
ctx.skip_code(2)
ctx.skip_char(1)
return True
def op_any(self, ctx):
# match anything (except a newline)
# <ANY>
#self._log(ctx, "ANY")
if ctx.at_end() or ctx.at_linebreak():
ctx.has_matched = False
#print('_sre.py:line714, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def op_any_all(self, ctx):
# match anything
# <ANY_ALL>
#self._log(ctx, "ANY_ALL")
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line725, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def general_op_in(self, ctx, decorate=lambda x: x):
#self._log(ctx, "OP_IN")
#print('general_op_in')
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line734, update context.has_matched variable')
return
skip = ctx.peek_code(1)
ctx.skip_code(2) # set op pointer to the set code
#print(ctx.peek_char(), ord(ctx.peek_char()),
# decorate(ord(ctx.peek_char())))
if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
#print('_sre.py:line738, update context.has_matched variable')
ctx.has_matched = False
return
ctx.skip_code(skip - 1)
ctx.skip_char(1)
#print('end:general_op_in')
def op_in(self, ctx):
# match set member (or non_member)
# <IN> <skip> <set>
#self._log(ctx, "OP_IN")
self.general_op_in(ctx)
return True
def op_in_ignore(self, ctx):
# match set member (or non_member), disregarding case of current char
# <IN_IGNORE> <skip> <set>
#self._log(ctx, "OP_IN_IGNORE")
self.general_op_in(ctx, ctx.state.lower)
return True
def op_jump(self, ctx):
# jump forward
# <JUMP> <offset>
#self._log(ctx, "JUMP", ctx.peek_code(1))
ctx.skip_code(ctx.peek_code(1) + 1)
return True
# skip info
# <INFO> <skip>
op_info = op_jump
def op_mark(self, ctx):
# set mark
# <MARK> <gid>
#self._log(ctx, "OP_MARK", ctx.peek_code(1))
ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
ctx.skip_code(2)
return True
def op_branch(self, ctx):
# alternation
# <BRANCH> <0=skip> code <JUMP> ... <NULL>
#self._log(ctx, "BRANCH")
ctx.state.marks_push()
ctx.skip_code(1)
current_branch_length = ctx.peek_code(0)
while current_branch_length:
# The following tries to shortcut branches starting with an
# (unmatched) literal. _sre.c also shortcuts charsets here.
if not (ctx.peek_code(1) == OPCODES["literal"] and \
(ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(1)
#print("_sre.py:803:op_branch")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.marks_pop_keep()
ctx.skip_code(current_branch_length)
current_branch_length = ctx.peek_code(0)
ctx.state.marks_pop_discard()
ctx.has_matched = False
#print('_sre.py:line805, update context.has_matched variable')
yield True
def op_repeat_one(self, ctx):
# match repeated sequence (maximizing).
# this operator only works if the repeated item is exactly one character
# wide, and we're not already collecting backtracking points.
# <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#print("repeat one", mincount, maxcount)
#self._log(ctx, "REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
count = self.count_repetitions(ctx, maxcount)
ctx.skip_char(count)
if count < mincount:
ctx.has_matched = False
yield True
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
# Special case: Tail starts with a literal. Skip positions where
# the rest of the pattern cannot possibly match.
char = ctx.peek_code(ctx.peek_code(1) + 2)
while True:
while count >= mincount and \
(ctx.at_end() or ord(ctx.peek_char()) != char):
ctx.skip_char(-1)
count -= 1
if count < mincount:
break
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:856:push_new_context")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
else:
# General case: backtracking
while count >= mincount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
yield True
def op_min_repeat_one(self, ctx):
# match repeated sequence (minimizing)
# <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
if mincount == 0:
count = 0
else:
count = self.count_repetitions(ctx, mincount)
if count < mincount:
ctx.has_matched = False
#print('_sre.py:line891, update context.has_matched variable')
yield True
ctx.skip_char(count)
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
while maxcount == MAXREPEAT or count <= maxcount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print('_sre.py:916:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.string_position = ctx.string_position
if self.count_repetitions(ctx, 1) == 0:
break
ctx.skip_char(1)
count += 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
yield True
def op_repeat(self, ctx):
# create repeat context. all the hard work is done by the UNTIL
# operator (MAX_UNTIL, MIN_UNTIL)
# <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
#self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
#if ctx.state.repeat is None:
# print("951:ctx.state.repeat is None")
# #ctx.state.repeat=_RepeatContext(ctx)
repeat = _RepeatContext(ctx)
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:941:push new context", id(child_context))
#print(child_context.state.repeat)
#print(ctx.state.repeat)
# are these two yields causing the issue?
yield False
ctx.state.repeat = repeat.previous
ctx.has_matched = child_context.has_matched
yield True
def op_max_until(self, ctx):
# maximizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
repeat = ctx.state.repeat
#print("op_max_until") #, id(ctx.state.repeat))
if repeat is None:
#print(id(ctx), id(ctx.state))
raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MAX_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
if (count < maxcount or maxcount == MAXREPEAT) \
and ctx.state.string_position != repeat.last_position:
# we may have enough matches; if we can match another item, do so
repeat.count = count
ctx.state.marks_push()
save_last_position = repeat.last_position # zero-width match protection
repeat.last_position = ctx.state.string_position
child_context = repeat.push_new_context(4)
yield False
repeat.last_position = save_last_position
if child_context.has_matched:
ctx.state.marks_pop_discard()
ctx.has_matched = True
yield True
ctx.state.marks_pop()
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
# cannot match more repeated items here. make sure the tail matches
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print("_sre.py:987:op_max_until")
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
yield True
def op_min_until(self, ctx):
# minimizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
repeat = ctx.state.repeat
if repeat is None:
raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MIN_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
# see if the tail matches
ctx.state.marks_push()
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print('_sre.py:1022:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
ctx.state.marks_pop()
# match more until tail matches
if count >= maxcount and maxcount != MAXREPEAT:
ctx.has_matched = False
#print('_sre.py:line1022, update context.has_matched variable')
yield True
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
def general_op_groupref(self, ctx, decorate=lambda x: x):
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.has_matched = False
return True
while group_start < group_end:
if ctx.at_end() or decorate(ord(ctx.peek_char())) \
!= decorate(ord(ctx.state.string[group_start])):
ctx.has_matched = False
#print('_sre.py:line1042, update context.has_matched variable')
return True
group_start += 1
ctx.skip_char(1)
ctx.skip_code(2)
return True
def op_groupref(self, ctx):
# match backreference
# <GROUPREF> <zero-based group index>
#self._log(ctx, "GROUPREF", ctx.peek_code(1))
return self.general_op_groupref(ctx)
def op_groupref_ignore(self, ctx):
# match backreference case-insensitive
# <GROUPREF_IGNORE> <zero-based group index>
#self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
return self.general_op_groupref(ctx, ctx.state.lower)
def op_groupref_exists(self, ctx):
# <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
#self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.skip_code(ctx.peek_code(2) + 1)
else:
ctx.skip_code(3)
return True
def op_assert(self, ctx):
# assert subpattern
# <ASSERT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position < 0:
ctx.has_matched = False
yield True
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.skip_code(ctx.peek_code(1) + 1)
else:
ctx.has_matched = False
yield True
def op_assert_not(self, ctx):
# assert not subpattern
# <ASSERT_NOT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position >= 0:
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.has_matched = False
yield True
ctx.skip_code(ctx.peek_code(1) + 1)
yield True
def unknown(self, ctx):
#self._log(ctx, "UNKNOWN", ctx.peek_code())
raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())
def check_charset(self, ctx, char):
"""Checks whether a character matches set of arbitrary length. Assumes
the code pointer is at the first member of the set."""
self.set_dispatcher.reset(char)
save_position = ctx.code_position
result = None
while result is None:
result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
ctx.code_position = save_position
#print("_sre.py:1123:check_charset", result)
return result
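# Illustrative layout of a small set program walked by check_charset
# (opcode order is only a sketch): for a class like [a-c0] the codes
# are roughly
#     LITERAL ord('0')  RANGE ord('a') ord('c')  FAILURE
# and the dispatcher scans members until one matches or set_failure
# reports a miss (with the result inverted after a NEGATE).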
def count_repetitions(self, ctx, maxcount):
"""Returns the number of repetitions of a single item, starting from the
current string position. The code pointer is expected to point to a
REPEAT_ONE operation (with the repeated item 4 code words ahead)."""
count = 0
real_maxcount = ctx.state.end - ctx.string_position
if maxcount < real_maxcount and maxcount != MAXREPEAT:
real_maxcount = maxcount
# XXX could special case every single character pattern here, as in C.
# This is a general solution, a bit hackish, but it works and should be
# efficient.
code_position = ctx.code_position
string_position = ctx.string_position
ctx.skip_code(4)
reset_position = ctx.code_position
while count < real_maxcount:
# this works because the single character pattern is followed by
# a success opcode
ctx.code_position = reset_position
self.dispatch(ctx.peek_code(), ctx)
#print("count_repetitions", ctx.has_matched, count)
if ctx.has_matched is False: # could be None as well
break
count += 1
ctx.has_matched = None
ctx.code_position = code_position
ctx.string_position = string_position
return count
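# Worked example for count_repetitions (inputs illustrative): matching
# a* against "aaab" from position 0 dispatches the single-item code
# (LITERAL ord('a')) repeatedly and returns 3; op_repeat_one then
# backtracks from that maximum down towards mincount as the tail
# requires.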
def _log(self, context, opname, *args):
arg_string = ("%s " * len(args)) % args
_log("|%s|%s|%s %s" % (context.pattern_codes,
context.string_position, opname, arg_string))
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
def __init__(self):
self.ch_dispatcher = _ChcodeDispatcher()
def reset(self, char):
self.char = char
self.ok = True
def set_failure(self, ctx):
return not self.ok
def set_literal(self, ctx):
# <LITERAL> <code>
if ctx.peek_code(1) == self.char:
return self.ok
else:
ctx.skip_code(2)
def set_category(self, ctx):
# <CATEGORY> <code>
if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
return self.ok
else:
ctx.skip_code(2)
def set_charset(self, ctx):
# <CHARSET> <bitmap> (16 bits per code word)
char_code = self.char
ctx.skip_code(1) # point to beginning of bitmap
if CODESIZE == 2:
if char_code < 256 and ctx.peek_code(char_code >> 4) \
& (1 << (char_code & 15)):
return self.ok
ctx.skip_code(16) # skip bitmap
else:
if char_code < 256 and ctx.peek_code(char_code >> 5) \
& (1 << (char_code & 31)):
return self.ok
ctx.skip_code(8) # skip bitmap
def set_range(self, ctx):
# <RANGE> <lower> <upper>
if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
return self.ok
ctx.skip_code(3)
def set_negate(self, ctx):
self.ok = not self.ok
ctx.skip_code(1)
# FIXME (Brython): the array module is not available here.
def set_bigcharset(self, ctx):
raise NotImplementedError("_sre.py: set_bigcharset, array not implemented")
# <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
char_code = self.char
count = ctx.peek_code(1)
ctx.skip_code(2)
if char_code < 65536:
block_index = char_code >> 8
# NB: there are CODESIZE block indices per bytecode
a = array.array("B")
a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
[ctx.peek_code(block_index // CODESIZE)]).tostring())
block = a[block_index % CODESIZE]
ctx.skip_code(256 // CODESIZE) # skip block indices
block_value = ctx.peek_code(block * (32 // CODESIZE)
+ ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
return self.ok
else:
ctx.skip_code(256 // CODESIZE) # skip block indices
ctx.skip_code(count * (32 // CODESIZE)) # skip blocks
def unknown(self, ctx):
return False
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
def at_beginning(self, ctx):
return ctx.at_beginning()
at_beginning_string = at_beginning
def at_beginning_line(self, ctx):
return ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1))
def at_end(self, ctx):
return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end()
def at_end_line(self, ctx):
return ctx.at_linebreak() or ctx.at_end()
def at_end_string(self, ctx):
return ctx.at_end()
def at_boundary(self, ctx):
return ctx.at_boundary(_is_word)
def at_non_boundary(self, ctx):
return not ctx.at_boundary(_is_word)
def at_loc_boundary(self, ctx):
return ctx.at_boundary(_is_loc_word)
def at_loc_non_boundary(self, ctx):
return not ctx.at_boundary(_is_loc_word)
def at_uni_boundary(self, ctx):
return ctx.at_boundary(_is_uni_word)
def at_uni_non_boundary(self, ctx):
return not ctx.at_boundary(_is_uni_word)
def unknown(self, ctx):
return False
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
def category_digit(self, ctx):
return _is_digit(ctx.peek_char())
def category_not_digit(self, ctx):
return not _is_digit(ctx.peek_char())
def category_space(self, ctx):
return _is_space(ctx.peek_char())
def category_not_space(self, ctx):
return not _is_space(ctx.peek_char())
def category_word(self, ctx):
return _is_word(ctx.peek_char())
def category_not_word(self, ctx):
return not _is_word(ctx.peek_char())
def category_linebreak(self, ctx):
return _is_linebreak(ctx.peek_char())
def category_not_linebreak(self, ctx):
return not _is_linebreak(ctx.peek_char())
def category_loc_word(self, ctx):
return _is_loc_word(ctx.peek_char())
def category_loc_not_word(self, ctx):
return not _is_loc_word(ctx.peek_char())
def category_uni_digit(self, ctx):
return ctx.peek_char().isdigit()
def category_uni_not_digit(self, ctx):
return not ctx.peek_char().isdigit()
def category_uni_space(self, ctx):
return ctx.peek_char().isspace()
def category_uni_not_space(self, ctx):
return not ctx.peek_char().isspace()
def category_uni_word(self, ctx):
return _is_uni_word(ctx.peek_char())
def category_uni_not_word(self, ctx):
return not _is_uni_word(ctx.peek_char())
def category_uni_linebreak(self, ctx):
return ord(ctx.peek_char()) in _uni_linebreaks
def category_uni_not_linebreak(self, ctx):
return ord(ctx.peek_char()) not in _uni_linebreaks
def unknown(self, ctx):
return False
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 1
def _is_space(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 2
def _is_word(char):
# NB: non-ASCII chars aren't words according to _sre.c
code = ord(char)
return code < 128 and _ascii_char_info[code] & 16
def _is_loc_word(char):
return (not (ord(char) & ~255) and char.isalnum()) or char == '_'
def _is_uni_word(char):
# not valid in python 3
#return unichr(ord(char)).isalnum() or char == '_'
return chr(ord(char)).isalnum() or char == '_'
def _is_linebreak(char):
return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
if 0:
print(message)
|
Eagles2F/sync-engine
|
refs/heads/master
|
migrations/versions/017_haspublicid.py
|
11
|
"""HasPublicID
Revision ID: 2c9f3a06de09
Revises: 5093433b073
Create Date: 2014-04-26 04:05:57.715053
"""
from __future__ import division
# revision identifiers, used by Alembic.
revision = '2c9f3a06de09'
down_revision = '5093433b073'
import sys
from gc import collect as garbage_collect
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
chunk_size = 500
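# Sketch of the chunked-commit pattern used in upgrade() below
# (variable names illustrative): committing every chunk_size rows
# keeps memory bounded while backfilling, roughly
#   for i, row in enumerate(query.yield_per(chunk_size)):
#       row.public_id = generate_public_id()
#       if i % chunk_size == 0:
#           db_session.commit()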
def upgrade():
from inbox.sqlalchemy_ext.util import generate_public_id
from inbox.models.session import session_scope
# These all inherit HasPublicID
from inbox.models import (
Account, Block, Contact, Message, Namespace,
SharedFolder, Thread, User, UserSession, HasPublicID)
classes = [
Account, Block, Contact, Message, Namespace,
SharedFolder, Thread, User, UserSession]
for c in classes:
assert issubclass(c, HasPublicID)
print '[{0}] adding public_id column... '.format(c.__tablename__),
sys.stdout.flush()
op.add_column(c.__tablename__, sa.Column(
'public_id', mysql.BINARY(16), nullable=False))
print 'adding index... ',
op.create_index(
'ix_{0}_public_id'.format(c.__tablename__),
c.__tablename__,
['public_id'],
unique=False)
print 'Done!'
sys.stdout.flush()
print 'Finished adding columns. \nNow generating public_ids'
with session_scope() as db_session:
count = 0
for c in classes:
garbage_collect()
print '[{0}] Loading rows. '.format(c.__name__),
sys.stdout.flush()
print 'Generating public_ids',
sys.stdout.flush()
for r in db_session.query(c).yield_per(chunk_size):
count += 1
r.public_id = generate_public_id()
if not count % chunk_size:
sys.stdout.write('.')
sys.stdout.flush()
db_session.commit()
garbage_collect()
sys.stdout.write(' Saving. ')
sys.stdout.flush()
db_session.commit()
sys.stdout.write('Done!\n')
sys.stdout.flush()
print '\nUpgraded OK!\n'
def downgrade():
# These all inherit HasPublicID
from inbox.models import (
Account, Block, Contact, Message, Namespace,
SharedFolder, Thread, User, UserSession, HasPublicID)
classes = [
Account, Block, Contact, Message, Namespace,
SharedFolder, Thread, User, UserSession]
for c in classes:
assert issubclass(c, HasPublicID)
print '[{0}] Dropping public_id column... '.format(c.__tablename__),
op.drop_column(c.__tablename__, 'public_id')
print 'Dropping index... ',
op.drop_index(
'ix_{0}_public_id'.format(c.__tablename__),
table_name=c.__tablename__)
print 'Done.'
|
d40223223/2015cdbg6-40223223
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/base.py
|
603
|
#!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
modules have failed to init. The return value contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
which you can use to double-check the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
As with the manual `init` routines, it is safe to call this
`init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
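# Minimal usage sketch: callers typically inspect the counts rather
# than catching exceptions, e.g.
#   passed, failed = init()
#   if failed:
#       print 'some pygame modules failed to initialize'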
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
All the pygame modules are uninitialized automatically when your
program exits, so you will usually not need this routine. If your
program plans to keep running after it is done with pygame, then this
would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
atexit.register(_atexit_quit)
|
erasilva/namebench
|
refs/heads/master
|
nb_third_party/jinja2/utils.py
|
189
|
# -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import errno
try:
from thread import allocate_lock
except ImportError:
from dummy_thread import allocate_lock
from collections import deque
from itertools import imap
_word_split_re = re.compile(r'(\s+)')
_punctuation_re = re.compile(
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
'|'.join(imap(re.escape, ('(', '<', '<'))),
'|'.join(imap(re.escape, ('.', ',', ')', '>', '\n', '>')))
)
)
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'
# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
# internal code
internal_code = set()
# concatenate a list of strings and convert them to unicode.
# unfortunately there is a bug in python 2.4 and lower that causes
# unicode.join to trash the traceback.
_concat = u''.join
try:
def _test_gen_bug():
raise TypeError(_test_gen_bug)
yield None
_concat(_test_gen_bug())
except TypeError, _error:
if not _error.args or _error.args[0] is not _test_gen_bug:
def concat(gen):
try:
return _concat(list(gen))
except:
# this hack is needed so that the current frame
# does not show up in the traceback.
exc_type, exc_value, tb = sys.exc_info()
raise exc_type, exc_value, tb.tb_next
else:
concat = _concat
del _test_gen_bug, _error
# for python 2.x we create ourselves a next() function that does the
# basics without exception catching.
try:
next = next
except NameError:
def next(x):
return x.next()
# if this python version is unable to deal with unicode filenames
# when passed to encode we let this function encode it properly.
# This is used in a couple of places. As far as Jinja is concerned
# filenames are unicode *or* bytestrings in 2.x and unicode only in
# 3.x because compile cannot handle bytes
if sys.version_info < (3, 0):
def _encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
else:
def _encode_filename(filename):
assert filename is None or isinstance(filename, str), \
'filenames must be strings'
return filename
from keyword import iskeyword as is_python_keyword
# common types. These also exist in the special types module, which,
# however, is not available in IronPython out of the box. Defining them
# here also avoids implementation-specific details.
class _C(object):
def method(self): pass
def _func():
yield None
FunctionType = type(_func)
GeneratorType = type(_func())
MethodType = type(_C.method)
CodeType = type(_C.method.func_code)
try:
raise TypeError()
except TypeError:
_tb = sys.exc_info()[2]
TracebackType = type(_tb)
FrameType = type(_tb.tb_frame)
del _C, _tb, _func
def contextfunction(f):
"""This decorator can be used to mark a function or method context callable.
A context callable is passed the active :class:`Context` as first argument when
called from the template. This is useful if a function wants to get access
to the context or functions provided on the context object. For example
a function that returns a sorted list of template variables the current
template exports could look like this::
@contextfunction
def get_exported_names(context):
return sorted(context.exported_vars)
"""
f.contextfunction = True
return f
def evalcontextfunction(f):
"""This decoraotr can be used to mark a function or method as an eval
context callable. This is similar to the :func:`contextfunction`
but instead of passing the context, an evaluation context object is
passed. For more information about the eval context, see
:ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfunction = True
return f
def environmentfunction(f):
"""This decorator can be used to mark a function or method as environment
callable. This decorator works exactly like the :func:`contextfunction`
decorator just that the first argument is the active :class:`Environment`
and not context.
"""
f.environmentfunction = True
return f
def internalcode(f):
"""Marks the function as internally used"""
internal_code.add(f.func_code)
return f
def is_undefined(obj):
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from jinja2.runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
for event in iterable:
pass
def clear_caches():
"""Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from jinja2.environment import _spontaneous_environments
from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear()
def import_string(import_name, silent=False):
"""Imports an object based on a string. This use useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If the `silent` is True the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
items = import_name.split('.')
module = '.'.join(items[:-1])
obj = items[-1]
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
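# Hypothetical usage sketch (paths are only examples):
#   escape_fn = import_string('xml.sax.saxutils:escape')
#   same_fn = import_string('xml.sax.saxutils.escape')
#   missing = import_string('no.such:thing', silent=True)  # -> None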
def open_if_exists(filename, mode='rb'):
"""Returns a file descriptor for the filename if that file exists,
otherwise `None`.
"""
try:
return open(filename, mode)
except IOError, e:
if e.errno not in (errno.ENOENT, errno.EISDIR):
raise
def object_type_repr(obj):
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return 'None'
elif obj is Ellipsis:
return 'Ellipsis'
if obj.__class__.__module__ == '__builtin__':
name = obj.__class__.__name__
else:
name = obj.__class__.__module__ + '.' + obj.__class__.__name__
return '%s object' % name
def pformat(obj, verbose=False):
"""Prettyprint an object. Either use the `pretty` library or the
builtin `pprint`.
"""
try:
from pretty import pretty
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
return pformat(obj)
def urlize(text, trim_url_limit=None, nofollow=False):
"""Converts any URLs in text into clickable links. Works on http://,
https:// and www. links. Links can have trailing punctuation (periods,
commas, close-parens) and leading punctuation (opening parens) and
it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text will be limited
to trim_url_limit characters.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None \
and (x[:limit] + (len(x) >= limit and '...'
or '')) or x
words = _word_split_re.split(unicode(escape(text)))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith('www.') or (
'@' not in middle and
not middle.startswith('http://') and
len(middle) > 0 and
middle[0] in _letters + _digits and (
middle.endswith('.org') or
middle.endswith('.net') or
middle.endswith('.com')
)):
middle = '<a href="http://%s"%s>%s</a>' % (middle,
nofollow_attr, trim_url(middle))
if middle.startswith('http://') or \
middle.startswith('https://'):
middle = '<a href="%s"%s>%s</a>' % (middle,
nofollow_attr, trim_url(middle))
if '@' in middle and not middle.startswith('www.') and \
not ':' in middle and _simple_email_re.match(middle):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u''.join(words)
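# Illustrative behaviour (markup abbreviated): urlize(u'see
# www.example.com.') wraps the middle group as
# <a href="http://www.example.com">www.example.com</a> and leaves the
# trailing period outside the link, because _punctuation_re first
# splits each word into lead/middle/trail.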
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem impsum for the template."""
from jinja2.constants import LOREM_IPSUM_WORDS
from random import choice, randrange
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in xrange(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
# each paragraph consists of 20 to 100 words.
for idx, _ in enumerate(xrange(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ','
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += '.'
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p = u' '.join(p)
if p.endswith(','):
p = p[:-1] + '.'
elif not p.endswith('.'):
p += '.'
result.append(p)
if not html:
return u'\n\n'.join(result)
return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
class Markup(unicode):
r"""Marks a string as being safe for inclusion in HTML/XML output without
needing to be escaped. This implements the `__html__` interface a couple
of frameworks and web applications use. :class:`Markup` is a direct
subclass of `unicode` and provides all the methods of `unicode`, except
that it escapes passed arguments and always returns `Markup`.
The `escape` function returns markup objects so that double escaping can't
happen. If you want to use autoescaping in Jinja just enable the
autoescaping feature in the environment.
The constructor of the :class:`Markup` class can be used for three
different things: when passed a unicode object it's assumed to be safe,
when passed an object with an HTML representation (has an `__html__`
method) that representation is used, otherwise the object passed is
converted into a unicode string and then assumed to be safe:
>>> Markup("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
>>> class Foo(object):
... def __html__(self):
... return '<a href="#">foo</a>'
...
>>> Markup(Foo())
Markup(u'<a href="#">foo</a>')
If you want the passed object to always be treated as unsafe you can
use the :meth:`escape` classmethod to create a :class:`Markup` object:
>>> Markup.escape("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
Operations on a markup string are markup aware which means that all
arguments are passed through the :func:`escape` function:
>>> em = Markup("<em>%s</em>")
>>> em % "foo & bar"
Markup(u'<em>foo & bar</em>')
>>> strong = Markup("<strong>%(text)s</strong>")
>>> strong % {'text': '<blink>hacker here</blink>'}
Markup(u'<strong><blink>hacker here</blink></strong>')
>>> Markup("<em>Hello</em> ") + "<foo>"
Markup(u'<em>Hello</em> <foo>')
"""
__slots__ = ()
def __new__(cls, base=u'', encoding=None, errors='strict'):
if hasattr(base, '__html__'):
base = base.__html__()
if encoding is None:
return unicode.__new__(cls, base)
return unicode.__new__(cls, base, encoding, errors)
def __html__(self):
return self
def __add__(self, other):
if hasattr(other, '__html__') or isinstance(other, basestring):
return self.__class__(unicode(self) + unicode(escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__html__') or isinstance(other, basestring):
return self.__class__(unicode(escape(other)) + unicode(self))
return NotImplemented
def __mul__(self, num):
if isinstance(num, (int, long)):
return self.__class__(unicode.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(imap(_MarkupEscapeHelper, arg))
else:
arg = _MarkupEscapeHelper(arg)
return self.__class__(unicode.__mod__(self, arg))
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
unicode.__repr__(self)
)
def join(self, seq):
return self.__class__(unicode.join(self, imap(escape, seq)))
join.__doc__ = unicode.join.__doc__
def split(self, *args, **kwargs):
return map(self.__class__, unicode.split(self, *args, **kwargs))
split.__doc__ = unicode.split.__doc__
def rsplit(self, *args, **kwargs):
return map(self.__class__, unicode.rsplit(self, *args, **kwargs))
rsplit.__doc__ = unicode.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return map(self.__class__, unicode.splitlines(self, *args, **kwargs))
splitlines.__doc__ = unicode.splitlines.__doc__
def unescape(self):
r"""Unescape markup again into an unicode string. This also resolves
known HTML4 and XHTML entities:
>>> Markup("Main » <em>About</em>").unescape()
u'Main \xbb <em>About</em>'
"""
from jinja2.constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, unicode(self))
def striptags(self):
r"""Unescape markup into an unicode string and strip all tags. This
also resolves known HTML4 and XHTML entities. Whitespace is
normalized to one:
>>> Markup("Main » <em>About</em>").striptags()
u'Main \xbb About'
"""
stripped = u' '.join(_striptags_re.sub('', self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
"""Escape the string. Works like :func:`escape` with the difference
that for subclasses of :class:`Markup` this function would return the
correct subclass.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
def make_wrapper(name):
orig = getattr(unicode, name)
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args))
_escape_argspec(kwargs, kwargs.iteritems())
return self.__class__(orig(self, *args, **kwargs))
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
for method in '__getitem__', 'capitalize', \
'title', 'lower', 'upper', 'replace', 'ljust', \
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
'translate', 'expandtabs', 'swapcase', 'zfill':
locals()[method] = make_wrapper(method)
# new in python 2.5
if hasattr(unicode, 'partition'):
partition = make_wrapper('partition')
rpartition = make_wrapper('rpartition')
# new in python 2.6
if hasattr(unicode, 'format'):
format = make_wrapper('format')
# not in python 3
if hasattr(unicode, '__getslice__'):
__getslice__ = make_wrapper('__getslice__')
del method, make_wrapper
def _escape_argspec(obj, iterable):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, '__html__') or isinstance(value, basestring):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj):
self.obj = obj
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x])
__str__ = lambda s: str(escape(s.obj))
__unicode__ = lambda s: unicode(escape(s.obj))
__repr__ = lambda s: str(escape(repr(s.obj)))
__int__ = lambda s: int(s.obj)
__float__ = lambda s: float(s.obj)
class LRUCache(object):
"""A simple LRU Cache implementation."""
# this is fast for small capacities (something below 1000) but doesn't
# scale. But as long as it's only used as storage for templates this
# won't do any harm.
def __init__(self, capacity):
self.capacity = capacity
self._mapping = {}
self._queue = deque()
self._postinit()
def _postinit(self):
# alias all queue methods for faster lookup
self._popleft = self._queue.popleft
self._pop = self._queue.pop
if hasattr(self._queue, 'remove'):
self._remove = self._queue.remove
self._wlock = allocate_lock()
self._append = self._queue.append
def _remove(self, obj):
"""Python 2.4 compatibility."""
for idx, item in enumerate(self._queue):
if item == obj:
del self._queue[idx]
break
def __getstate__(self):
return {
'capacity': self.capacity,
'_mapping': self._mapping,
'_queue': self._queue
}
def __setstate__(self, d):
self.__dict__.update(d)
self._postinit()
def __getnewargs__(self):
return (self.capacity,)
def copy(self):
"""Return an shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
rv._queue = deque(self._queue)
return rv
def get(self, key, default=None):
"""Return an item from the cache dict or `default`"""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
try:
return self[key]
except KeyError:
self[key] = default
return default
def clear(self):
"""Clear the cache."""
self._wlock.acquire()
try:
self._mapping.clear()
self._queue.clear()
finally:
self._wlock.release()
def __contains__(self, key):
"""Check if a key exists in this cache."""
return key in self._mapping
def __len__(self):
"""Return the current size of the cache."""
return len(self._mapping)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self._mapping
)
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
highest priority then.
Raise an `KeyError` if it does not exist.
"""
rv = self._mapping[key]
if self._queue[-1] != key:
try:
self._remove(key)
except ValueError:
# if something removed the key from the container
# when we read, ignore the ValueError that we would
# get otherwise.
pass
self._append(key)
return rv
def __setitem__(self, key, value):
"""Sets the value for an item. Moves the item up so that it
has the highest priority then.
"""
self._wlock.acquire()
try:
if key in self._mapping:
try:
self._remove(key)
except ValueError:
# __getitem__ is not locked, it might happen
pass
elif len(self._mapping) == self.capacity:
del self._mapping[self._popleft()]
self._append(key)
self._mapping[key] = value
finally:
self._wlock.release()
def __delitem__(self, key):
"""Remove an item from the cache dict.
Raise a `KeyError` if it does not exist.
"""
self._wlock.acquire()
try:
del self._mapping[key]
try:
self._remove(key)
except ValueError:
# __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release()
def items(self):
"""Return a list of items."""
result = [(key, self._mapping[key]) for key in list(self._queue)]
result.reverse()
return result
def iteritems(self):
"""Iterate over all items."""
return iter(self.items())
def values(self):
"""Return a list of all values."""
return [x[1] for x in self.items()]
def itervalues(self):
"""Iterate over all values."""
return iter(self.values())
def keys(self):
"""Return a list of all keys ordered by most recent usage."""
return list(self)
def iterkeys(self):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
return reversed(tuple(self._queue))
__iter__ = iterkeys
def __reversed__(self):
"""Iterate over the values in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
__copy__ = copy
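# Minimal usage sketch for LRUCache (capacity chosen arbitrarily):
#   cache = LRUCache(2)
#   cache['a'] = 1; cache['b'] = 2
#   cache['a']        # touch 'a' so it becomes most recently used
#   cache['c'] = 3    # evicts 'b', the least recently used key
#   'b' in cache      # -> False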
# register the LRU cache as mutable mapping if possible
try:
from collections import MutableMapping
MutableMapping.register(LRUCache)
except ImportError:
pass
class Cycler(object):
"""A cycle helper for templates."""
def __init__(self, *items):
if not items:
raise RuntimeError('at least one item has to be provided')
self.items = items
self.reset()
def reset(self):
"""Resets the cycle."""
self.pos = 0
@property
def current(self):
"""Returns the current item."""
return self.items[self.pos]
def next(self):
"""Goes one item ahead and returns it."""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
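# Usage sketch (items arbitrary): Cycler(u'odd', u'even').next() yields
# u'odd', then u'even', then u'odd' again; .current peeks without
# advancing and .reset() starts over from the first item.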
class Joiner(object):
"""A joining helper for templates."""
def __init__(self, sep=u', '):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
return u''
return self.sep
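# Usage sketch: a Joiner returns u'' on its first call and the
# separator afterwards, so
#   j = Joiner(u', ')
#   u''.join(j() + name for name in [u'a', u'b'])  # -> u'a, b'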
# we have to import it down here as the speedups module imports the
# markup type which is defined above.
try:
from jinja2._speedups import escape, soft_unicode
except ImportError:
def escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
"""
if hasattr(s, '__html__'):
return s.__html__()
return Markup(unicode(s)
.replace('&', '&')
.replace('>', '>')
.replace('<', '<')
.replace("'", ''')
.replace('"', '"')
)
def soft_unicode(s):
"""Make a string unicode if it isn't already. That way a markup
string is not converted back to unicode.
"""
if not isinstance(s, unicode):
s = unicode(s)
return s
# partials
try:
from functools import partial
except ImportError:
class partial(object):
def __init__(self, _func, *args, **kwargs):
self._func = _func
self._args = args
self._kwargs = kwargs
def __call__(self, *args, **kwargs):
kwargs.update(self._kwargs)
return self._func(*(self._args + args), **kwargs)
|
Entropy512/kernel_n8013_ics
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but
hierarchical tree of calls. Only the functions' names and the call
times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found among the parents, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
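# Example of the expected line shape (contents illustrative):
#   bash-1234 [000] 6153.849350: update_curr <-task_tick_fair
# parseLine returns ("6153.849350", "update_curr", "task_tick_fair"),
# i.e. (calltime, callee, caller).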
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
ryandxter/SM-G900H_KK_Opensource_Update3
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too long; the script is a bit slow.
Interrupt the capture (Ctrl + C)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
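# For illustration, a ROOT node with one child A that calls B at time 1.23
# renders roughly like this (indentation grows with nesting depth):
#
#   ----Root (Nowhere)
#     |----A
#       |----B (1.23)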
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
	m = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
ninotoshi/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/gaussian_conjugate_posteriors_test.py
|
5
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
distributions = tf.contrib.distributions
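# Reference for the tests below (a standard conjugacy result, stated here
# for convenience rather than taken from the library docs): given a prior
# N(mu0, sigma0^2), n observations with known noise sigma^2 and sample sum
# s, the posterior over the mean is N(mu_n, sigma_n^2) where
#   sigma_n^2 = 1 / (1/sigma0^2 + n/sigma^2)
#   mu_n      = sigma_n^2 * (mu0/sigma0^2 + s/sigma^2)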
class GaussianTest(tf.test.TestCase):
def testGaussianConjugateKnownSigmaPosterior(self):
with tf.Session():
mu0 = tf.constant([3.0])
sigma0 = tf.constant([math.sqrt(10.0)])
sigma = tf.constant([math.sqrt(2.0)])
x = tf.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = tf.reduce_sum(x)
n = tf.size(x)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
posterior = distributions.gaussian_conjugates_known_sigma_posterior(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Gaussian))
posterior_log_pdf = posterior.log_pdf(x).eval()
self.assertEqual(posterior_log_pdf.shape, (6,))
def testGaussianConjugateKnownSigmaPosteriorND(self):
with tf.Session():
batch_size = 6
mu0 = tf.constant([[3.0, -3.0]] * batch_size)
sigma0 = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
sigma = tf.constant([[math.sqrt(2.0)]] * batch_size)
x = tf.transpose(
tf.constant([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=tf.float32))
s = tf.reduce_sum(x)
n = tf.size(x)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
posterior = distributions.gaussian_conjugates_known_sigma_posterior(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Gaussian))
posterior_log_pdf = posterior.log_pdf(x).eval()
self.assertEqual(posterior_log_pdf.shape, (6, 2))
def testGaussianConjugateKnownSigmaNDPosteriorND(self):
with tf.Session():
batch_size = 6
mu0 = tf.constant([[3.0, -3.0]] * batch_size)
sigma0 = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
sigma = tf.constant([[math.sqrt(2.0), math.sqrt(4.0)]] * batch_size)
x = tf.constant([
[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0],
[2.5, -2.5, -4.0, 0.0, 1.0, -2.0]], dtype=tf.float32)
s = tf.reduce_sum(x, reduction_indices=[1])
x = tf.transpose(x) # Reshape to shape (6, 2)
n = tf.constant([6] * 2)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
posterior = distributions.gaussian_conjugates_known_sigma_posterior(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(posterior, distributions.Gaussian))
# Calculate log_pdf under the 2 models
posterior_log_pdf = posterior.log_pdf(x)
self.assertEqual(posterior_log_pdf.get_shape(), (6, 2))
self.assertEqual(posterior_log_pdf.eval().shape, (6, 2))
def testGaussianConjugateKnownSigmaPredictive(self):
with tf.Session():
batch_size = 6
mu0 = tf.constant([3.0] * batch_size)
sigma0 = tf.constant([math.sqrt(10.0)] * batch_size)
sigma = tf.constant([math.sqrt(2.0)] * batch_size)
x = tf.constant([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0])
s = tf.reduce_sum(x)
n = tf.size(x)
prior = distributions.Gaussian(mu=mu0, sigma=sigma0)
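      # NOTE: 'congugates' (sic) is left as-is below; it appears to be the
      # spelling exposed by this version of tf.contrib.distributions.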
predictive = distributions.gaussian_congugates_known_sigma_predictive(
prior=prior, sigma=sigma, s=s, n=n)
# Smoke test
self.assertTrue(isinstance(predictive, distributions.Gaussian))
predictive_log_pdf = predictive.log_pdf(x).eval()
self.assertEqual(predictive_log_pdf.shape, (6,))
if __name__ == '__main__':
tf.test.main()
|
WilliamRen/oceanbase
|
refs/heads/master
|
oceanbase_0.4/tests/mergeserver/olap/distribute_range.py
|
13
|
#!/usr/bin/env python
import sys
from optparse import OptionParser
def main():
parser = OptionParser()
parser.add_option("-f", "--file", action="store", dest = "range_file", help = "input range file")
parser.add_option("-o", "--output", action = "store", dest = "output", help = "output file")
parser.add_option("-i", "--index", action="store", dest = "svr_idx", help = "server index (of current server [0, count-1]")
parser.add_option("-c", "--count", action="store", dest = "svr_count", help="number of server in this cluster")
(options,args) = parser.parse_args(sys.argv)
if not options.range_file or not options.svr_idx or not options.svr_count or not options.output:
parser.print_help()
sys.exit(1)
svr_count = int(options.svr_count)
svr_idx = int(options.svr_idx)
tablet_count = 0
range_input = open(options.range_file, "r")
for line in range_input:
tablet_count += 1
    # NOTE: the contiguous [tablet_beg, tablet_end) range computed here is
    # never used below; the actual distribution is round-robin by line index.
    tablet_count_pern = tablet_count / svr_count
    tablet_beg = tablet_count_pern * svr_idx
    tablet_end = tablet_beg + tablet_count_pern
    if svr_idx == svr_count - 1:
        tablet_end = tablet_count
range_input.seek(0)
idx = 0
output = open(options.output, "w")
for line in range_input:
if idx % svr_count == svr_idx:
output.write(line)
idx += 1
if __name__ == "__main__":
main()
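# Example invocation (file names are hypothetical): split a range file
# across a 3-server cluster; server 0 keeps lines 0, 3, 6, ... of the file:
#   ./distribute_range.py -f all_ranges.txt -o ranges_svr0.txt -i 0 -c 3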
|
vizual54/MissionPlanner
|
refs/heads/master
|
Lib/lib2to3/fixes/__init__.py
|
201
|
# Dummy file to make this directory a package.
|
miroag/mfs
|
refs/heads/master
|
tests/test_navsource.py
|
1
|
import pytest
from mfs.navsource import NavSourceScraper
def test_model_wrong_url():
with pytest.raises(AttributeError):
scraper = NavSourceScraper('very wrong url') # noqa
def test_live_navsource():
scraper = NavSourceScraper('http://www.navsource.narod.ru/photos/02/020/index.html').scan()
assert scraper.title == 'Бронепалубный крейсер "Варяг"'
assert len(scraper.dl) == 56, 'Wrong number of files'
|
xome4ok/vkbackup
|
refs/heads/master
|
vkbackup/html_backup.py
|
1
|
from datetime import datetime
import os
from jinja2 import Environment, PackageLoader, select_autoescape
from typing import List, Dict
def html_repr(msgs: List[Dict], participants: Dict) -> List[Dict]:
"""Html representation of conversation.
:param msgs: messages list
:param participants: participants dict
:return: list of dicts with parameters to fill template
"""
def audio_to_dict(audio) -> Dict:
"""Dict representation of audio attachment.
:param audio: attachment of type audio
:returns: dict with audio info
"""
return dict(
type='audio',
artist=audio.get('artist') or audio.get('performer', None),
title=audio.get('title', None),
content_restricted='content_restricted' in audio,
url=audio.get('url', None)
)
def photo_to_dict(photo) -> Dict:
"""Dict representation of photo attachment.
:param photo: attachment of type photo
:returns: dict with photo info
"""
return dict(
type='photo',
src_big=photo.get('src_big', None),
src_small=photo.get('src_small', None),
src=photo.get('src', None),
src_xbig=photo.get('src_xbig', None),
src_xxbig=photo.get('src_xxbig', None),
src_xxxbig=photo.get('src_xxxbig', None),
biggest=photo.get('src_xxxbig', None) or photo.get('src_xxbig', None) or
photo.get('src_xbig', None) or photo.get('src_big', None) or
photo.get('src', None) or photo.get('src_small', None)
)
def sticker_to_dict(sticker) -> Dict:
"""Dict representation of sticker.
:param sticker: attachment of type sticker
:returns: dict with sticker info
"""
return dict(
type='sticker',
photo_256=sticker.get('photo_256', None), # preferable
photo_352=sticker.get('photo_352', None),
photo_512=sticker.get('photo_512', None),
photo_128=sticker.get('photo_128', None),
photo_64=sticker.get('photo_64', None),
)
def doc_to_dict(doc) -> Dict:
"""Dict representation of attached document.
:param doc: attachment of type doc
:returns: dict with doc info
"""
return dict(
type='doc',
size=doc.get('size', None),
title=doc.get('title', None),
ext=doc.get('ext', None),
url=doc.get('url', None)
)
def video_to_dict(video) -> Dict:
"""Dict representation of attached video.
:param video: attachment of type video
:returns: dict with video info
"""
return dict(
type='video',
image=video.get('image', None),
title=video.get('title', None)
)
def link_to_dict(link) -> Dict:
"""Dict representation of attached link.
:param link: attachment of type link
:returns: link title and link itself
"""
return dict(
type='link',
title=link.get('title', None),
url=link.get('url', None)
)
def wall_to_dict(wall) -> Dict:
"""Dict representation of wall post attachment
:param wall: attachment of type wall
:returns: dict with wall post info
"""
attachments = wall.get('attachments', None) or []
return dict(
type='wall',
text=wall.get('text', None),
attachments=attachments_to_dicts(attachments) if attachments else None
)
def attachments_to_dicts(attachs: List[Dict]) -> List[Dict]:
"""Dict representation of attachments in wall post or message.
:param attachs: non-empty attachments list
"""
assert attachs
attachment_funcs = dict(
photo=photo_to_dict,
audio=audio_to_dict,
sticker=sticker_to_dict,
doc=doc_to_dict,
video=video_to_dict,
link=link_to_dict,
wall=wall_to_dict
)
return [
attachment_funcs[x['type']](x[x['type']])
for x in attachs
if x['type'] in list(attachment_funcs.keys())
]
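    # Sketch of the dispatch above on a hypothetical input; attachment types
    # without an entry in attachment_funcs (e.g. 'poll') are silently dropped:
    #   attachments_to_dicts([
    #       {'type': 'link', 'link': {'title': 't', 'url': 'http://u'}},
    #       {'type': 'poll', 'poll': {}},
    #   ])
    #   -> [{'type': 'link', 'title': 't', 'url': 'http://u'}]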
    def fwd_to_dict(fwd: List[Dict]) -> List[Dict]:
        """List of dict representations of forwarded messages.
        :param fwd: message['fwd_messages']
        """
return [
to_dialogue(fwd_msg, 'out', participants[fwd_msg['uid']])
for fwd_msg
in fwd
]
def to_dialogue(msg, username=None, peername=None) -> Dict:
attachments = msg.get('attachments', None)
return dict(
date=str(datetime.fromtimestamp(msg['date'])),
body=msg['body'].replace('<br>', '\n'),
forwarded=fwd_to_dict(msg['fwd_messages']) if 'fwd_messages' in msg else None,
attachments=attachments_to_dicts(attachments) if attachments else None,
user=(username if 'out' in msg and msg['out'] else peername) or participants[msg['from_id']],
is_out='out' in msg and msg['out']
)
return [to_dialogue(msg) for msg in msgs]
def render(path, peer_id, msgs, participants, audio, photo):
env = Environment(
loader=PackageLoader('vkbackup', 'templates'),
autoescape=select_autoescape(['html'])
)
messages = env.get_template('layout.html')
html = messages.render(msgs=html_repr(msgs, participants),
peer=peer_id,
participants=participants,
audios=audio,
photos=photo,
)
with open(os.path.join(path, '{}.{}'.format(peer_id, 'html')), 'w', encoding='utf-8') as f:
f.write(html)
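# Minimal usage sketch (path and peer id are hypothetical; msgs/participants
# are assumed to follow the VK API shapes handled above):
#   render('/tmp/backup', 12345, msgs, participants, audio=[], photo=[])
#   # -> writes /tmp/backup/12345.html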
|
wallarelvo/TopoPRM
|
refs/heads/master
|
tvd/postman.py
|
2
|
#!/usr/bin/env python2.7
import networkx as nx
__author__ = 'Ralf Kistner'
def pairs(lst, circular=False):
i = iter(lst)
first = prev = item = i.next()
for item in i:
yield prev, item
prev = item
if circular:
yield item, first
def graph_components(graph):
components = list(nx.connected_component_subgraphs(graph))
components.sort(key=lambda c: c.size(), reverse=True)
return components
def odd_graph(graph):
    """Build a complete graph on the odd-degree nodes of `graph`. Edge
    weights are negated shortest-path lengths, so a max-weight matching on
    the result is a minimum-cost pairing; each edge also stores its path.
    """
    result = nx.Graph()
    odd_nodes = [n for n in graph.nodes() if graph.degree(n) % 2 == 1]
for u in odd_nodes:
paths = nx.shortest_path(graph, source=u, weight='weight')
lengths = nx.shortest_path_length(graph, source=u, weight='weight')
for v in odd_nodes:
if u <= v:
continue
result.add_edge(u, v, weight=-lengths[v], path=paths[v])
return result
def edge_sum(graph):
total = 0
for u, v, data in graph.edges(data=True):
total += data['weight']
return total
def matching_cost(graph, matching):
# Calculate the cost of the additional edges
cost = 0
for u, v in matching.items():
if v <= u:
continue
data = graph[u][v]
cost += abs(data['weight'])
return cost
def find_matchings(graph, n=5):
    """Find up to n low-cost perfect matchings: start from the best
    matching, then retry with one of its edges removed at a time, keeping
    one matching per distinct total cost."""
    best_matching = nx.max_weight_matching(graph, True)
matchings = [best_matching]
for u, v in best_matching.items():
if v <= u:
continue
smaller_graph = nx.Graph(graph)
smaller_graph.remove_edge(u, v)
matching = nx.max_weight_matching(smaller_graph, True)
if len(matching) > 0:
matchings.append(matching)
matching_costs = [(matching_cost(graph, matching), matching)
for matching in matchings]
matching_costs.sort()
final_matchings = []
last_cost = None
for cost, matching in matching_costs:
if cost == last_cost:
continue
last_cost = cost
final_matchings.append((cost, matching))
return final_matchings
def build_eulerian_graph(graph, odd, matching):
eulerian_graph = nx.MultiGraph(graph)
for u, v in matching.items():
if v <= u:
continue
edge = odd[u][v]
path = edge['path']
for p, q in pairs(path):
eulerian_graph.add_edge(p, q, weight=graph[p][q]['weight'])
return eulerian_graph
def eulerian_circuit(graph):
circuit = list(nx.eulerian_circuit(graph))
nodes = []
for u, v in circuit:
nodes.append(u)
# Close the loop
nodes.append(circuit[0][0])
return nodes
def chinese_postman_paths(graph, n=5):
odd = odd_graph(graph)
matchings = find_matchings(odd, n)
paths = []
for cost, matching in matchings[:n]:
eulerian_graph = build_eulerian_graph(graph, odd, matching)
nodes = eulerian_circuit(eulerian_graph)
paths.append((eulerian_graph, nodes))
return paths
def single_chinese_postman_path(graph):
odd = odd_graph(graph)
matching = nx.max_weight_matching(odd, True)
eulerian_graph = build_eulerian_graph(graph, odd, matching)
nodes = eulerian_circuit(eulerian_graph)
return eulerian_graph, nodes
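# Minimal usage sketch, assuming the networkx 1.x API used above
# (dict-returning max_weight_matching, connected_component_subgraphs):
#   g = nx.Graph()
#   g.add_edge('a', 'b', weight=1)
#   g.add_edge('b', 'c', weight=1)
#   g.add_edge('c', 'a', weight=1)
#   g.add_edge('a', 'd', weight=1)   # 'a' and 'd' have odd degree
#   eulerian_graph, nodes = single_chinese_postman_path(g)
#   print nodes                      # e.g. ['a', 'd', 'a', 'b', 'c', 'a']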
|
pcastanha/frame
|
refs/heads/master
|
src/softframe/database/__init__.py
|
1
|
from .connection import DatabaseConnector
__all__ = ['DatabaseConnector']
|