| repo_name | ref | path | copies | content |
|---|---|---|---|---|
akaariai/django | refs/heads/master | django/contrib/gis/geoip/prototypes.py | 535 | from ctypes import POINTER, Structure, c_char_p, c_float, c_int, string_at
from django.contrib.gis.geoip.libgeoip import free, lgeoip
# #### GeoIP C Structure definitions ####
class GeoIPRecord(Structure):
_fields_ = [('country_code', c_char_p),
('country_code3', c_char_p),
('country_name', c_char_p),
('region', c_char_p),
('city', c_char_p),
('postal_code', c_char_p),
('latitude', c_float),
('longitude', c_float),
# TODO: In 1.4.6 this changed from `int dma_code;` to
# `union {int metro_code; int dma_code;};`. Change
# to a `ctypes.Union` to accommodate this in the future, when
# pre-1.4.6 versions are no longer distributed.
('dma_code', c_int),
('area_code', c_int),
('charset', c_int),
('continent_code', c_char_p),
]
geoip_char_fields = [name for name, ctype in GeoIPRecord._fields_ if ctype is c_char_p]
GEOIP_DEFAULT_ENCODING = 'iso-8859-1'
geoip_encodings = {
0: 'iso-8859-1',
1: 'utf8',
}
class GeoIPTag(Structure):
pass
RECTYPE = POINTER(GeoIPRecord)
DBTYPE = POINTER(GeoIPTag)
# #### ctypes function prototypes ####
# GeoIP_lib_version appeared in version 1.4.7.
if hasattr(lgeoip, 'GeoIP_lib_version'):
GeoIP_lib_version = lgeoip.GeoIP_lib_version
GeoIP_lib_version.argtypes = None
GeoIP_lib_version.restype = c_char_p
else:
GeoIP_lib_version = None
# For freeing memory allocated within a record
GeoIPRecord_delete = lgeoip.GeoIPRecord_delete
GeoIPRecord_delete.argtypes = [RECTYPE]
GeoIPRecord_delete.restype = None
# For retrieving records by name or address.
def check_record(result, func, cargs):
if result:
# Check the pointer to the C structure; if valid, pull out its
# elements into a dictionary.
rec = result.contents
record = {fld: getattr(rec, fld) for fld, ctype in rec._fields_}
# Now converting the strings to unicode using the proper encoding.
encoding = geoip_encodings[record['charset']]
for char_field in geoip_char_fields:
if record[char_field]:
record[char_field] = record[char_field].decode(encoding)
# Free the memory allocated for the struct & return.
GeoIPRecord_delete(result)
return record
else:
return None
def record_output(func):
func.argtypes = [DBTYPE, c_char_p]
func.restype = RECTYPE
func.errcheck = check_record
return func
GeoIP_record_by_addr = record_output(lgeoip.GeoIP_record_by_addr)
GeoIP_record_by_name = record_output(lgeoip.GeoIP_record_by_name)
# For opening & closing GeoIP database files.
GeoIP_open = lgeoip.GeoIP_open
GeoIP_open.restype = DBTYPE
GeoIP_delete = lgeoip.GeoIP_delete
GeoIP_delete.argtypes = [DBTYPE]
GeoIP_delete.restype = None
# This is so the string pointer can be freed within Python.
class geoip_char_p(c_char_p):
pass
def check_string(result, func, cargs):
if result:
s = string_at(result)
free(result)
else:
s = b''
return s.decode(GEOIP_DEFAULT_ENCODING)
GeoIP_database_info = lgeoip.GeoIP_database_info
GeoIP_database_info.restype = geoip_char_p
GeoIP_database_info.errcheck = check_string
# String output routines.
def string_output(func):
def _err_check(result, func, cargs):
if result:
return result.decode(GEOIP_DEFAULT_ENCODING)
return result
func.restype = c_char_p
func.errcheck = _err_check
return func
GeoIP_country_code_by_addr = string_output(lgeoip.GeoIP_country_code_by_addr)
GeoIP_country_code_by_name = string_output(lgeoip.GeoIP_country_code_by_name)
GeoIP_country_name_by_addr = string_output(lgeoip.GeoIP_country_name_by_addr)
GeoIP_country_name_by_name = string_output(lgeoip.GeoIP_country_name_by_name)
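# A minimal usage sketch of the prototypes above (the database path is
# hypothetical; 0 is GEOIP_STANDARD in the C library):
#
#     db = GeoIP_open(b'/usr/share/GeoIP/GeoIPCity.dat', 0)
#     record = GeoIP_record_by_addr(db, b'8.8.8.8')   # dict via check_record, or None
#     country = GeoIP_country_code_by_addr(db, b'8.8.8.8')  # unicode via string_output
#     GeoIP_delete(db)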
|
nattee/cafe-grader-web | refs/heads/master | lib/assets/Lib/encodings/cp875.py | 37 | """ Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp875',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> CONTROL
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> CONTROL
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> CONTROL
'\x8d' # 0x09 -> CONTROL
'\x8e' # 0x0A -> CONTROL
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> CONTROL
'\x85' # 0x15 -> CONTROL
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> CONTROL
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> CONTROL
'\x8f' # 0x1B -> CONTROL
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> CONTROL
'\x81' # 0x21 -> CONTROL
'\x82' # 0x22 -> CONTROL
'\x83' # 0x23 -> CONTROL
'\x84' # 0x24 -> CONTROL
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> CONTROL
'\x89' # 0x29 -> CONTROL
'\x8a' # 0x2A -> CONTROL
'\x8b' # 0x2B -> CONTROL
'\x8c' # 0x2C -> CONTROL
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> CONTROL
'\x91' # 0x31 -> CONTROL
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> CONTROL
'\x94' # 0x34 -> CONTROL
'\x95' # 0x35 -> CONTROL
'\x96' # 0x36 -> CONTROL
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> CONTROL
'\x99' # 0x39 -> CONTROL
'\x9a' # 0x3A -> CONTROL
'\x9b' # 0x3B -> CONTROL
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> CONTROL
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
'[' # 0x4A -> LEFT SQUARE BRACKET
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'!' # 0x4F -> EXCLAMATION MARK
'&' # 0x50 -> AMPERSAND
'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
']' # 0x5A -> RIGHT SQUARE BRACKET
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'^' # 0x5F -> CIRCUMFLEX ACCENT
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'|' # 0x6A -> VERTICAL LINE
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\xa8' # 0x70 -> DIAERESIS
'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\xa0' # 0x74 -> NO-BREAK SPACE
'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
'\xb4' # 0xA0 -> ACUTE ACCENT
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
'\u03be' # 0xAB -> GREEK SMALL LETTER XI
'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
'\xa3' # 0xB0 -> POUND SIGN
'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
'\u2015' # 0xCF -> HORIZONTAL BAR
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb1' # 0xDA -> PLUS-MINUS SIGN
'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
'\x1a' # 0xDC -> SUBSTITUTE
'\u0387' # 0xDD -> GREEK ANO TELEIA
'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
'\xa6' # 0xDF -> BROKEN BAR
'\\' # 0xE0 -> REVERSE SOLIDUS
'\x1a' # 0xE1 -> SUBSTITUTE
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\xa7' # 0xEB -> SECTION SIGN
'\x1a' # 0xEC -> SUBSTITUTE
'\x1a' # 0xED -> SUBSTITUTE
'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xEF -> NOT SIGN
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\xa9' # 0xFB -> COPYRIGHT SIGN
'\x1a' # 0xFC -> SUBSTITUTE
'\x1a' # 0xFD -> SUBSTITUTE
'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
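# A round-trip sketch using the tables above (values read straight from the
# decoding table: 0xC1-0xC3 map to 'ABC', 0x41 maps to GREEK CAPITAL LETTER ALPHA):
#
#     >>> import codecs
#     >>> codecs.charmap_decode(b'\xc1\xc2\xc3', 'strict', decoding_table)
#     ('ABC', 3)
#     >>> codecs.charmap_encode('\u0391', 'strict', encoding_table)
#     (b'A', 1)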
|
Danielhiversen/home-assistant | refs/heads/master | tests/components/automation/test_webhook.py | 7 | """The tests for the webhook automation trigger."""
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
async def test_webhook_json(hass, aiohttp_client):
"""Test triggering with a JSON webhook."""
events = []
@callback
def store_event(event):
"""Helepr to store events."""
events.append(event)
hass.bus.async_listen('test_success', store_event)
assert await async_setup_component(hass, 'automation', {
'automation': {
'trigger': {
'platform': 'webhook',
'webhook_id': 'json_webhook'
},
'action': {
'event': 'test_success',
'event_data_template': {
'hello': 'yo {{ trigger.json.hello }}',
}
}
}
})
client = await aiohttp_client(hass.http.app)
await client.post('/api/webhook/json_webhook', json={
'hello': 'world'
})
assert len(events) == 1
assert events[0].data['hello'] == 'yo world'
async def test_webhook_post(hass, aiohttp_client):
"""Test triggering with a POST webhook."""
events = []
@callback
def store_event(event):
"""Helepr to store events."""
events.append(event)
hass.bus.async_listen('test_success', store_event)
assert await async_setup_component(hass, 'automation', {
'automation': {
'trigger': {
'platform': 'webhook',
'webhook_id': 'post_webhook'
},
'action': {
'event': 'test_success',
'event_data_template': {
'hello': 'yo {{ trigger.data.hello }}',
}
}
}
})
client = await aiohttp_client(hass.http.app)
await client.post('/api/webhook/post_webhook', data={
'hello': 'world'
})
assert len(events) == 1
assert events[0].data['hello'] == 'yo world'
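# For reference, the trigger exercised in test_webhook_json corresponds to this
# YAML configuration (a sketch of the dict passed to async_setup_component):
#
#     automation:
#       trigger:
#         platform: webhook
#         webhook_id: json_webhook
#       action:
#         event: test_success
#         event_data_template:
#           hello: "yo {{ trigger.json.hello }}"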
|
alrusdi/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/django/template/loader_tags.py | 44 | from django.template import TemplateSyntaxError, TemplateDoesNotExist, Variable
from django.template import Library, Node, TextNode
from django.template.loader import get_template
from django.conf import settings
from django.utils.safestring import mark_safe
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
class ExtendsError(Exception):
pass
class BlockContext(object):
def __init__(self):
# Dictionary of FIFO queues.
self.blocks = {}
def add_blocks(self, blocks):
for name, block in blocks.iteritems():
if name in self.blocks:
self.blocks[name].insert(0, block)
else:
self.blocks[name] = [block]
def pop(self, name):
try:
return self.blocks[name].pop()
except (IndexError, KeyError):
return None
def push(self, name, block):
self.blocks[name].append(block)
def get_block(self, name):
try:
return self.blocks[name][-1]
except (IndexError, KeyError):
return None
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
context.push()
if block_context is None:
context['block'] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without thread-safety issues.
block = BlockNode(block.name, block.nodelist)
block.context = context
context['block'] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
context.pop()
return result
def super(self):
render_context = self.context.render_context
if (BLOCK_CONTEXT_KEY in render_context and
render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
return mark_safe(self.render(self.context))
return ''
class ExtendsNode(Node):
must_be_first = True
def __init__(self, nodelist, parent_name, parent_name_expr, template_dirs=None):
self.nodelist = nodelist
self.parent_name, self.parent_name_expr = parent_name, parent_name_expr
self.template_dirs = template_dirs
self.blocks = dict([(n.name, n) for n in nodelist.get_nodes_by_type(BlockNode)])
def __repr__(self):
if self.parent_name_expr:
return "<ExtendsNode: extends %s>" % self.parent_name_expr.token
return '<ExtendsNode: extends "%s">' % self.parent_name
def get_parent(self, context):
if self.parent_name_expr:
self.parent_name = self.parent_name_expr.resolve(context)
parent = self.parent_name
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name_expr:
error_msg += " Got this from the '%s' variable." % self.parent_name_expr.token
raise TemplateSyntaxError(error_msg)
if hasattr(parent, 'render'):
return parent # parent is a Template object
return get_template(parent)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = dict([(n.name, n) for n in
compiled_parent.nodelist.get_nodes_by_type(BlockNode)])
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
return compiled_parent._render(context)
class ConstantIncludeNode(Node):
def __init__(self, template_path):
try:
t = get_template(template_path)
self.template = t
except:
if settings.TEMPLATE_DEBUG:
raise
self.template = None
def render(self, context):
if self.template:
return self.template.render(context)
else:
return ''
class IncludeNode(Node):
def __init__(self, template_name):
self.template_name = Variable(template_name)
def render(self, context):
try:
template_name = self.template_name.resolve(context)
t = get_template(template_name)
return t.render(context)
except:
if settings.TEMPLATE_DEBUG:
raise
return ''
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock', 'endblock %s' % block_name))
parser.delete_first_token()
return BlockNode(block_name, nodelist)
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name, parent_name_expr = None, None
if bits[1][0] in ('"', "'") and bits[1][-1] == bits[1][0]:
parent_name = bits[1][1:-1]
else:
parent_name_expr = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0])
return ExtendsNode(nodelist, parent_name, parent_name_expr)
def do_include(parser, token):
"""
Loads a template and renders it with the current context.
Example::
{% include "foo/some_include" %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("%r tag takes one argument: the name of the template to be included" % bits[0])
path = bits[1]
if path[0] in ('"', "'") and path[-1] == path[0]:
return ConstantIncludeNode(path[1:-1])
return IncludeNode(bits[1])
register.tag('block', do_block)
register.tag('extends', do_extends)
register.tag('include', do_include)
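# Template-level usage of the tags registered above (a sketch; the template
# names are hypothetical). BlockNode.super is reached as ``block.super``:
#
#     {# base.html #}
#     {% block content %}default{% endblock %}
#
#     {# child.html #}
#     {% extends "base.html" %}
#     {% block content %}{{ block.super }} extended{% endblock %}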
|
hrydgard/nodeprep | refs/heads/master | python/convert.py | 1 | from xmpp_stringprep import nodeprep
import json
import logging
import sys
import codecs
import os
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
for line in sys.stdin.read().split("\n")[:-1]:
try:
a = unicode(line.strip(), "utf-8")
try:
print nodeprep.prepare(a)
except UnicodeError as e:
print "ILLEGAL:unable to prepare:" + str(e)
except UnicodeError as e:
print "ILLEGAL:unable to decode:" + str(e)
|
adityahase/frappe | refs/heads/develop | frappe/printing/doctype/print_settings/test_print_settings.py | 6 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
class TestPrintSettings(unittest.TestCase):
pass
|
SeedScientific/polio | refs/heads/master | source_data/migrations/0075_auto__del_sourceshapes.py | 1 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'SourceShapes'
db.delete_table(u'source_data_sourceshapes')
def backwards(self, orm):
# Adding model 'SourceShapes'
db.create_table(u'source_data_sourceshapes', (
('shape_area', self.gf('django.db.models.fields.FloatField')()),
('geometry', self.gf('jsonfield.fields.JSONField')()),
('shape_len', self.gf('django.db.models.fields.FloatField')()),
('source_region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['source_data.SourceRegion'], unique=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'source_data', ['SourceShapes'])
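# Applying or reversing this migration with South (a sketch):
#
#     $ python manage.py migrate source_data 0075
#     $ python manage.py migrate source_data 0074  # re-creates source_data_sourceshapes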
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.campaign': {
'Meta': {'ordering': "('-start_date',)", 'unique_together': "(('office', 'start_date'),)", 'object_name': 'Campaign', 'db_table': "'campaign'"},
'campaign_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.CampaignType']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'get_full_name'", 'unique_with': '()'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'datapoints.campaigntype': {
'Meta': {'object_name': 'CampaignType', 'db_table': "'campaign_type'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.indicator': {
'Meta': {'ordering': "('name',)", 'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"})
},
u'datapoints.office': {
'Meta': {'object_name': 'Office', 'db_table': "'office'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.region': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('name', 'region_type', 'office'),)", 'object_name': 'Region', 'db_table': "'region'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_high_risk': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'null': 'True'}),
'region_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'region_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.RegionType']"}),
'shape_file_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '55', 'populate_from': "'name'", 'unique_with': '()'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']"})
},
u'datapoints.regiontype': {
'Meta': {'object_name': 'RegionType', 'db_table': "'region_type'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
u'datapoints.source': {
'Meta': {'object_name': 'Source', 'db_table': "'source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
'source_data.activityreport': {
'Meta': {'object_name': 'ActivityReport'},
'activity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_hh_pending_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_local_leadership_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_hh_affected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_pro_opv_cd': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cd_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_attendance': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_iec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_caregiver_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_husband_issues': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_positive': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_num_vaccinated': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cm_vcm_sett': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipds_community_leader_present': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_reported': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_issue_resolved': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_children': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_num_hh': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_other_issue': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ipds_team_allowances': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.campaignmap': {
'Meta': {'object_name': 'CampaignMap', 'db_table': "'campaign_map'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
'source_campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceCampaign']", 'unique': 'True'})
},
'source_data.clustersupervisor': {
'Meta': {'object_name': 'ClusterSupervisor'},
'coord_rfp_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_smwg_meetings': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'coord_vcm_meeting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'end_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fund_transparency': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_conducted': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_activities_planned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_endorsed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_implementation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_socialdata': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_special_pop': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hrop_workplan_aligned': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_lgac': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ri_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisee_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervision_location_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'supervisor_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_birthtracking': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_data': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm_supervision': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.document': {
'Meta': {'ordering': "('-id',)", 'unique_together': "(('docfile', 'doc_text'),)", 'object_name': 'Document'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'doc_text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'source_data.etljob': {
'Meta': {'ordering': "('-date_attempted',)", 'object_name': 'EtlJob'},
'cron_guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'date_attempted': ('django.db.models.fields.DateTimeField', [], {}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'success_msg': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
'source_data.healthcamp': {
'Meta': {'object_name': 'HealthCamp'},
'agencyname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'appropriate_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'clinician2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'crowdcontroller': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'endtime': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'formhub_uuid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_photo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'hc_stockout': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'megaphone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nc_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_measles': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_patients': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'num_penta': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'opvvaccinator': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'recorder_opv': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recorder_ri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'separatetally': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_time': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'townannouncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'userid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.indicatormap': {
'Meta': {'object_name': 'IndicatorMap', 'db_table': "'indicator_map'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'source_indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceIndicator']", 'unique': 'True'})
},
'source_data.knowthepeople': {
'Meta': {'object_name': 'KnowThePeople'},
'brothers': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'citiesvisited': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofpax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'prefferedcity': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sisters': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state_country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.paxlistreporttraining': {
'Meta': {'object_name': 'PaxListReportTraining'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'emailaddr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofparticipant': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.phoneinventory': {
'Meta': {'object_name': 'PhoneInventory'},
'asset_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'colour_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'telephone_no': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmbirthrecord': {
'Meta': {'object_name': 'PracticeVCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsettcoordinates': {
'Meta': {'object_name': 'PracticeVCMSettCoordinates'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.practicevcmsummary': {
'Meta': {'object_name': 'PracticeVCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.processstatus': {
'Meta': {'object_name': 'ProcessStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'source_data.regionmap': {
'Meta': {'object_name': 'RegionMap', 'db_table': "'region_map'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'master_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']", 'unique': 'True'})
},
u'source_data.sourcecampaign': {
'Meta': {'object_name': 'SourceCampaign', 'db_table': "'source_campaign'"},
'campaign_string': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.sourcedatapoint': {
'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint', 'db_table': "'source_datapoint'"},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
'error_msg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'row_number': ('django.db.models.fields.IntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"})
},
u'source_data.sourceindicator': {
'Meta': {'object_name': 'SourceIndicator', 'db_table': "'source_indicator'"},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'source_data.sourceregion': {
'Meta': {'unique_together': "(('region_string', 'document', 'region_type', 'country'),)", 'object_name': 'SourceRegion', 'db_table': "'source_region'"},
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_high_risk': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'lon': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'parent_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'parent_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_string': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'region_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmbirthrecord': {
'Meta': {'object_name': 'VCMBirthRecord'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'datereport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dob': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'householdnumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nameofchild': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcm0dose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmnamecattended': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmrilink': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsettlement': {
'Meta': {'object_name': 'VCMSettlement'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'daterecorded': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_accuracy': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_altitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_latitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementgps_longitude': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vcmphone': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummary': {
'Meta': {'object_name': 'VCMSummary'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noreasonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'msd_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vcmsummarynew': {
'Meta': {'object_name': 'VCMSummaryNew'},
'census12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'census2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'censusnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'date_implement': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dateofreport': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_msd2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax4': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax6': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax7': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax8': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'display_vax9': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_display_msd3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_agedoutm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childdiedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_childsickm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_familymovedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_farmm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_hhnotvisitedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_marketm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noconsentm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nofeltneedm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_nogovtservicesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_noplusesm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_otherprotectionm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_playgroundm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poldiffsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascuref': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliohascurem': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_poliouncommonm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_relbeliefsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_schoolm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securityf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_securitym': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_sideeffectsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_soceventm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_toomanyroundsm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_msd_unhappywteamm': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_msd_chd_tot_missed_check': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_afpcase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_cmamreferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_fic': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_mslscase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_newborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_otherdisease': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_pregnantmother': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_rireferral': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_vcmattendedncer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'group_spec_events_spec_zerodose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'settlementcode': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'spec_grp_choice': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_12_59months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_2_11months': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_census': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_missed': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_newborns': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax12_59mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vax2_11mo': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tot_vaxnewborn': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax12_59mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mof': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vax2_11mom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsf': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'vaxnewbornsm': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'source_data.vwsregister': {
'Meta': {'object_name': 'VWSRegister'},
'acceptphoneresponsibility': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 8, 0, 0)'}),
'datephonecollected': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'deviceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'lname_vws': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'meta_instanceid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'personal_phone': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'process_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"}),
'request_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'simserial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'submissiondate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wardcode': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['source_data'] |
asadziach/tensorflow | refs/heads/pedestrian_detection_walabot_tf | tensorflow/contrib/layers/python/layers/optimizers.py | 12 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"loss",
"gradients",
"gradient_norm",
]
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False,
increment_global_step=True):
"""Given loss and parameters for optimizer, returns a training op.
  Various ways of passing optimizers include:
- string, name of the optimizer like 'SGD', 'Adam', see OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- function, takes learning rate `Tensor` as argument and must return
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
arguments. E.g. `optimize_loss(..., learning_rate=None,
optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
- class, subclass of `Optimizer` that takes only one required argument -
learning rate, such as AdamOptimizer, AdagradOptimizer.
E.g. `optimize_loss(..., optimizer=tf.train.AdagradOptimizer)`.
- object, instance of subclass of `Optimizer`.
    E.g., `optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.
Args:
loss: Scalar `Tensor`.
global_step: Scalar int `Tensor`, step counter to update on each step
unless `increment_global_step` is `False`. If not supplied,
it will be fetched from the default graph (see
      `tf.train.get_global_step` for details). If it has
      not been created, no step will be incremented with each weight
update. `learning_rate_decay_fn` requires `global_step`.
    learning_rate: float or `Tensor`, magnitude of the update applied at each
      training step. Can be `None`.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of `tf.Optimizer` that implements
`compute_gradients` and `apply_gradients` functions.
optimizer instance should be instantiation of `tf.Optimizer`
sub-class and have `compute_gradients` and `apply_gradients`
functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
    clip_gradients: float, callable or `None`. If a float is provided, global
      clipping is applied to prevent the norm of the gradients from exceeding
      this value. Alternatively, a callable can be provided, e.g. adaptive_clipping.
This callable takes a `list` of `(gradients, variables)` `tuple`s and
returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
      function.
For example: `tf.train.exponential_decay`.
Ignored if `learning_rate` is not supplied.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection. The order of execution
between `update_ops` and `loss` is non-deterministic.
variables: list of variables to optimize or
`None` to use all trainable variables.
    name: The name for this operation; it is used to scope operations and summaries.
    summaries: List of internal quantities to visualize on TensorBoard. If not
      set, only the loss and the learning rate will be reported. The
      complete list is in OPTIMIZER_SUMMARIES.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
increment_global_step: Whether to increment `global_step`. If your model
calls `optimize_loss` multiple times per training step (e.g. to optimize
different parts of the model), use this arg to avoid incrementing
`global_step` more times than necessary.
Returns:
Training op.
Raises:
ValueError: if:
* `loss` is an invalid type or shape.
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` is wrong type.
* `clip_gradients` is not float or callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = contrib_framework.get_global_step()
else:
contrib_framework.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
# Update ops take UPDATE_OPS collection if not provided.
if update_ops is None:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are run before computing loss.
if update_ops:
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
# Learning rate variable, with possible decay.
lr = None
if learning_rate is not None:
if (isinstance(learning_rate, ops.Tensor) and
learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
if learning_rate < 0.0:
raise ValueError("Invalid learning_rate %s.", learning_rate)
lr = vs.get_variable(
"learning_rate", [],
trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (str(learning_rate),
str(type(learning_rate))))
if summaries is None:
summaries = ["loss", "learning_rate"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError("Summaries should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_SUMMARIES), summ))
if learning_rate is not None and learning_rate_decay_fn is not None:
if global_step is None:
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is string (%s)." % optimizer)
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif (isinstance(optimizer, type) and
issubclass(optimizer, optimizer_.Optimizer)):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is class (%s)." % optimizer)
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
elif callable(optimizer):
if learning_rate is not None:
opt = optimizer(lr)
else:
opt = optimizer()
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
if "gradient_norm" in summaries:
summary.scalar("global_norm/gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError("Unknown type %s for clip_gradients" %
type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
summary.histogram("gradients/%s" % var_name, grad_values)
if "gradient_norm" in summaries:
summary.scalar("gradient_norm/%s" % var_name,
clip_ops.global_norm([grad_values]))
if clip_gradients is not None and "gradient_norm" in summaries:
summary.scalar("global_norm/clipped_gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients,
global_step=global_step if increment_global_step else None,
name="train")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
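# Editor's note: a minimal, hypothetical usage sketch for optimize_loss, kept
# as a comment because it is not part of the original module. It assumes a
# scalar `loss` tensor has already been built by the caller:
#
#   global_step = contrib_framework.get_or_create_global_step()
#   train_op = optimize_loss(loss, global_step, learning_rate=0.01,
#                            optimizer="SGD", clip_gradients=5.0)
#
# Each session.run(train_op) then applies one norm-clipped SGD update and
# increments global_step.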
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.to_float(global_step)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
  Implement adaptive gradient clipping as presented in section 3.2.1 of
https://arxiv.org/abs/1412.1602.
Keeps a moving average of the mean and std of the log(norm) of the gradient.
  If the norm exceeds `exp(mean + std_factor*std)`, all gradients are rescaled
such that the global norm becomes `exp(mean)`.
Args:
    std_factor: Python scalar (or tensor).
`max_norm = exp(mean + std_factor*std)`
decay: The smoothing factor of the moving averages.
static_max_norm: If provided, will threshold the norm to this value as an
extra safety.
global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
This provides a quicker adaptation of the mean for the first steps.
report_summary: If `True`, will add histogram summaries of the `max_norm`.
epsilon: Small value chosen to avoid zero variance.
    name: The name for this operation; it is used to scope operations and summaries.
Returns:
A function for applying gradient clipping.
"""
def gradient_clipping(grads_and_vars):
"""Internal function for adaptive clipping."""
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm,
array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
clipped_grads.append(
ops.IndexedSlices(grad.values * factor, grad.indices,
grad.dense_shape))
else:
clipped_grads.append(grad * factor)
return list(zip(clipped_grads, variables))
return gradient_clipping
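# Editor's note: illustrative only. The callable returned above is meant to be
# handed to optimize_loss as its `clip_gradients` argument, e.g.:
#
#   clip_fn = adaptive_clipping_fn(std_factor=2., global_step=global_step)
#   train_op = optimize_loss(loss, global_step, 0.01, "Adam",
#                            clip_gradients=clip_fn)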
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if gradient is None:
noisy_gradients.append(None)
continue
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if (grad is not None and
(var in gradient_multipliers or var.name in gradient_multipliers)):
key = var if var in gradient_multipliers else var.name
multiplier = constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values * multiplier
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
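# Editor's note: a hedged example of gradient_multipliers, assuming the model
# defines a variable whose name is "embedding:0":
#
#   train_op = optimize_loss(loss, global_step, 0.01, "SGD",
#                            gradient_multipliers={"embedding:0": 0.1})
#
# Keys may be variable objects or variable names; matching gradients are
# scaled by the given constant before being applied.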
|
liushilive/HTMLReport | refs/heads/master | HTMLReport/log/HandlerFactory.py | 1 | import logging
import logging.handlers
import sys
import threading
from io import StringIO
LOG_LEVEL_NOTSET = logging.NOTSET
LOG_LEVEL_DEBUG = logging.DEBUG
LOG_LEVEL_INFO = logging.INFO
LOG_LEVEL_WARNING = logging.WARNING
LOG_LEVEL_ERROR = logging.ERROR
# logger target
LOG_TARGET_CONSOLE = 0x1
LOG_TARGET_LOG_FILE = 0x10
LOG_TARGET_LOG_HTTP = 0x100
_LOGGER_FORMAT = "[%(levelname)7s] [%(asctime)s] [%(thread)d] [%(filename)s(%(lineno)d)] - %(message)s"
class InfoOrLessCritical(logging.Filter):
def filter(self, record):
return record.levelno < LOG_LEVEL_WARNING
class HandlerFactory(object):
handlers = {}
streams = {}
@classmethod
def get_std_out_handler(cls):
if 'std_out_handler' not in cls.handlers:
std_out_handler = logging.StreamHandler(sys.stdout)
std_out_handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
std_out_handler.addFilter(InfoOrLessCritical())
cls.handlers['std_out_handler'] = std_out_handler
return cls.handlers['std_out_handler']
@classmethod
def get_std_err_handler(cls):
if 'std_err_handler' not in cls.handlers:
std_err_handler = logging.StreamHandler(sys.stderr)
std_err_handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
std_err_handler.setLevel(LOG_LEVEL_WARNING)
cls.handlers['std_err_handler'] = std_err_handler
return cls.handlers['std_err_handler']
@classmethod
def get_rotating_file_handler(cls, log_path, max_bytes=0, backup_count=0):
if 'rotating_file_handler' not in cls.handlers:
cls.handlers['rotating_file_handler'] = {}
if log_path not in cls.handlers['rotating_file_handler']:
rotating_file_handler = logging.handlers.RotatingFileHandler(
log_path, 'a', max_bytes, backup_count, encoding='utf8')
rotating_file_handler.setLevel(LOG_LEVEL_NOTSET)
rotating_file_handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
cls.handlers['rotating_file_handler'][log_path] = rotating_file_handler
return cls.handlers['rotating_file_handler'][log_path]
@classmethod
def get_stream_handler(cls):
if 'rotating_stream_handler' not in cls.handlers:
cls.handlers['rotating_stream_handler'] = {}
        stream_id = str(threading.current_thread().ident)
        if stream_id not in cls.handlers['rotating_stream_handler']:
            stream = StringIO()
            cls.streams[stream_id] = stream
            rotating_stream_handler = logging.StreamHandler(cls.streams[stream_id])
            rotating_stream_handler.set_name(stream_id)
            rotating_stream_handler.setFormatter(logging.Formatter(_LOGGER_FORMAT))
            cls.handlers['rotating_stream_handler'][stream_id] = rotating_stream_handler
        return cls.handlers['rotating_stream_handler'][stream_id]
@classmethod
def get_stream_value(cls):
        stream_id = str(threading.current_thread().ident)
        if stream_id in cls.streams:
            stream = cls.streams[stream_id].getvalue()
            cls.streams[stream_id].truncate(0)
            cls.streams[stream_id].seek(0)
return stream
return ""
|
837468220/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_unicode.py | 67 | """Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
"""
import re
from ..pgen2 import token
from .. import fixer_base
_mapping = {"unichr" : "chr", "unicode" : "str"}
_literal_re = re.compile(r"[uU][rR]?[\'\"]")
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
if _literal_re.match(node.value):
new = node.clone()
new.value = new.value[1:]
return new
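# Editor's note: an illustrative sketch of the rewrites this fixer performs
# (not part of lib2to3 itself):
#   unicode(x)  ->  str(x)
#   unichr(65)  ->  chr(65)
#   u"text"     ->  "text"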
|
optimsoc/gzll-binutils | refs/heads/gzll | gdb/python/lib/gdb/__init__.py | 15 | # Copyright (C) 2010-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import traceback
import os
import sys
import _gdb
if sys.version_info[0] > 2:
# Python 3 moved "reload"
from imp import reload
from _gdb import *
class _GdbFile (object):
# These two are needed in Python 3
encoding = "UTF-8"
errors = "strict"
def close(self):
# Do nothing.
return None
def isatty(self):
return False
def writelines(self, iterable):
for line in iterable:
self.write(line)
def flush(self):
flush()
class GdbOutputFile (_GdbFile):
def write(self, s):
write(s, stream=STDOUT)
sys.stdout = GdbOutputFile()
class GdbOutputErrorFile (_GdbFile):
def write(self, s):
write(s, stream=STDERR)
sys.stderr = GdbOutputErrorFile()
# Default prompt hook does nothing.
prompt_hook = None
# Ensure that sys.argv is set to something.
# We do not use PySys_SetArgvEx because it did not appear until 2.6.6.
sys.argv = ['']
# Initial pretty printers.
pretty_printers = []
# Initial type printers.
type_printers = []
# Initial xmethod matchers.
xmethods = []
# Initial frame filters.
frame_filters = {}
# Convenience variable to GDB's python directory
PYTHONDIR = os.path.dirname(os.path.dirname(__file__))
# Auto-load all functions/commands.
# Packages to auto-load.
packages = [
'function',
'command',
'printer'
]
# pkgutil.iter_modules is not available prior to Python 2.6. Instead,
# manually iterate the list, collating the Python files in each module
# path. Construct the module name, and import.
def auto_load_packages():
for package in packages:
location = os.path.join(os.path.dirname(__file__), package)
if os.path.exists(location):
py_files = filter(lambda x: x.endswith('.py')
and x != '__init__.py',
os.listdir(location))
for py_file in py_files:
# Construct from foo.py, gdb.module.foo
modname = "%s.%s.%s" % ( __name__, package, py_file[:-3] )
try:
if modname in sys.modules:
# reload modules with duplicate names
reload(__import__(modname))
else:
__import__(modname)
except:
sys.stderr.write (traceback.format_exc() + "\n")
auto_load_packages()
def GdbSetPythonDirectory(dir):
"""Update sys.path, reload gdb and auto-load packages."""
global PYTHONDIR
try:
sys.path.remove(PYTHONDIR)
except ValueError:
pass
sys.path.insert(0, dir)
PYTHONDIR = dir
# note that reload overwrites the gdb module without deleting existing
# attributes
reload(__import__(__name__))
auto_load_packages()
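# Editor's note: an illustrative sketch, not part of the original module. GDB
# itself invokes GdbSetPythonDirectory when its data directory is set; a
# hypothetical call would look like:
#   GdbSetPythonDirectory("/usr/share/gdb/python")
# which re-points sys.path, reloads the gdb module and re-runs
# auto_load_packages().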
|
LittleLama/Sick-Beard-BoxCar2 | refs/heads/development | lib/hachoir_parser/file_system/linux_swap.py | 90 | """
Linux swap file.
Documentation: Linux kernel source code, files:
- mm/swapfile.c
- include/linux/swap.h
Author: Victor Stinner
Creation date: 25 December 2006 (Christmas ;-))
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (ParserError, GenericVector,
UInt32, String,
Bytes, NullBytes, RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import humanFilesize
from lib.hachoir_core.bits import str2hex
PAGE_SIZE = 4096
# Definition of MAX_SWAP_BADPAGES in Linux kernel:
# (__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)
MAX_SWAP_BADPAGES = ((PAGE_SIZE - 10) - 1536) // 4
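# With PAGE_SIZE = 4096 this works out to ((4096 - 10) - 1536) // 4 == 637,
# i.e. at most 637 bad-page slots fit in the swap header page.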
class Page(RawBytes):
static_size = PAGE_SIZE*8
def __init__(self, parent, name):
RawBytes.__init__(self, parent, name, PAGE_SIZE)
class UUID(Bytes):
static_size = 16*8
def __init__(self, parent, name):
Bytes.__init__(self, parent, name, 16)
def createDisplay(self):
text = str2hex(self.value, format=r"%02x")
return "%s-%s-%s-%s-%s" % (
text[:8], text[8:12], text[12:16], text[16:20], text[20:])
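        # For example, the 16 raw bytes 00112233445566778899aabbccddeeff are
        # rendered as "00112233-4455-6677-8899-aabbccddeeff".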
class LinuxSwapFile(Parser):
PARSER_TAGS = {
"id": "linux_swap",
"file_ext": ("",),
"category": "file_system",
"min_size": PAGE_SIZE*8,
"description": "Linux swap file",
"magic": (
("SWAP-SPACE", (PAGE_SIZE-10)*8),
("SWAPSPACE2", (PAGE_SIZE-10)*8),
("S1SUSPEND\0", (PAGE_SIZE-10)*8),
),
}
endian = LITTLE_ENDIAN
def validate(self):
magic = self.stream.readBytes((PAGE_SIZE-10)*8, 10)
if magic not in ("SWAP-SPACE", "SWAPSPACE2", "S1SUSPEND\0"):
return "Unknown magic string"
if MAX_SWAP_BADPAGES < self["nb_badpage"].value:
return "Invalid number of bad page (%u)" % self["nb_badpage"].value
return True
def getPageCount(self):
"""
Number of pages which can really be used for swapping:
number of page minus bad pages minus one page (used for the header)
"""
# -1 because first page is used for the header
return self["last_page"].value - self["nb_badpage"].value - 1
def createDescription(self):
if self["magic"].value == "S1SUSPEND\0":
text = "Suspend swap file version 1"
elif self["magic"].value == "SWAPSPACE2":
text = "Linux swap file version 2"
else:
text = "Linux swap file version 1"
nb_page = self.getPageCount()
return "%s, page size: %s, %s pages" % (
text, humanFilesize(PAGE_SIZE), nb_page)
def createFields(self):
# First kilobyte: boot sectors
yield RawBytes(self, "boot", 1024, "Space for disklabel etc.")
# Header
yield UInt32(self, "version")
yield UInt32(self, "last_page")
yield UInt32(self, "nb_badpage")
yield UUID(self, "sws_uuid")
yield UUID(self, "sws_volume")
yield NullBytes(self, "reserved", 117*4)
# Read bad pages (if any)
count = self["nb_badpage"].value
if count:
if MAX_SWAP_BADPAGES < count:
raise ParserError("Invalid number of bad page (%u)" % count)
yield GenericVector(self, "badpages", count, UInt32, "badpage")
# Read magic
padding = self.seekByte(PAGE_SIZE - 10, "padding", null=True)
if padding:
yield padding
yield String(self, "magic", 10, charset="ASCII")
# Read all pages
yield GenericVector(self, "pages", self["last_page"].value, Page, "page")
# Padding at the end
padding = self.seekBit(self.size, "end_padding", null=True)
if padding:
yield padding
|
chanceraine/nupic | refs/heads/master | nupic/math/stats.py | 40 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
stats.py defines functions and data structures related to statistical analysis.
"""
import random
import numpy
from nupic.bindings.math import GetNTAReal, SparseMatrix
dtype = GetNTAReal()
def pickByDistribution(distribution, r=None):
"""
Pick a value according to the provided distribution.
@param distribution -- Probability distribution. Need not be normalized.
@param r -- Instance of random.Random. Uses the system instance if one is
not provided.
Example:
pickByDistribution([.2, .1])
returns 0 two thirds of the time and 1 one third of the time.
"""
if r is None:
r = random
x = r.uniform(0, sum(distribution))
for i, d in enumerate(distribution):
if x <= d:
return i
x -= d
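# Illustrative sketch (added, not part of the original module); the seed and
# sample count are hypothetical:
def _examplePickByDistribution():
    rng = random.Random(42)
    counts = [0, 0]
    for _ in range(3000):
        counts[pickByDistribution([.2, .1], rng)] += 1
    # counts[0] should be roughly twice counts[1]
    return counts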
def Indicator(pos, size, dtype):
"""Returns an array of length size and type dtype that is everywhere 0,
except in the index in pos.
Returns an array of length size and element type dtype.
Parameters
----------
pos: A single integer that specifies
the position of the one entry that will be set.
size: The total size of the array to be returned.
dtype: The element type (compatible with NumPy array())
of the array to be returned.
"""
x = numpy.zeros(size, dtype=dtype)
x[pos] = 1
return x
def MultiArgMax(x):
"""Get tuple (actually a generator) of indices where the max value of
array x occurs. Requires that x have a max() method, as x.max()
(in the case of NumPy) is much faster than max(x).
For a simpler, faster argmax when there is only a single maximum entry,
or when knowing only the first index where the maximum occurs,
call argmax() on a NumPy array, nupic.bindings.iorange.WrappedVector or
nupic.NodeInput.
Returns Generator with the indices where the max value occurs.
Parameters
----------
x: Any sequence that has a max() method.
"""
m = x.max()
return (i for i, v in enumerate(x) if v == m)
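# Illustrative sketch (added, not part of the original module):
def _exampleMultiArgMax():
    x = numpy.array([1, 3, 2, 3])
    return list(MultiArgMax(x))  # -> [1, 3]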
def Any(sequence):
"""Returns true if any element of the sequence satisfies True.
Tests much faster (30%) than bool(sum(bool(x) for x in sequence)).
Returns A boolean value.
Parameters
----------
sequence: Any sequence whose elements can be evaluated as booleans.
"""
return bool(reduce(lambda x, y: x or y, sequence, False))
def All(sequence):
"""Returns true if all elements of the sequence satisfy True and x.
Returns A boolean value.
Parameters
----------
sequence: Any sequence whose elements can be evaluated as booleans.
"""
return bool(reduce(lambda x, y: x and y, sequence, True))
def Product(sequence):
"""Returns the product of the elements of the sequence.
Use numpy.prod() if the sequence is an array, as it will be faster.
Remember that the product of many numbers may rapidly overflow or
underflow the numeric precision of the computer.
Use a sum of the logs of the sequence elements instead when precision
should be maintained.
Returns A single value that is the product of all the sequence elements.
Parameters
----------
sequence: Any sequence whose elements can be multiplied by their
neighbors.
"""
return reduce(lambda x, y: x * y, sequence)
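# Illustrative sketch (added, not part of the original module) of the
# log-space alternative suggested in the docstring above; assumes strictly
# positive elements:
def _exampleLogProduct(sequence):
    values = numpy.asarray(sequence, dtype=float)
    # exp(sum(log(x))) equals the product without intermediate overflow.
    return numpy.exp(numpy.sum(numpy.log(values)))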
def MultiIndicator(pos, size, dtype):
"""Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos.
Returns An array of length size and element type dtype.
Parameters
----------
pos: A single integer or sequence of integers that specify
the position of ones to be set.
size: The total size of the array to be returned.
dtype: The element type (compatible with NumPy array())
of the array to be returned.
"""
x = numpy.zeros(size, dtype=dtype)
if hasattr(pos, '__iter__'):
for i in pos: x[i] = 1
else: x[pos] = 1
return x
def Distribution(pos, size, counts, dtype):
"""Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos. The non-zero indices
contain a normalized distribution based on the counts.
Returns An array of length size and element type dtype.
Parameters
----------
pos: A single integer or sequence of integers that specify
the position of ones to be set.
size: The total size of the array to be returned.
counts: The number of times we have observed each index.
dtype: The element type (compatible with NumPy array())
of the array to be returned.
"""
x = numpy.zeros(size, dtype=dtype)
if hasattr(pos, '__iter__'):
# calculate normalization constant
total = 0
for i in pos:
total += counts[i]
total = float(total)
# set included positions to normalized probability
for i in pos:
x[i] = counts[i]/total
# If we don't have a set of positions, assume there's only one position
else: x[pos] = 1
return x
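# Illustrative sketch (added, not part of the original module):
def _exampleDistribution():
    # Counts 2 and 6 at positions 1 and 3 normalize to 0.25 and 0.75.
    return Distribution([1, 3], 5, [0, 2, 0, 6, 0], float)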
class ConditionalProbabilityTable2D(object):
"""Holds frequencies in a 2D grid of bins.
Binning is not performed automatically by this class.
Bin updates must be done one row at a time.
Based on nupic::SparseMatrix which is a compressed sparse row matrix.
Number of columns cannot be changed once set.
Number of rows may be increased.
Also maintains the row and column sumProp distributions.
"""
def __init__(self, rowHint=None, ncols=None):
"""Constructs a new empty histogram with no rows or columns.
If rowHint is specified, ncols must be specified
(though not vice versa).
If ncols is specified, the number of columns cannot be changed
thereafter.
"""
self.hist_ = None
self.rowSums_ = None
self.colSums_ = None
if ncols:
if not rowHint: rowHint = 1
assert dtype
self.grow(rowHint, ncols)
else: assert not rowHint
self.hack_ = None
def numRows(self):
"""Gets the number of rows in the histogram.
Returns Integer number of rows.
"""
if self.hist_: return self.hist_.nRows()
else: return 0
def numColumns(self):
if self.hist_: return self.hist_.nCols()
else: return 0
def grow(self, rows, cols):
"""Grows the histogram to have rows rows and cols columns.
Must not have been initialized before, or already have the same
number of columns.
If rows is smaller than the current number of rows,
does not shrink.
Also updates the sizes of the row and column sums.
Parameters
----------
rows: Integer number of rows.
cols: Integer number of columns.
"""
if not self.hist_:
self.hist_ = SparseMatrix(rows, cols)
self.rowSums_ = numpy.zeros(rows, dtype=dtype)
self.colSums_ = numpy.zeros(cols, dtype=dtype)
self.hack_ = None
else:
oldRows = self.hist_.nRows()
oldCols = self.hist_.nCols()
nextRows = max(oldRows, rows)
nextCols = max(oldCols, cols)
if (oldRows < nextRows) or (oldCols < nextCols):
self.hist_.resize(nextRows, nextCols)
if oldRows < nextRows:
oldSums = self.rowSums_
self.rowSums_ = numpy.zeros(nextRows, dtype=dtype)
self.rowSums_[0:len(oldSums)] = oldSums
self.hack_ = None
if oldCols < nextCols:
oldSums = self.colSums_
self.colSums_ = numpy.zeros(nextCols, dtype=dtype)
self.colSums_[0:len(oldSums)] = oldSums
self.hack_ = None
def updateRow(self, row, distribution):
"""Add distribution to row row.
Distribution should be an array of probabilities or counts.
Parameters
----------
row: Integer index of the row to add to.
May be larger than the current number of rows, in which case
the histogram grows.
distribution: Array of length equal to the number of columns.
"""
self.grow(row+1, len(distribution))
self.hist_.axby(row, 1, 1, distribution)
self.rowSums_[row] += distribution.sum()
self.colSums_ += distribution
self.hack_ = None # Clear out the cached inference.
def inferRow(self, distribution):
"""Computes the sumProp probability of each row given the input probability
of each column. Normalizes the distribution in each column on the fly.
The semantics are as follows: If the distribution is P(col|e) where e is
the evidence and col is the column, and the CPD represents P(row|col), then
this calculates sum(P(col|e) P(row|col)) = P(row|e).
Returns array of length equal to the number of rows.
Parameters
----------
distribution: Array of length equal to the number of columns.
"""
# normalize over colSums_ because P(row|col) = P(col,row)/P(col)
return self.hist_ * (distribution / self.colSums_)
def inferRowEvidence(self, distribution):
"""Computes the probability of evidence given each row from the probability
of evidence given each column. Essentially, this just means that it sums
probabilities over (normalized) rows. Normalizes the distribution over
each row on the fly.
The semantics are as follows: If the distribution is P(e|col) where e is
evidence and col is the column, and the CPD is of P(col|row), then this
calculates sum(P(e|col) P(col|row)) = P(e|row).
Returns array of length equal to the number of rows.
Parameters
----------
distribution: Array of length equal to the number of columns.
"""
# normalize over rowSums_ because P(col|row) = P(col,row)/P(row).
return (self.hist_ * distribution) / self.rowSums_
def inferRowMaxProd(self, distribution):
return self.hist_.vecMaxProd(distribution)
def inferRowCompat(self, distribution):
"""Equivalent to the category inference of zeta1.TopLevel.
Computes the max_prod (maximum component of a component-wise multiply)
between the rows of the histogram and the incoming distribution.
May be slow if the result of clean_outcpd() is not valid.
Returns array of length equal to the number of rows.
Parameters
----------
distribution: Array of length equal to the number of columns.
"""
if self.hack_ is None:
self.clean_outcpd()
return self.hack_.vecMaxProd(distribution)
def clean_outcpd(self):
"""Hack to act like clean_outcpd on zeta1.TopLevelNode.
Take the max element in each column, set it to 1, and set all the
other elements to 0.
Only called by inferRowCompat() and only needed if an updateRow()
has been called since the last clean_outcpd().
"""
m = self.hist_.toDense()
for j in xrange(m.shape[1]): # For each column.
cmax = m[:,j].max()
if cmax:
m[:,j] = numpy.array(m[:,j] == cmax, dtype=dtype)
self.hack_ = SparseMatrix(0, self.hist_.nCols())
for i in xrange(m.shape[0]):
self.hack_.addRow(m[i,:])
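# Illustrative sketch (added, not part of the original module); the counts
# below are hypothetical and require the nupic SparseMatrix bindings:
def _exampleConditionalProbabilityTable():
    cpt = ConditionalProbabilityTable2D(rowHint=2, ncols=3)
    cpt.updateRow(0, numpy.array([1, 2, 1], dtype=float))
    cpt.updateRow(1, numpy.array([0, 1, 3], dtype=float))
    # P(row|e) from a column evidence distribution.
    return cpt.inferRow(numpy.array([0.5, 0.25, 0.25]))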
def ShannonEntropy(x):
x = numpy.asarray(x, dtype=float)
s = x.sum()
if s: p = x / s
else: p = x
assert (p >= 0).all()
p = p[p != 0] # Get rid of 0s.
return - numpy.dot(p, numpy.log(p))
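# Illustrative sketch (added, not part of the original module): a uniform
# distribution over n outcomes has entropy log(n) nats.
def _exampleShannonEntropy():
    return ShannonEntropy([1, 1, 1, 1])  # -> numpy.log(4), about 1.386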
def ShannonEntropyLog(lx):
lx = numpy.asarray(lx)
lx = lx - lx.max()
x = numpy.exp(lx)
s = x.sum()
return - ( ( numpy.dot(x, lx) / s ) - numpy.log(s) )
def DifferentialEntropy(mass, areas=1.0):
x = numpy.asarray(mass, dtype=float)
p = x / x.sum()
return -numpy.dot(p, numpy.log(p)) + numpy.dot(p, numpy.log(areas))
#----------------------------------------
# Fuzzy k-means
def fuzzyKmeans(samples, fixCenter=None, iter=5, fuzzParam=1.5):
# Not actually k-means yet, just 3 means
if fixCenter is not None:
dMeans = [min(samples)+0.01, fixCenter, max(samples)-0.01]
else:
dMeans = [min(samples)+0.01, numpy.mean(samples), max(samples)-0.01]
begDeg = map(None,numpy.zeros(len(samples)))
midDeg = map(None,numpy.zeros(len(samples)))
endDeg = map(None,numpy.zeros(len(samples)))
for j in range(iter):
for k in range(len(samples)):
pBeg = (1.0/(samples[k] - dMeans[2])**2)**(1.0/(fuzzParam-1))
pMid = (1.0/(samples[k] - dMeans[1])**2)**(1.0/(fuzzParam-1))
pEnd = (1.0/(samples[k] - dMeans[0])**2)**(1.0/(fuzzParam-1))
nmlz = pBeg + pMid + pEnd
begDeg[k] = pBeg/nmlz; midDeg[k] = pMid/nmlz; endDeg[k] = pEnd/nmlz
# Update means 0 and 2; the middle one stays fixed when fixCenter is given. (Change this for general-purpose k-means)
dMeans[0] = numpy.nansum((numpy.array(endDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(endDeg)**fuzzParam)
if fixCenter is None:
dMeans[1] = numpy.nansum((numpy.array(midDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(midDeg)**fuzzParam)
dMeans[2] = numpy.nansum((numpy.array(begDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(begDeg)**fuzzParam)
return dMeans
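# Illustrative sketch (added, not part of the original module); the sample
# data is hypothetical:
def _exampleFuzzyKmeans():
    samples = [0.1, 0.2, 0.15, 0.9, 0.95, 1.0]
    # Returns the three cluster means [low, middle, high].
    return fuzzyKmeans(samples)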
|
ntymtsiv/tempest | refs/heads/master | tempest/api/compute/admin/test_flavors_access_negative.py | 2 | # Copyright 2013 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class FlavorsAccessNegativeTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Flavor Access API extension.
Add and remove Flavor Access require admin privileges.
"""
_interface = 'json'
@classmethod
def setUpClass(cls):
super(FlavorsAccessNegativeTestJSON, cls).setUpClass()
if not test.is_extension_enabled('FlavorExtraData', 'compute'):
msg = "FlavorExtraData extension not enabled."
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
admin_client = cls._get_identity_admin_client()
cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
tenant_name)
cls.tenant_id = cls.tenant['id']
cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
flavors_client.
tenant_name)
cls.adm_tenant_id = cls.adm_tenant['id']
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
cls.disk = 10
@test.attr(type=['negative', 'gate'])
def test_flavor_access_list_with_public_flavor(self):
# Test to list flavor access with exceptions by querying public flavor
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='True')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
self.assertEqual(resp.status, 200)
self.assertRaises(exceptions.NotFound,
self.client.list_flavor_access,
new_flavor_id)
@test.attr(type=['negative', 'gate'])
def test_flavor_non_admin_add(self):
# Test to add flavor access as a user without admin privileges.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.add_flavor_access,
new_flavor['id'],
self.tenant_id)
@test.attr(type=['negative', 'gate'])
def test_flavor_non_admin_remove(self):
# Test to remove flavor access as a user without admin privileges.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# Add flavor access to a tenant.
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.addCleanup(self.client.remove_flavor_access,
new_flavor['id'], self.tenant_id)
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.remove_flavor_access,
new_flavor['id'],
self.tenant_id)
@test.attr(type=['negative', 'gate'])
def test_add_flavor_access_duplicate(self):
# Create a new flavor.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# Add flavor access to a tenant.
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.addCleanup(self.client.remove_flavor_access,
new_flavor['id'], self.tenant_id)
# An exception should be raised when adding flavor access to the same
# tenant
self.assertRaises(exceptions.Conflict,
self.client.add_flavor_access,
new_flavor['id'],
self.tenant_id)
@test.attr(type=['negative', 'gate'])
def test_remove_flavor_access_not_found(self):
# Create a new flavor.
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = data_utils.rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# An exception should be raised when flavor access is not found
self.assertRaises(exceptions.NotFound,
self.client.remove_flavor_access,
new_flavor['id'],
str(uuid.uuid4()))
class FlavorsAdminNegativeTestXML(FlavorsAccessNegativeTestJSON):
_interface = 'xml'
|
google/syzygy | refs/heads/master | third_party/numpy/files/numpy/lib/tests/test_arraysetops.py | 22 | """ Test functions for 1D array set operations.
"""
from numpy.testing import *
import numpy as np
from numpy.lib.arraysetops import *
import warnings
class TestAso(TestCase):
def test_unique( self ):
a = np.array( [5, 7, 1, 2, 1, 5, 7] )
ec = np.array( [1, 2, 5, 7] )
c = unique( a )
assert_array_equal( c, ec )
vals, indices = unique( a, return_index=True )
ed = np.array( [2, 3, 0, 1] )
assert_array_equal(vals, ec)
assert_array_equal(indices, ed)
vals, ind0, ind1 = unique( a, return_index=True,
return_inverse=True )
ee = np.array( [2, 3, 0, 1, 0, 2, 3] )
assert_array_equal(vals, ec)
assert_array_equal(ind0, ed)
assert_array_equal(ind1, ee)
assert_array_equal([], unique([]))
def test_intersect1d( self ):
# unique inputs
a = np.array( [5, 7, 1, 2] )
b = np.array( [2, 4, 3, 1, 5] )
ec = np.array( [1, 2, 5] )
c = intersect1d( a, b, assume_unique=True )
assert_array_equal( c, ec )
# non-unique inputs
a = np.array( [5, 5, 7, 1, 2] )
b = np.array( [2, 1, 4, 3, 3, 1, 5] )
ed = np.array( [1, 2, 5] )
c = intersect1d( a, b )
assert_array_equal( c, ed )
assert_array_equal([], intersect1d([],[]))
def test_setxor1d( self ):
a = np.array( [5, 7, 1, 2] )
b = np.array( [2, 4, 3, 1, 5] )
ec = np.array( [3, 4, 7] )
c = setxor1d( a, b )
assert_array_equal( c, ec )
a = np.array( [1, 2, 3] )
b = np.array( [6, 5, 4] )
ec = np.array( [1, 2, 3, 4, 5, 6] )
c = setxor1d( a, b )
assert_array_equal( c, ec )
a = np.array( [1, 8, 2, 3] )
b = np.array( [6, 5, 4, 8] )
ec = np.array( [1, 2, 3, 4, 5, 6] )
c = setxor1d( a, b )
assert_array_equal( c, ec )
assert_array_equal([], setxor1d([],[]))
def test_ediff1d(self):
zero_elem = np.array([])
one_elem = np.array([1])
two_elem = np.array([1,2])
assert_array_equal([],ediff1d(zero_elem))
assert_array_equal([0],ediff1d(zero_elem,to_begin=0))
assert_array_equal([0],ediff1d(zero_elem,to_end=0))
assert_array_equal([-1,0],ediff1d(zero_elem,to_begin=-1,to_end=0))
assert_array_equal([],ediff1d(one_elem))
assert_array_equal([1],ediff1d(two_elem))
def test_in1d(self):
a = np.array( [5, 7, 1, 2] )
b = np.array( [2, 4, 3, 1, 5] )
ec = np.array( [True, False, True, True] )
c = in1d( a, b, assume_unique=True )
assert_array_equal( c, ec )
a[0] = 8
ec = np.array( [False, False, True, True] )
c = in1d( a, b, assume_unique=True )
assert_array_equal( c, ec )
a[0], a[3] = 4, 8
ec = np.array( [True, False, True, False] )
c = in1d( a, b, assume_unique=True )
assert_array_equal( c, ec )
a = np.array([5,4,5,3,4,4,3,4,3,5,2,1,5,5])
b = [2,3,4]
ec = [False, True, False, True, True, True, True, True, True, False,
True, False, False, False]
c = in1d(a, b)
assert_array_equal(c, ec)
b = b + [5, 5, 4]
ec = [True, True, True, True, True, True, True, True, True, True,
True, False, True, True]
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
ec = np.array([True, False, True, True])
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 1, 2])
b = np.array([2, 4, 3, 3, 1, 5])
ec = np.array([True, False, True, True, True])
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5])
b = np.array([2])
ec = np.array([False])
c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 5])
b = np.array([2, 2])
ec = np.array([False, False])
c = in1d(a, b)
assert_array_equal(c, ec)
assert_array_equal(in1d([], []), [])
def test_in1d_char_array( self ):
a = np.array(['a', 'b', 'c','d','e','c','e','b'])
b = np.array(['a','c'])
ec = np.array([True, False, True, False, False, True, False, False])
c = in1d(a, b)
assert_array_equal(c, ec)
def test_union1d( self ):
a = np.array( [5, 4, 7, 1, 2] )
b = np.array( [2, 4, 3, 3, 2, 1, 5] )
ec = np.array( [1, 2, 3, 4, 5, 7] )
c = union1d( a, b )
assert_array_equal( c, ec )
assert_array_equal([], union1d([],[]))
def test_setdiff1d( self ):
a = np.array( [6, 5, 4, 7, 1, 2, 7, 4] )
b = np.array( [2, 4, 3, 3, 2, 1, 5] )
ec = np.array( [6, 7] )
c = setdiff1d( a, b )
assert_array_equal( c, ec )
a = np.arange( 21 )
b = np.arange( 19 )
ec = np.array( [19, 20] )
c = setdiff1d( a, b )
assert_array_equal( c, ec )
assert_array_equal([], setdiff1d([],[]))
def test_setdiff1d_char_array(self):
a = np.array(['a','b','c'])
b = np.array(['a','b','s'])
assert_array_equal(setdiff1d(a,b),np.array(['c']))
def test_manyways( self ):
a = np.array( [5, 7, 1, 2, 8] )
b = np.array( [9, 8, 2, 4, 3, 1, 5] )
c1 = setxor1d( a, b )
aux1 = intersect1d( a, b )
aux2 = union1d( a, b )
c2 = setdiff1d( aux2, aux1 )
assert_array_equal( c1, c2 )
if __name__ == "__main__":
run_module_suite()
|
bionoid/kivy | refs/heads/master | kivy/modules/touchring.py | 10 | '''
Touchring
=========
Shows rings around every touch on the surface / screen. You can use this module
to check that you don't have any calibration issues with touches.
Configuration
-------------
:Parameters:
`image`: str, defaults to '<kivy>/data/images/ring.png'
Filename of the image to use.
`scale`: float, defaults to 1.
Scale of the image.
`alpha`: float, defaults to 1.
Opacity of the image.
Example
-------
In your configuration (`~/.kivy/config.ini`), you can add something like
this::
[modules]
touchring = image=mypointer.png,scale=.3,alpha=.7
'''
__all__ = ('start', 'stop')
from kivy.core.image import Image
from kivy.graphics import Color, Rectangle
from kivy import kivy_data_dir
from os.path import join
pointer_image = None
pointer_scale = 1.0
pointer_alpha = 0.7
def _touch_down(win, touch):
ud = touch.ud
with win.canvas.after:
ud['tr.color'] = Color(1, 1, 1, pointer_alpha)
iw, ih = pointer_image.size
ud['tr.rect'] = Rectangle(
pos=(
touch.x - (pointer_image.width / 2. * pointer_scale),
touch.y - (pointer_image.height / 2. * pointer_scale)),
size=(iw * pointer_scale, ih * pointer_scale),
texture=pointer_image.texture)
if not ud.get('tr.grab', False):
ud['tr.grab'] = True
touch.grab(win)
def _touch_move(win, touch):
ud = touch.ud
if not ud.get('tr.rect', False):
_touch_down(win, touch)
ud['tr.rect'].pos = (
touch.x - (pointer_image.width / 2. * pointer_scale),
touch.y - (pointer_image.height / 2. * pointer_scale))
def _touch_up(win, touch):
if touch.grab_current is win:
ud = touch.ud
win.canvas.after.remove(ud['tr.color'])
win.canvas.after.remove(ud['tr.rect'])
if ud.get('tr.grab') is True:
touch.ungrab(win)
ud['tr.grab'] = False
def start(win, ctx):
# XXX use ctx !
global pointer_image, pointer_scale, pointer_alpha
pointer_fn = ctx.config.get('image',
'atlas://data/images/defaulttheme/ring')
pointer_scale = float(ctx.config.get('scale', 1.0))
pointer_alpha = float(ctx.config.get('alpha', 1.0))
pointer_image = Image(pointer_fn)
win.bind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
def stop(win, ctx):
win.unbind(on_touch_down=_touch_down,
on_touch_move=_touch_move,
on_touch_up=_touch_up)
|
don-github/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/modulestore/split_mongo/__init__.py | 201 | """
General utilities
"""
from collections import namedtuple
from contracts import contract, check
from opaque_keys.edx.locator import BlockUsageLocator
class BlockKey(namedtuple('BlockKey', 'type id')):
__slots__ = ()
@contract(type="string[>0]")
def __new__(cls, type, id):
return super(BlockKey, cls).__new__(cls, type, id)
@classmethod
@contract(usage_key=BlockUsageLocator)
def from_usage_key(cls, usage_key):
return cls(usage_key.block_type, usage_key.block_id)
CourseEnvelope = namedtuple('CourseEnvelope', 'course_key structure')
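# Illustrative sketch (added, not part of the original module): BlockKey acts
# as a plain (type, id) tuple, with a non-empty type enforced by contract.
def _example_block_key():
    key = BlockKey('html', 'introduction')  # hypothetical block type and id
    return key.type, key.id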
|
varunkamra/kuma | refs/heads/master | vendor/packages/babel/messages/mofile.py | 64 | # -*- coding: utf-8 -*-
"""
babel.messages.mofile
~~~~~~~~~~~~~~~~~~~~~
Writing of files in the ``gettext`` MO (machine object) format.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import array
import struct
from babel.messages.catalog import Catalog, Message
from babel._compat import range_type, array_tobytes
LE_MAGIC = 0x950412de
BE_MAGIC = 0xde120495
def read_mo(fileobj):
"""Read a binary MO file from the given file-like object and return a
corresponding `Catalog` object.
:param fileobj: the file-like object to read the MO file from
:note: The implementation of this function is heavily based on the
``GNUTranslations._parse`` method of the ``gettext`` module in the
standard library.
"""
catalog = Catalog()
headers = {}
filename = getattr(fileobj, 'name', '')
buf = fileobj.read()
buflen = len(buf)
unpack = struct.unpack
# Parse the .mo file header, which consists of 5 little endian 32
# bit words.
magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?
if magic == LE_MAGIC:
version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
elif magic == BE_MAGIC:
version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise IOError(0, 'Bad magic number', filename)
# Now put all messages from the .mo file buffer into the catalog
# dictionary
for i in range_type(0, msgcount):
mlen, moff = unpack(ii, buf[origidx:origidx + 8])
mend = moff + mlen
tlen, toff = unpack(ii, buf[transidx:transidx + 8])
tend = toff + tlen
if mend < buflen and tend < buflen:
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise IOError(0, 'File is corrupt', filename)
# See if we're looking at GNU .mo conventions for metadata
if mlen == 0:
# Catalog description
lastkey = key = None
for item in tmsg.splitlines():
item = item.strip()
if not item:
continue
if b':' in item:
key, value = item.split(b':', 1)
lastkey = key = key.strip().lower()
headers[key] = value.strip()
elif lastkey:
headers[lastkey] += b'\n' + item
if b'\x04' in msg: # context
ctxt, msg = msg.split(b'\x04')
else:
ctxt = None
if b'\x00' in msg: # plural forms
msg = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
if catalog.charset:
msg = [x.decode(catalog.charset) for x in msg]
tmsg = [x.decode(catalog.charset) for x in tmsg]
else:
if catalog.charset:
msg = msg.decode(catalog.charset)
tmsg = tmsg.decode(catalog.charset)
catalog[msg] = Message(msg, tmsg, context=ctxt)
# advance to next entry in the seek tables
origidx += 8
transidx += 8
catalog.mime_headers = headers.items()
return catalog
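# Illustrative sketch (added, not part of the original module): a write/read
# round trip through an in-memory buffer, using write_mo() defined below.
def _example_mo_roundtrip():
    from io import BytesIO
    catalog = Catalog(locale='en_US')
    catalog.add('foo', 'Voh')
    buf = BytesIO()
    write_mo(buf, catalog)
    buf.seek(0)
    return read_mo(buf)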
def write_mo(fileobj, catalog, use_fuzzy=False):
"""Write a catalog to the specified file-like object using the GNU MO file
format.
>>> from babel.messages import Catalog
>>> from gettext import GNUTranslations
>>> from StringIO import StringIO
>>> catalog = Catalog(locale='en_US')
>>> catalog.add('foo', 'Voh')
<Message ...>
>>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
<Message ...>
>>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
<Message ...>
>>> catalog.add('Fizz', '')
<Message ...>
>>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
<Message ...>
>>> buf = StringIO()
>>> write_mo(buf, catalog)
>>> buf.seek(0)
>>> translations = GNUTranslations(fp=buf)
>>> translations.ugettext('foo')
u'Voh'
>>> translations.ungettext('bar', 'baz', 1)
u'Bahr'
>>> translations.ungettext('bar', 'baz', 2)
u'Batz'
>>> translations.ugettext('fuz')
u'fuz'
>>> translations.ugettext('Fizz')
u'Fizz'
>>> translations.ugettext('Fuzz')
u'Fuzz'
>>> translations.ugettext('Fuzzes')
u'Fuzzes'
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param use_fuzzy: whether translations marked as "fuzzy" should be included
in the output
"""
messages = list(catalog)
if not use_fuzzy:
messages[1:] = [m for m in messages[1:] if not m.fuzzy]
messages.sort()
ids = strs = b''
offsets = []
for message in messages:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
if message.pluralizable:
msgid = b'\x00'.join([
msgid.encode(catalog.charset) for msgid in message.id
])
msgstrs = []
for idx, string in enumerate(message.string):
if not string:
msgstrs.append(message.id[min(int(idx), 1)])
else:
msgstrs.append(string)
msgstr = b'\x00'.join([
msgstr.encode(catalog.charset) for msgstr in msgstrs
])
else:
msgid = message.id.encode(catalog.charset)
if not message.string:
msgstr = message.id.encode(catalog.charset)
else:
msgstr = message.string.encode(catalog.charset)
if message.context:
msgid = b'\x04'.join([message.context.encode(catalog.charset),
msgid])
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += msgid + b'\x00'
strs += msgstr + b'\x00'
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
keystart = 7 * 4 + 16 * len(messages)
valuestart = keystart + len(ids)
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
koffsets = []
voffsets = []
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1 + keystart]
voffsets += [l2, o2 + valuestart]
offsets = koffsets + voffsets
fileobj.write(struct.pack('Iiiiiii',
LE_MAGIC, # magic
0, # version
len(messages), # number of entries
7 * 4, # start of key index
7 * 4 + len(messages) * 8, # start of value index
0, 0 # size and offset of hash table
) + array_tobytes(array.array("i", offsets)) + ids + strs)
|
tkaitchuck/nupic | refs/heads/master | external/linux64/lib/python2.6/site-packages/psutil/_pslinux.py | 4 | #!/usr/bin/env python
#
# $Id: _pslinux.py 800 2010-11-12 21:51:25Z g.rodola $
#
__all__ = ["NUM_CPUS", "TOTAL_PHYMEM",
"PlatformProcess",
"avail_phymem", "used_phymem", "total_virtmem", "avail_virtmem",
"used_virtmem", "get_system_cpu_times", "pid_exists", "get_pid_list",
"phymem_buffers", "cached_phymem"
]
import os
import errno
import socket
import struct
import sys
import base64
try:
from collections import namedtuple
except ImportError:
from psutil.compat import namedtuple # python < 2.6
from psutil import _psposix
from psutil.error import AccessDenied, NoSuchProcess
def _get_uptime():
"""Return system boot time (epoch in seconds)"""
f = open('/proc/stat', 'r')
for line in f:
if line.startswith('btime'):
f.close()
return float(line.strip().split()[1])
def _get_num_cpus():
"""Return the number of CPUs on the system"""
num = 0
f = open('/proc/cpuinfo', 'r')
for line in f:
if line.startswith('processor'):
num += 1
f.close()
return num
def _get_total_phymem():
"""Return the total amount of physical memory, in bytes"""
f = open('/proc/meminfo', 'r')
for line in f:
if line.startswith('MemTotal:'):
f.close()
return int(line.split()[1]) * 1024
# Number of clock ticks per second
_CLOCK_TICKS = os.sysconf(os.sysconf_names["SC_CLK_TCK"])
_UPTIME = _get_uptime()
NUM_CPUS = _get_num_cpus()
TOTAL_PHYMEM = _get_total_phymem()
del _get_uptime, _get_num_cpus, _get_total_phymem
# http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h
_TCP_STATES_TABLE = {"01" : "ESTABLISHED",
"02" : "SYN_SENT",
"03" : "SYN_RECV",
"04" : "FIN_WAIT1",
"05" : "FIN_WAIT2",
"06" : "TIME_WAIT",
"07" : "CLOSE",
"08" : "CLOSE_WAIT",
"09" : "LAST_ACK",
"0A" : "LISTEN",
"0B" : "CLOSING"
}
def avail_phymem():
"""Return the amount of physical memory available, in bytes."""
f = open('/proc/meminfo', 'r')
free = None
for line in f:
if line.startswith('MemFree:'):
free = int(line.split()[1]) * 1024
break
f.close()
return free
def used_phymem():
""""Return the amount of physical memory used, in bytes."""
return (TOTAL_PHYMEM - avail_phymem())
def total_virtmem():
""""Return the total amount of virtual memory, in bytes."""
f = open('/proc/meminfo', 'r')
for line in f:
if line.startswith('SwapTotal:'):
f.close()
return int(line.split()[1]) * 1024
def avail_virtmem():
"""Return the amount of virtual memory currently in use on the
system, in bytes.
"""
f = open('/proc/meminfo', 'r')
for line in f:
if line.startswith('SwapFree:'):
f.close()
return int(line.split()[1]) * 1024
def used_virtmem():
"""Return the amount of used memory currently in use on the system,
in bytes.
"""
return total_virtmem() - avail_virtmem()
def cached_phymem():
"""Return the amount of cached memory on the system, in bytes.
This reflects the "cached" column of free command line utility.
"""
f = open('/proc/meminfo', 'r')
for line in f:
if line.startswith('Cached:'):
f.close()
return int(line.split()[1]) * 1024
def phymem_buffers():
"""Return the amount of physical memory buffers used by the
kernel in bytes.
This reflects the "buffers" column of free command line utility.
"""
f = open('/proc/meminfo', 'r')
for line in f:
if line.startswith('Buffers:'):
f.close()
return int(line.split()[1]) * 1024
def get_system_cpu_times():
"""Return a dict representing the following CPU times:
user, nice, system, idle, iowait, irq, softirq.
"""
f = open('/proc/stat', 'r')
values = f.readline().split()
f.close()
values = values[1:8]
values = tuple([float(x) / _CLOCK_TICKS for x in values])
return dict(user=values[0], nice=values[1], system=values[2], idle=values[3],
iowait=values[4], irq=values[5], softirq=values[6])
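# Illustrative sketch (added, not part of the original module): estimating
# overall CPU utilization from two samples of the counters above.
def _example_cpu_percent(interval=0.1):
    import time
    t1 = get_system_cpu_times()
    time.sleep(interval)
    t2 = get_system_cpu_times()
    deltas = dict((k, t2[k] - t1[k]) for k in t2)
    total = sum(deltas.values())
    busy = total - deltas['idle']
    return 100.0 * busy / total if total else 0.0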
def get_pid_list():
"""Returns a list of PIDs currently running on the system."""
pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]
# special case for 0 (kernel process) PID
pids.insert(0, 0)
return pids
def pid_exists(pid):
"""Check For the existence of a unix pid."""
return _psposix.pid_exists(pid)
# --- decorators
def wrap_exceptions(callable):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return callable(self, *args, **kwargs)
except (OSError, IOError), err:
if err.errno == errno.ENOENT: # no such file or directory
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
return wrapper
class LinuxProcess(object):
"""Linux process implementation."""
_meminfo_ntuple = namedtuple('meminfo', 'rss vms')
_cputimes_ntuple = namedtuple('cputimes', 'user system')
_openfile_ntuple = namedtuple('openfile', 'path fd')
_connection_ntuple = namedtuple('connection', 'fd family type local_address '
'remote_address status')
__slots__ = ["pid", "_process_name"]
def __init__(self, pid):
self.pid = pid
self._process_name = None
@wrap_exceptions
def get_process_name(self):
if self.pid == 0:
return 'sched' # special case for kernel process
f = open("/proc/%s/stat" % self.pid)
try:
name = f.read().split(' ')[1].replace('(', '').replace(')', '')
finally:
f.close()
# XXX - gets changed later and probably needs refactoring
return name
def get_process_exe(self):
if self.pid in (0, 2):
return "" # special case for kernel processes
try:
exe = os.readlink("/proc/%s/exe" % self.pid)
except (OSError, IOError), err:
if err.errno == errno.ENOENT:
# no such file error; might be raised also if the
# path actually exists for system processes with
# low pids (about 0-20)
if os.path.lexists("/proc/%s/exe" % self.pid):
return ""
else:
# ok, it is a process which has gone away
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
# It seems symlinks can point to a deleted/invalid location
# (this usually happens with "pulseaudio" process).
# However, if we had permissions to execute readlink() it's
# likely that we'll be able to figure out exe from argv[0]
# later on.
if exe.endswith(" (deleted)") and not os.path.isfile(exe):
return ""
return exe
@wrap_exceptions
def get_process_cmdline(self):
if self.pid == 0:
return [] # special case for kernel process
f = open("/proc/%s/cmdline" % self.pid)
try:
# return the args as a list
return [x for x in f.read().split('\x00') if x]
finally:
f.close()
@wrap_exceptions
def get_cpu_times(self):
# special case for 0 (kernel process) PID
if self.pid == 0:
return self._cputimes_ntuple(0.0, 0.0)
f = open("/proc/%s/stat" % self.pid)
st = f.read().strip()
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
utime = float(values[11]) / _CLOCK_TICKS
stime = float(values[12]) / _CLOCK_TICKS
return self._cputimes_ntuple(utime, stime)
@wrap_exceptions
def get_process_create_time(self):
# special case for 0 (kernel processes) PID; return system uptime
if self.pid == 0:
return _UPTIME
f = open("/proc/%s/stat" % self.pid)
st = f.read().strip()
f.close()
# ignore the first two values ("pid (exe)")
st = st[st.find(')') + 2:]
values = st.split(' ')
# According to documentation, starttime is in field 21 and the
# unit is jiffies (clock ticks).
# We first divide it for clock ticks and then add uptime returning
# seconds since the epoch, in UTC.
starttime = (float(values[19]) / _CLOCK_TICKS) + _UPTIME
return starttime
@wrap_exceptions
def get_memory_info(self):
# special case for 0 (kernel processes) PID
if self.pid == 0:
return self._meminfo_ntuple(0, 0)
f = open("/proc/%s/status" % self.pid)
virtual_size = 0
resident_size = 0
_flag = False
for line in f:
if (not _flag) and line.startswith("VmSize:"):
virtual_size = int(line.split()[1]) * 1024
_flag = True
elif line.startswith("VmRSS"):
resident_size = int(line.split()[1]) * 1024
break
f.close()
return self._meminfo_ntuple(resident_size, virtual_size)
@wrap_exceptions
def get_process_cwd(self):
if self.pid == 0:
return ''
return os.readlink("/proc/%s/cwd" % self.pid)
@wrap_exceptions
def get_process_num_threads(self):
if self.pid == 0:
return 0
f = open("/proc/%s/status" % self.pid)
for line in f:
if line.startswith("Threads:"):
f.close()
return int(line.split()[1])
@wrap_exceptions
def get_open_files(self):
retlist = []
files = os.listdir("/proc/%s/fd" % self.pid)
for fd in files:
file = "/proc/%s/fd/%s" % (self.pid, fd)
if os.path.islink(file):
file = os.readlink(file)
if file.startswith("socket:["):
continue
if file.startswith("pipe:["):
continue
if file == "[]":
continue
if os.path.isfile(file) and not file in retlist:
ntuple = self._openfile_ntuple(file, int(fd))
retlist.append(ntuple)
return retlist
# --- lsof implementation
#
# def get_open_files(self):
# lsof = _psposix.LsofParser(self.pid, self._process_name)
# return lsof.get_process_open_files()
@wrap_exceptions
def get_connections(self):
if self.pid == 0:
return []
inodes = {}
# os.listdir() is gonna raise a lot of access denied
# exceptions in case of unprivileged user; that's fine:
# lsof does the same, so it's unlikely that we can do better.
for fd in os.listdir("/proc/%s/fd" % self.pid):
try:
inode = os.readlink("/proc/%s/fd/%s" % (self.pid, fd))
except OSError:
continue
if inode.startswith('socket:['):
# the process is using a socket
inode = inode[8:][:-1]
inodes[inode] = fd
if not inodes:
# no connections for this process
return []
def process(file, family, _type):
retlist = []
f = open(file)
f.readline() # skip the first line
for line in f:
_, laddr, raddr, status, _, _, _, _, _, inode = line.split()[:10]
if inode in inodes:
laddr = self._decode_address(laddr, family)
raddr = self._decode_address(raddr, family)
if _type == socket.SOCK_STREAM:
status = _TCP_STATES_TABLE[status]
else:
status = ""
fd = int(inodes[inode])
conn = self._connection_ntuple(fd, family, _type, laddr,
raddr, status)
retlist.append(conn)
f.close()
return retlist
tcp4 = process("/proc/net/tcp", socket.AF_INET, socket.SOCK_STREAM)
tcp6 = process("/proc/net/tcp6", socket.AF_INET6, socket.SOCK_STREAM)
udp4 = process("/proc/net/udp", socket.AF_INET, socket.SOCK_DGRAM)
udp6 = process("/proc/net/udp6", socket.AF_INET6, socket.SOCK_DGRAM)
return tcp4 + tcp6 + udp4 + udp6
# --- lsof implementation
#
# def get_connections(self):
# lsof = _psposix.LsofParser(self.pid, self._process_name)
# return lsof.get_process_connections()
@wrap_exceptions
def get_process_ppid(self):
if self.pid == 0:
return 0
f = open("/proc/%s/status" % self.pid)
for line in f:
if line.startswith("PPid:"):
# PPid: nnnn
f.close()
return int(line.split()[1])
@wrap_exceptions
def get_process_uid(self):
if self.pid == 0:
return 0
f = open("/proc/%s/status" % self.pid)
for line in f:
if line.startswith('Uid:'):
# Uid line provides 4 values which stand for real,
# effective, saved set, and file system UIDs.
# We want to provide real UID only.
f.close()
return int(line.split()[1])
@wrap_exceptions
def get_process_gid(self):
if self.pid == 0:
return 0
f = open("/proc/%s/status" % self.pid)
for line in f:
if line.startswith('Gid:'):
# Uid line provides 4 values which stand for real,
# effective, saved set, and file system GIDs.
# We want to provide real GID only.
f.close()
return int(line.split()[1])
@staticmethod
def _decode_address(addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IPv4 address portion is a little-endian four-byte hexadecimal
number; that is, the least significant byte is listed first,
so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
if sys.version_info >= (3,):
ip = ip.encode('ascii')
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if family == socket.AF_INET:
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else: # IPv6
# old version - let's keep it, just in case...
#ip = ip.decode('hex')
#return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
ip = socket.inet_ntop(socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
return (ip, port)
PlatformProcess = LinuxProcess
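# Illustrative sketch (added, not part of the original module), following the
# docstring example in _decode_address() above:
def _example_decode_address():
    # "0500000A" is little-endian 10.0.0.5; "0016" is hexadecimal port 22.
    return LinuxProcess._decode_address("0500000A:0016", socket.AF_INET)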
|
cdrooom/odoo | refs/heads/master | addons/l10n_hr/__openerp__.py | 8 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Module: l10n_hr
# Author: Goran Kliska
# mail: goran.kliska(AT)slobodni-programi.hr
# Copyright: Slobodni programi d.o.o., Zagreb
# Contributions:
# Tomislav Bošnjaković, Storm Computers d.o.o. :
# - account types
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Croatia - RRIF 2012 COA",
"description": """
Croatian localisation.
======================
Author: Goran Kliska, Slobodni programi d.o.o., Zagreb
http://www.slobodni-programi.hr
Contributions:
Tomislav Bošnjaković, Storm Computers: account types
Ivan Vađić, Slobodni programi: account types
Description:
Croatian Chart of Accounts (RRIF ver.2012)
RRIF's chart of accounts for entrepreneurs for 2012.
Account types
Chart of accounts based on RRIF's, adapted by shortening names and adding analytic accounts
Tax groups according to the tax return
VAT form taxes
Other taxes
Basic fiscal positions
Data sources:
http://www.rrif.hr/dok/preuzimanje/rrif-rp2011.rar
http://www.rrif.hr/dok/preuzimanje/rrif-rp2012.rar
""",
"version": "12.2",
"author": "OpenERP Croatian Community",
"category": 'Localization/Account Charts',
"website": "https://code.launchpad.net/openobject-croatia",
'depends': [
'account',
'account_chart',
],
'data': [
'data/account.account.type.csv',
'data/account.tax.code.template.csv',
'data/account.account.template.csv',
'l10n_hr_chart_template.xml',
'l10n_hr_wizard.xml',
'data/account.tax.template.csv',
'data/fiscal_position_template.xml',
],
"demo": [],
'test': [],
"active": False,
"installable": True,
}
|
felipelindemberg/ControleMultimidiaUniversal | refs/heads/master | Python_Controle_Multimidia_Universal/trunk/Comodo/Room.py | 1 | # coding: utf-8
import sched, time
from threading import Timer
from TV import * # @UnusedWildImport
from SOUND import * # @UnusedWildImport
class Room:
"""Classe Room é uma classe que vai simular um cômodo de uma residência
:version 153
:author Felipe Lindemberg
"""
def __init__(self, name):
"""Construtor da classe
:Param name: Nome do cômodo a ser criado
:Type name: String
"""
self.__rooms = {}
self.__port = None
self.__name = name
self.__numberOfPeoples = 0
self.__equipments = {}
self.addEquipment("tv", TV())
self.addEquipment("som", SOUND())
self.__control = False
self.__eventTimer = sched.scheduler(time.time, time.sleep)
self.__cancelEventTimer = None
def setPort(self, port):
"""Método modificador da porta do cômodo
:Param port: Porta do cômodo
:Type port: String
"""
self.__port = port
def getPort(self):
"""Método acessador da porta do cômodo
:Return: Porta do cômodo
:Rtype: String
"""
return self.__port
def sleep(self, time):
"""Método acessador da porta do cômodo
:Return: Porta do cômodo
:Rtype: String
"""
print("entre no sleep")
self.__cancelEventTimer = Timer(time, self.powerOffEquipments, ())
self.__cancelEventTimer.start()
#self.__cancelEventTimer = self.__eventTimer.enterabs(time, 1, self.powerOffEquipments, ())
return "oi"
def cancelSleep(self):
"""Método acessador da porta do cômodo
:Return: Porta do cômodo
:Rtype: String
"""
if(self.__cancelEventTimer != None):
self.__cancelEventTimer.cancel()
self.__cancelEventTimer = None
return "oi"
def addEquipment(self, nameEquipment, equipment):
"""Método que faz a adição de equipamentos no cômodo
:Param nameEquipment: Nome do equipamento
:Type nameEquipment: String
:Param equipment: Equipamento
:Type equipment: Objeto Aparelho
:Return: Booleano correspondente a adição do equipamento
:Rtype: Boolean
"""
nameEquipment = nameEquipment.lower()
if(nameEquipment not in self.__equipments.keys()):
self.__equipments[nameEquipment] = equipment
return True
return False
def getEquipment(self, nameEquipment):
"""Método acessador do equipamento presente no cômodo
:Param nameEquipment: Nome do equipamento
:Type nameEquipment: String
:Return: O equipamento
:Rtype: Objeto Aparelho
"""
nameEquipment = nameEquipment.lower()
if(nameEquipment in self.__equipments.keys()):
return self.__equipments[nameEquipment]
return False
def controlIsFound(self, isFound):
"""Método modificador da presença do controle no cômodo
:Param isFound: Booleano correspondente a presença
:Type isFound: Boolean
"""
self.__control = isFound
numTemp = self.getNumberOfPeoples()
if(isFound):
self.setNumberOfPeoples(numTemp + 1)
else:
self.setNumberOfPeoples(numTemp - 1)
def getControlIsFound(self):
"""Método acessador da presença do controle no cômodo
:Return: Booleano correspondente a presença
:Rtype: Boolean
"""
return self.__control
def getEquipments(self):
"""Método acessador dos equipamentos presentes no cômodo
:Return: Equipamentos do cômodo
:Rtype: Lista
"""
return self.__equipments.keys()
def getName(self):
"""Método acessador do nome do cômodo
:Return: O nome do cômodo
:Rtype: String
"""
return self.__name
def getSound(self):
"""Método acessador de Som
:Return: O equipamento Som
:Rtype: Objeto Som
"""
return self.getEquipment("Som")
#return self.som
def getTv(self):
"""Método acessador de TV
:Return: O equipamento TV
:Rtype: Objeto TV
"""
return self.getEquipment("TV")
def getNumberOfPeoples(self):
"""Método acessador do número de pessoas presentes no cômodo
:Return: Número de pessoas presentes no cômodo
:TypeReturn: Inteiro
"""
return self.__numberOfPeoples
def setNumberOfPeoples(self, number):
"""Método modificador do número de pessoas presentes no cômodo
:Param number: Número de pessoas
:Type number: Inteiro
:Return: Booleano referente a modificação
:Rtype: Boolean
"""
number = int(number)
if (((number >= 0 and not self.__control ) or (number >= 1 and self.__control)) and number < 10):
self.__numberOfPeoples = number
return True
elif (number >= 10):
self.__numberOfPeoples = 9
return False
elif (number < 0 and self.__control):
self.__numberOfPeoples = 0
return False
return False
def getCommands(self):
"""Método acessador dos comandos enviados aos aparelhos
:Return: Comandos
:Rtype: Lista
"""
return self.__rooms.keys()
def powerOffEquipments(self):
"""Método que faz o desligamento dos aparelhos do cômodo
"""
print("tou no poweroff")
self.getTv().turnOff()
self.getSound().turnOff()
|
elektroschmock/android_kernel_google_msm | refs/heads/cm-10.2 | tools/perf/scripts/python/netdev-times.py | 11271 | # Display the flow of packets and their processing times.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script are inserted here
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
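# Worked example (added): diff_msec(1000000, 2500000) == 1.5, i.e. the
# interval from 1 msec to 2.5 msec is 1.5 msec.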
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, rc, dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
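# Usage note (a hedged sketch, kept as comments so the script body is
# unchanged): this file is meant to be driven by `perf script`, and
# trace_begin() above reads its options from sys.argv. Assuming a perf.data
# recorded with the irq, napi, net and skb tracepoints handled above, an
# invocation could look like:
#
#   perf script -s netdev-times.py tx rx dev=eth0 debug
#
# 'tx'/'rx' select which hunks are printed (both are enabled by default),
# 'dev=<name>' filters by device, and 'debug' dumps the buffer status that
# trace_end() prints. The exact record step is an assumption.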
|
nlholdem/icodoom | refs/heads/master | .venv/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/graph_actions.py | 23 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level operations on graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import threading
import time
import numpy as np
from six import reraise
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import ops as contrib_ops
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor as tf_supervisor
from tensorflow.python.util.deprecation import deprecated
# Singleton for SummaryWriter per logdir folder.
_SUMMARY_WRITERS = {}
# Lock protecting _SUMMARY_WRITERS
_summary_writer_lock = threading.Lock()
_graph_action_deprecation = deprecated(
'2017-02-15',
'graph_actions.py will be deleted. Use tf.train.* utilities instead. '
'You can use learn/estimators/estimator.py as an example.')
@_graph_action_deprecation
def clear_summary_writers():
"""Clear cached summary writers. Currently only used for unit tests."""
return summary_io.SummaryWriterCache.clear()
def get_summary_writer(logdir):
"""Returns single SummaryWriter per logdir in current run.
Args:
logdir: str, folder to write summaries.
Returns:
Existing `SummaryWriter` object or new one if never wrote to given
directory.
"""
return summary_io.SummaryWriterCache.get(logdir)
def _make_saver(graph, keep_checkpoint_max=5):
vars_to_save = (graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
graph.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
if vars_to_save:
return tf_saver.Saver(vars_to_save,
sharded=True,
max_to_keep=keep_checkpoint_max)
else:
return None
def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):
logging.info('Loading model from checkpoint: %s.', checkpoint_path)
saver = saver or _make_saver(graph)
if saver:
saver.restore(session, checkpoint_path)
else:
logging.info('No variables found in graph, not creating Saver() object.')
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
"""Runs session for given tensors with monitor callbacks."""
for monitor in monitors:
tensors += monitor.step_begin(step)
tensors = list(set(tensors))
outputs = session.run(tensors, feed_dict=feed_dict)
outputs = dict(zip(
[t.name if isinstance(t, ops.Tensor) else t for t in tensors],
outputs))
should_stop = False
for monitor in monitors:
induce_stop = monitor.step_end(step, outputs)
should_stop = should_stop or induce_stop
return outputs, should_stop
def _monitored_train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
supervisor_save_model_steps=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000.0,
supervisor_save_summaries_secs=None,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
hooks=None,
max_steps=None):
"""Train a model via monitored_session.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
run a training loop. The given `train_op` performs one step of training on the
model. The `loss_op` represents the objective function of the training. It is
expected to increment the `global_step_tensor`, a scalar integer tensor
counting training steps. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
`fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
program is terminated with exit code 1.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss. A `0` or negative value disables logging.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save checkpoints every this many seconds. Can
not be specified with `supervisor_save_model_steps`.
supervisor_save_model_steps: Save checkpoints every this many steps. Can not
be specified with `supervisor_save_model_secs`.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
arg to `tf.Saver` constructor.
keep_checkpoint_every_n_hours: In addition to keeping the most recent
      `keep_checkpoint_max` checkpoint files, you might want to keep one
      checkpoint file for every N hours of training. This can be useful if
      you want to later analyze how a model progressed during a long
      training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
supervisor_save_summaries_secs: Save summaries every
`supervisor_save_summaries_secs` seconds when training.
supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training. Exactly one of
      `supervisor_save_summaries_steps` and `supervisor_save_summaries_secs`
      should be specified, and the other should be None.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
      train forever. Two calls of fit(steps=100) mean 200 training
      iterations. On the other hand, two calls of fit(max_steps=100) mean
      that the second call performs no iterations, since the first call
      already did all 100 steps.
Returns:
The final loss value.
Raises:
ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
is not provided. See `tf.contrib.framework.get_global_step` for how we
look up the latter if not provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
if train_op is None:
raise ValueError('Missing train_op.')
if loss_op is None:
raise ValueError('Missing loss_op.')
if hooks is None:
hooks = []
if not isinstance(hooks, list):
raise ValueError('Hooks should be a list.')
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
if global_step_tensor is None:
raise ValueError('No "global_step" was provided or found in the graph.')
if max_steps is not None:
try:
start_step = load_variable(output_dir, global_step_tensor.name)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been '
                       'reached.')
return None
except: # pylint: disable=bare-except
pass
  # Adapted SessionRunHooks such as ExportMonitor depend on the
  # CheckpointSaverHook having executed before they execute themselves.
  # The `hooks` param consists of deprecated monitor hooks
  # (such as ExportMonitor); they are appended after the
  # basic_session_run_hooks.
all_hooks = []
with graph.as_default():
all_hooks.append(basic_session_run_hooks.NanTensorHook(
loss_op, fail_on_nan_loss=fail_on_nan_loss))
if log_every_steps > 0:
all_hooks.append(basic_session_run_hooks.LoggingTensorHook({
'loss': loss_op.name,
'step': global_step_tensor.name
}, every_n_iter=log_every_steps))
def make_saver():
return tf_saver.Saver(
sharded=True,
max_to_keep=keep_checkpoint_max,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
defer_build=True)
scaffold = monitored_session.Scaffold(
init_op=init_op,
init_feed_dict=init_feed_dict,
init_fn=init_fn,
saver=monitored_session.Scaffold.get_or_default('saver',
ops.GraphKeys.SAVERS,
make_saver))
if not supervisor_is_chief:
session_creator = monitored_session.WorkerSessionCreator(
scaffold=scaffold,
master=supervisor_master)
else:
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=output_dir,
master=supervisor_master)
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
summary_writer=summary_writer))
all_hooks.append(
basic_session_run_hooks.SummarySaverHook(
save_secs=supervisor_save_summaries_secs,
save_steps=supervisor_save_summaries_steps,
summary_writer=summary_writer,
scaffold=scaffold))
if (supervisor_save_model_secs is not None
or supervisor_save_model_steps is not None):
all_hooks.append(
basic_session_run_hooks.CheckpointSaverHook(
output_dir,
save_secs=supervisor_save_model_secs,
save_steps=supervisor_save_model_steps,
scaffold=scaffold))
if steps is not None or max_steps is not None:
all_hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
all_hooks.extend(hooks)
with monitored_session.MonitoredSession(
session_creator=session_creator,
hooks=all_hooks) as super_sess:
loss = None
while not super_sess.should_stop():
_, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else
None)
summary_io.SummaryWriterCache.clear()
return loss
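# A hedged usage sketch for _monitored_train() (comments only, so module
# import behavior is unchanged). The names my_graph, my_train_op,
# my_loss_op and the output path are illustrative assumptions, not part of
# this module:
#
#   with ops.Graph().as_default() as my_graph:
#     ...  # build the model, producing my_train_op and my_loss_op
#   final_loss = _monitored_train(graph=my_graph,
#                                 output_dir='/tmp/my_model',
#                                 train_op=my_train_op,
#                                 loss_op=my_loss_op,
#                                 max_steps=1000)
#
# Checkpointing, summaries, step counting and NaN detection are handled by
# the SessionRunHooks assembled in the function body above.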
@_graph_action_deprecation
def train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
keep_checkpoint_max=5,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None,
max_steps=None):
"""Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
run a training loop. The given `train_op` performs one step of training on the
model. The `loss_op` represents the objective function of the training. It is
expected to increment the `global_step_tensor`, a scalar integer tensor
counting training steps. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
`fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
program is terminated with exit code 1.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
arg to tf.Saver constructor.
supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
      train forever. Two calls of fit(steps=100) mean 200 training
      iterations. On the other hand, two calls of fit(max_steps=100) mean
      that the second call performs no iterations, since the first call
      already did all 100 steps.
Returns:
The final loss value.
Raises:
ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
is not provided. See `tf.contrib.framework.get_global_step` for how we
look up the latter if not provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
while True:
try:
return _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps)
except errors.AbortedError:
# Happens when PS restarts, keep training.
logging.warning('Training got Aborted error. Keep training.')
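# Design note: train() wraps _train_internal() in a retry loop because, in
# a distributed job, an AbortedError typically means a parameter server
# restarted; retrying lets the worker re-attach and resume from the last
# checkpoint instead of failing the run. A hedged call sketch (all names
# below are illustrative assumptions):
#
#   loss = train(graph=my_graph, output_dir='/tmp/my_model',
#                train_op=my_train_op, loss_op=my_loss_op, steps=100)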
def _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps):
"""See train."""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
if train_op is None:
raise ValueError('Missing train_op.')
if loss_op is None:
raise ValueError('Missing loss_op.')
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
if global_step_tensor is None:
raise ValueError('No "global_step" was provided or found in the graph.')
# Get current step.
try:
start_step = load_variable(output_dir, global_step_tensor.name)
except (errors.NotFoundError, ValueError):
start_step = 0
summary_writer = (get_summary_writer(output_dir)
if supervisor_is_chief else None)
# Add default chief monitors if none were provided.
if not monitors:
monitors = monitors_lib.get_default_monitors(
loss_op=loss_op,
summary_op=logging_ops.get_summary_op(),
save_summary_steps=supervisor_save_summaries_steps,
summary_writer=summary_writer) if supervisor_is_chief else []
# TODO(ipolosukhin): Replace all functionality of Supervisor
# with Chief-Exclusive Monitors.
if not supervisor_is_chief:
# Prune list of monitor to the ones runnable on all workers.
monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]
if max_steps is None:
max_steps = (start_step + steps) if steps else None
# Start monitors, can create graph parts.
for monitor in monitors:
monitor.begin(max_steps=max_steps)
supervisor = tf_supervisor.Supervisor(
graph,
init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
init_feed_dict=init_feed_dict,
is_chief=supervisor_is_chief,
logdir=output_dir,
saver=_make_saver(graph, keep_checkpoint_max),
global_step=global_step_tensor,
summary_op=None,
summary_writer=summary_writer,
save_model_secs=supervisor_save_model_secs,
init_fn=init_fn)
session = supervisor.PrepareSession(master=supervisor_master,
start_standard_services=True)
supervisor.StartQueueRunners(session)
with session:
get_current_step = lambda: session.run(global_step_tensor)
start_step = get_current_step()
last_step = start_step
last_log_step = start_step
loss_value = None
logging.info('Training steps [%d,%s)', last_step, 'inf'
if max_steps is None else str(max_steps))
excinfo = None
try:
while not supervisor.ShouldStop() and (
(max_steps is None) or (last_step < max_steps)):
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
outputs, should_stop = _run_with_monitors(
session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
loss_value = outputs[loss_op.name]
if np.isnan(loss_value):
failure_message = 'Model diverged with loss = NaN.'
if fail_on_nan_loss:
logging.error(failure_message)
raise monitors_lib.NanLossDuringTrainingError()
else:
logging.warning(failure_message)
if should_stop:
break
this_step = get_current_step()
if this_step <= last_step:
logging.error(
'Global step was not incremented by train op at step %s'
': new step %d', last_step, this_step)
last_step = this_step
is_last_step = (max_steps is not None) and (last_step >= max_steps)
if is_last_step or (last_step - last_log_step >= log_every_steps):
logging.info(
'training step %d, loss = %.5f (%.3f sec/batch).',
last_step, loss_value, float(time.time() - start_time))
last_log_step = last_step
except errors.OutOfRangeError as e:
logging.warn('Got exception during tf.learn training loop possibly '
'due to exhausted input queue %s.', e)
except StopIteration:
      logging.info('Exhausted input iterator.')
except BaseException as e: # pylint: disable=broad-except
# Hold on to any other exceptions while we try recording a final
# checkpoint and summary.
excinfo = sys.exc_info()
finally:
try:
# Call supervisor.Stop() from within a try block because it re-raises
# exceptions thrown by the supervised threads.
supervisor.Stop(close_summary_writer=False)
# Save one last checkpoint and summaries
# TODO(wicke): This should be handled by Supervisor
# In case we encountered an exception in the try block before we updated
# last_step, update it here (again).
last_step = get_current_step()
if supervisor_is_chief:
ckpt_path = supervisor.save_path
logging.info('Saving checkpoint for step %d to checkpoint: %s.',
last_step, ckpt_path)
supervisor.saver.save(session, ckpt_path, global_step=last_step)
# Finish monitors.
for monitor in monitors:
monitor.end()
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
'due to exhausted input queue. Note: summary_op is not '
'expected to trigger dequeues. %s.', e)
except BaseException as e: # pylint: disable=broad-except
# If we don't already have an exception to re-raise, raise this one.
if not excinfo:
raise
# Otherwise, log this one and raise the other in the finally block.
logging.error('Got exception during tf.learn final checkpoint %s.', e)
finally:
if excinfo:
reraise(*excinfo)
return loss_value
def _get_first_op_from_collection(collection_name):
elements = ops.get_collection(collection_name)
if elements:
return elements[0]
return None
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
def _get_ready_op():
ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
return ready_op
def _get_local_init_op():
local_init_op = _get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.local_variables_initializer(),
data_flow_ops.tables_initializer()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
return local_init_op
def _eval_results_to_str(eval_results):
return ', '.join('%s = %s' % (k, v) for k, v in sorted(eval_results.items()))
def _write_summary_results(output_dir, eval_results, current_global_step):
"""Writes eval results into summary file in given dir."""
logging.info('Saving evaluation summary for step %d: %s', current_global_step,
_eval_results_to_str(eval_results))
summary_writer = get_summary_writer(output_dir)
summary = summary_pb2.Summary()
for key in eval_results:
if eval_results[key] is None:
continue
value = summary.value.add()
value.tag = key
if (isinstance(eval_results[key], np.float32) or
isinstance(eval_results[key], float)):
value.simple_value = float(eval_results[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary, current_global_step)
summary_writer.flush()
@_graph_action_deprecation
def evaluate(graph,
output_dir,
checkpoint_path,
eval_dict,
update_op=None,
global_step_tensor=None,
supervisor_master='',
log_every_steps=10,
feed_fn=None,
max_steps=None):
"""Evaluate a model loaded from a checkpoint.
Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
loop for `max_steps` steps, or until an exception (generally, an
end-of-input signal from a reader operation) is raised from running
`eval_dict`.
In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
every `log_every_steps` steps, they are logged. At the very end of evaluation,
a summary is evaluated (finding the summary ops using `Supervisor`'s logic)
and written to `output_dir`.
Args:
graph: A `Graph` to train. It is expected that this graph is not in use
elsewhere.
output_dir: A string containing the directory to write a summary to.
checkpoint_path: A string containing the path to a checkpoint to restore.
Can be `None` if the graph doesn't require loading any variables.
eval_dict: A `dict` mapping string names to tensors to evaluate. It is
evaluated in every logging step. The result of the final evaluation is
returned. If `update_op` is None, then it's evaluated in every step. If
`max_steps` is `None`, this should depend on a reader that will raise an
end-of-input exception when the inputs are exhausted.
update_op: A `Tensor` which is run in every step.
global_step_tensor: A `Variable` containing the global step. If `None`,
one is extracted from the graph using the same logic as in `Supervisor`.
Used to place eval summaries on training curves.
supervisor_master: The master string to use when preparing the session.
log_every_steps: Integer. Output logs every `log_every_steps` evaluation
steps. The logs contain the `eval_dict` and timing information.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
max_steps: Integer. Evaluate `eval_dict` this many times.
Returns:
A tuple `(eval_results, global_step)`:
eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
that are the result of running eval_dict in the last step. `None` if no
eval steps were run.
global_step: The global step this evaluation corresponds to.
Raises:
ValueError: if `output_dir` is empty.
"""
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
# Create or get summary op, global_step and saver.
saver = _get_saver()
local_init_op = _get_local_init_op()
ready_for_local_init_op = _get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
ready_op = _get_ready_op()
session_manager = session_manager_lib.SessionManager(
local_init_op=local_init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op)
session, initialized = session_manager.recover_session(
master=supervisor_master,
saver=saver,
checkpoint_dir=checkpoint_path)
# Start queue runners.
coord = coordinator.Coordinator()
threads = queue_runner.start_queue_runners(session, coord)
with session:
if not initialized:
logging.warning('Failed to initialize from %s.', checkpoint_path)
# TODO(ipolosukhin): This should be failing, but old code relies on that.
session.run(variables.global_variables_initializer())
if checkpoint_path:
_restore_from_checkpoint(session, graph, checkpoint_path, saver)
current_global_step = session.run(global_step_tensor)
eval_results = None
# TODO(amodei): Fix this to run through the eval set exactly once.
step = 0
eval_step = None
feed_dict = None
logging.info('Eval steps [%d,%s) for training step %d.', step,
'inf' if max_steps is None
else str(max_steps), current_global_step)
try:
try:
while (max_steps is None) or (step < max_steps):
step += 1
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
if update_op is not None:
session.run(update_op, feed_dict=feed_dict)
else:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# TODO(wicke): We should assert that the global step hasn't changed.
if step % log_every_steps == 0:
if eval_step is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
duration = time.time() - start_time
logging.info('Results after %d steps (%.3f sec/batch): %s.',
step, float(duration),
_eval_results_to_str(eval_results))
finally:
if eval_results is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# Stop session first, before queue runners.
session.close()
# Stop queue runners.
try:
coord.request_stop()
coord.join(threads, stop_grace_period_secs=120)
except (RuntimeError, errors.CancelledError) as e:
logging.warning('Coordinator didn\'t stop cleanly: %s', e)
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
if max_steps is None:
logging.info('Input queue is exhausted.')
else:
logging.warn('Input queue is exhausted: %s.', e)
    # catch StopIteration which is thrown when DataReader is out of data.
except StopIteration as e:
if max_steps is None:
logging.info('Input iterator is exhausted.')
else:
logging.warn('Input iterator is exhausted: %s.', e)
# Save summaries for this evaluation.
_write_summary_results(output_dir, eval_results, current_global_step)
return eval_results, current_global_step
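# A hedged usage sketch for evaluate(); the eval_dict keys, tensors and
# checkpoint path are illustrative assumptions:
#
#   results, global_step = evaluate(
#       graph=my_graph,
#       output_dir='/tmp/my_model/eval',
#       checkpoint_path='/tmp/my_model/model.ckpt-1000',
#       eval_dict={'accuracy': my_accuracy_tensor},
#       max_steps=100)
#
# Per the docstring above, results holds the values from the final eval
# step, and a summary for them is written under output_dir.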
@_graph_action_deprecation
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
"""Run `output_dict` tensors `n` times, with the same `feed_dict` each run.
Args:
output_dict: A `dict` mapping string names to tensors to run. Must all be
from the same graph.
feed_dict: `dict` of input values to feed each run.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
n: Number of times to repeat.
Returns:
A list of `n` `dict` objects, each containing values read from `output_dict`
tensors.
"""
return run_feeds(
output_dict=output_dict,
feed_dicts=itertools.repeat(feed_dict, n),
restore_checkpoint_path=restore_checkpoint_path)
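# Hedged example for run_n(): evaluate the same fetches twice with one
# shared feed (tensor names are assumptions):
#
#   outputs = run_n({'pred': my_prediction_tensor},
#                   feed_dict={my_input: my_batch}, n=2)
#   # outputs is a list of 2 dicts, each with a 'pred' entry.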
@_graph_action_deprecation
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
"""Run `output_dict` tensors with each input in `feed_dicts`.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dicts: Iterable of `dict` objects of input values to feed.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
Yields:
A sequence of dicts of values read from `output_dict` tensors, one item
yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
values are the results read from the corresponding `Tensor` in
`output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
if not output_dict:
raise ValueError('output_dict is invalid: %s.' % output_dict)
if not feed_dicts:
raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
graph = contrib_ops.get_graph_from_inputs(output_dict.values())
with graph.as_default() as g:
with tf_session.Session('') as session:
session.run(
resources.initialize_resources(resources.shared_resources() +
resources.local_resources()))
if restore_checkpoint_path:
_restore_from_checkpoint(session, g, restore_checkpoint_path)
else:
session.run(variables.global_variables_initializer())
session.run(variables.local_variables_initializer())
session.run(data_flow_ops.tables_initializer())
coord = coordinator.Coordinator()
threads = None
try:
threads = queue_runner.start_queue_runners(session, coord=coord)
for f in feed_dicts:
yield session.run(output_dict, f)
finally:
coord.request_stop()
if threads:
coord.join(threads, stop_grace_period_secs=120)
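# Hedged example for run_feeds_iter(): stream results for several feeds
# without materializing them all at once (names are assumptions):
#
#   feeds = ({my_input: batch} for batch in my_batches)
#   for out in run_feeds_iter({'pred': my_prediction_tensor}, feeds):
#     process(out['pred'])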
@_graph_action_deprecation
def run_feeds(*args, **kwargs):
"""See run_feeds_iter(). Returns a `list` instead of an iterator."""
return list(run_feeds_iter(*args, **kwargs))
@_graph_action_deprecation
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
"""Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
Returns:
Dict of values read from `output_dict` tensors. Keys are the same as
`output_dict`, values are the results read from the corresponding `Tensor`
in `output_dict`.
Raises:
    ValueError: if `output_dict` or `feed_dict` is None or empty.
"""
return run_feeds(output_dict=output_dict,
feed_dicts=[feed_dict] if feed_dict is not None else [None],
restore_checkpoint_path=restore_checkpoint_path)[0]
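# Hedged example for infer(): a single restore-and-run call (checkpoint
# path and tensor names are assumptions):
#
#   out = infer('/tmp/my_model/model.ckpt-1000',
#               {'pred': my_prediction_tensor},
#               feed_dict={my_input: my_example})
#   # out['pred'] holds the value read from my_prediction_tensor.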
|
lmazuel/azure-sdk-for-python | refs/heads/master | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/virtual_network_peering_py3.py | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
     network space would be able to access all the VMs in the local virtual
     network space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network.
:type remote_virtual_network:
~azure.mgmt.network.v2017_03_01.models.SubResource
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or
~azure.mgmt.network.v2017_03_01.models.VirtualNetworkPeeringState
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, allow_virtual_network_access: bool=None, allow_forwarded_traffic: bool=None, allow_gateway_transit: bool=None, use_remote_gateways: bool=None, remote_virtual_network=None, peering_state=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(VirtualNetworkPeering, self).__init__(id=id, **kwargs)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
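# A minimal construction sketch (comments only; the field values and the
# resource ID are illustrative assumptions, not defaults of this model):
#
#   peering = VirtualNetworkPeering(
#       allow_virtual_network_access=True,
#       allow_forwarded_traffic=False,
#       remote_virtual_network=SubResource(
#           id='/subscriptions/.../virtualNetworks/remote-vnet'),
#       name='my-peering')
#
# Serialization to the 'properties.*' wire format is driven by the
# _attribute_map above.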
|
jmartinm/invenio | refs/heads/master | modules/webstat/lib/webstat_engine.py | 14 | ## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import calendar, commands, datetime, time, os, cPickle, random, cgi
from operator import itemgetter
from invenio.config import CFG_TMPDIR, \
CFG_SITE_URL, \
CFG_SITE_NAME, \
CFG_BINDIR, \
CFG_CERN_SITE, \
CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, \
CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, \
CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, \
CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, \
CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, \
CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL, \
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE, \
CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED
from invenio.bibindex_tokenizers.BibIndexJournalTokenizer import CFG_JOURNAL_TAG
from invenio.urlutils import redirect_to_url
from invenio.search_engine import perform_request_search, \
get_collection_reclist, \
get_most_popular_field_values, \
search_pattern
from invenio.search_engine_utils import get_fieldvalues
from invenio.dbquery import run_sql, \
wash_table_column_name
from invenio.websubmitadmin_dblayer import get_docid_docname_alldoctypes
from invenio.bibcirculation_utils import book_title_from_MARC, \
book_information_from_MARC
from invenio.bibcirculation_dblayer import get_id_bibrec, \
get_borrower_data
from invenio.websearch_webcoll import CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE
from invenio.dateutils import convert_datetext_to_datestruct, convert_datestruct_to_dategui
from invenio.bibtask import get_modified_records_since
WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
# KEY EVENT TREND SECTION
def get_keyevent_trend_collection_population(args, return_sql=False):
"""
Returns the quantity of documents in Invenio for
the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
if args.get('collection', 'All') == 'All':
sql_query_g = _get_sql_query("creation_date", args['granularity'],
"bibrec")
sql_query_i = "SELECT COUNT(id) FROM bibrec WHERE creation_date < %s"
initial_quantity = run_sql(sql_query_i, (lower, ))[0][0]
return _get_keyevent_trend(args, sql_query_g, initial_quantity=initial_quantity,
return_sql=return_sql, sql_text=
"Previous count: %s<br />Current count: %%s" % (sql_query_i),
acumulative=True)
else:
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
g = get_keyevent_trend_new_records(args, return_sql, True)
sql_query_i = "SELECT id FROM bibrec WHERE creation_date < %s"
if return_sql:
return "Previous count: %s<br />Current count: %s" % (sql_query_i % lower, g)
initial_quantity = len(filter(lambda x: x[0] in ids, run_sql(sql_query_i, (lower, ))))
return _get_trend_from_actions(g, initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative=True)
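# A hedged usage sketch for the trend functions in this section; the exact
# 'granularity' and 't_format' tokens expected by _get_sql_query() and
# _to_datetime() are assumptions here:
#
# args = {'collection': 'All',
#         't_start': '2013-01-01',
#         't_end': '2013-02-01',
#         'granularity': 'day',
#         't_format': '%Y-%m-%d'}
# trend = get_keyevent_trend_collection_population(args)
# # trend is a list of (timestamp, count) pairs suitable for plotting.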
def get_keyevent_trend_new_records(args, return_sql=False, only_action=False):
"""
Returns the number of new records uploaded during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("creation_date", args['granularity'],
"bibrec"),
return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("creation_date", args["granularity"], "bibrec",
extra_select=", id", group_by=False, count=False)
if return_sql:
return sql % (lower, upper)
recs = run_sql(sql, (lower, upper))
if recs:
def add_count(i_list, element):
""" Reduce function to create a dictionary with the count of ids
for each date """
if i_list and element == i_list[-1][0]:
i_list[-1][1] += 1
else:
i_list.append([element, 1])
return i_list
action_dates = reduce(add_count,
map(lambda x: x[0], filter(lambda x: x[1] in ids, recs)),
[])
else:
action_dates = []
if only_action:
return action_dates
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_search_frequency(args, return_sql=False):
"""
Returns the number of searches (of any kind) carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query"),
return_sql=return_sql)
def get_keyevent_trend_comments_frequency(args, return_sql=False):
"""
Returns the number of comments (of any kind) carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT")
else:
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT", conditions=
_get_collection_recids_for_sql_query(args['collection']))
return _get_keyevent_trend(args, sql, return_sql=return_sql)
def get_keyevent_trend_search_type_distribution(args, return_sql=False):
"""
    Returns the number of searches carried out during the given
    timestamp range, partitioned by type: Simple and
    Advanced.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine all simple searches:
simple = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%p=%%'")
# SQL to determine all advanced searches:
advanced = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%as=1%%'")
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, simple,
return_sql=return_sql, sql_text="Simple: %s")
a_trend = _get_keyevent_trend(args, advanced,
return_sql=return_sql, sql_text="Advanced: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, a_trend)
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_trend_download_frequency(args, return_sql=False):
"""
Returns the number of full text downloads carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# Collect list of timestamps of insertion in the specific collection
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("download_time",
args["granularity"], "rnkDOWNLOADS"), return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("download_time", args["granularity"], "rnkDOWNLOADS",
extra_select=", GROUP_CONCAT(id_bibrec)")
if return_sql:
return sql % (lower, upper)
action_dates = []
for result in run_sql(sql, (lower, upper)):
count = result[1]
for id in result[2].split(","):
if id == '' or not int(id) in ids:
count -= 1
action_dates.append((result[0], count))
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
def get_keyevent_trend_number_of_loans(args, return_sql=False):
"""
Returns the number of loans carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("loaned_on",
args["granularity"], "crcLOAN"), return_sql=return_sql)
def get_keyevent_trend_web_submissions(args, return_sql=False):
"""
Returns the quantity of websubmissions in Invenio for
the given timestamp range.
@param args['doctype']: A doctype name
@type args['doctype']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args['doctype'] == 'all':
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, return_sql=return_sql)
else:
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="doctype=%s AND action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, extra_param=[args['doctype']],
return_sql=return_sql)
return res
def get_keyevent_loan_statistics(args, return_sql=False):
"""
Data:
- Number of documents (=records) loaned
- Number of items loaned on the total number of items
- Number of items never loaned on the total number of items
- Average time between the date of the record creation and the date of the first loan
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by item status (available, missing)
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)"
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
param.append('%%%s%%' % args['creation_date'])
param = tuple(param)
# Number of loans:
loans_sql = "SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where
items_loaned_sql = "SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where
# Only the CERN site wants the items of the collection "Books & Proceedings"
if CFG_CERN_SITE:
items_in_book_coll = _get_collection_recids_for_sql_query("Books & Proceedings")
if items_in_book_coll == "":
total_items_sql = 0
else:
total_items_sql = "SELECT COUNT(*) FROM crcITEM WHERE %s" % \
items_in_book_coll
else: # The rest take all the items
total_items_sql = "SELECT COUNT(*) FROM crcITEM"
# Average time between the date of the record creation and the date of the first loan
avg_sql = "SELECT AVG(DATEDIFF(loaned_on, br.creation_date)) " + sql_from
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += ", bibrec br "
avg_sql += sql_where
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += "AND br.id=l.id_bibrec "
if return_sql:
return "<ol><li>%s</li><li>Items loaned * 100 / Number of items <ul><li>\
Items loaned: %s </li><li>Number of items: %s</li></ul></li><li>100 - Items \
loaned on total number of items</li><li>%s</li></ol>" % \
(loans_sql % param, items_loaned_sql % param, total_items_sql, avg_sql % param)
loans = run_sql(loans_sql, param)[0][0]
items_loaned = run_sql(items_loaned_sql, param)[0][0]
if total_items_sql:
total_items = run_sql(total_items_sql)[0][0]
else:
total_items = 0
if total_items == 0:
loaned_on_total = 0
never_loaned_on_total = 0
else:
# Number of items loaned on the total number of items:
loaned_on_total = float(items_loaned) * 100 / float(total_items)
# Number of items never loaned on the total number of items:
never_loaned_on_total = 100.0 - loaned_on_total
avg = run_sql(avg_sql, param)[0][0]
if avg:
avg = float(avg)
else:
avg = 0.0
return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
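# Illustrative sketch (not executed) of consuming the return value above:
# each statistic is wrapped in a 1-tuple, so the four rows unpack directly.
#
#   (loans,), (loaned_pct,), (never_loaned_pct,), (avg_days,) = \
#       get_keyevent_loan_statistics({'t_start': '2011-01-01',
#                                     't_end': '2012-01-01',
#                                     't_format': '%Y-%m-%d'})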
def get_keyevent_loan_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of documents (= records) never loaned
- List of most loaned documents (columns: number of loans,
number of copies and the creation date of the record, in
order to calculate the number of loans by copy), sorted
by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by loan period (4 week loan, one week loan...)
- by a certain number of loans
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['loan_period']: 4 week loan, one week loan...
@type args['loan_period']: str
@param args['min_loans']: minimum number of loans
@type args['min_loans']: int
@param args['max_loans']: maximum number of loans
@type args['max_loans']: int
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_where = []
param = []
sql_from = ""
if 'udc' in args and args['udc'] != '':
sql_where.append("i." + _check_udc_value_where())
param.append(_get_udc_truncated(args['udc']))
if 'loan_period' in args and args['loan_period'] != '':
sql_where.append("loan_period = %s")
param.append(args['loan_period'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where.append("i.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)")
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br"
sql_where.append("br.id=i.id_bibrec AND br.creation_date LIKE %s")
param.append('%%%s%%' % args['creation_date'])
if sql_where:
sql_where = "WHERE %s AND" % " AND ".join(sql_where)
else:
sql_where = "WHERE"
param = tuple(param + [lower, upper])
# SQL for both queries
check_num_loans = "HAVING "
if 'min_loans' in args and args['min_loans'] != '':
check_num_loans += "COUNT(*) >= %s" % args['min_loans']
if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
if check_num_loans != "HAVING ":
check_num_loans += " AND "
check_num_loans += "COUNT(*) <= %s" % args['max_loans']
# Optimized to get all the data in only one query (not call get_fieldvalues several times)
mldocs_sql = "SELECT i.id_bibrec, COUNT(*) \
FROM crcLOAN l, crcITEM i%s %s l.barcode=i.barcode AND type = 'normal' AND \
loaned_on > %%s AND loaned_on < %%s GROUP BY i.id_bibrec %s" % \
(sql_from, sql_where, check_num_loans)
limit_n = ""
if limit > 0:
limit_n = "LIMIT %d" % limit
nldocs_sql = "SELECT id_bibrec, COUNT(*) FROM crcITEM i%s %s \
barcode NOT IN (SELECT id_bibrec FROM crcLOAN WHERE loaned_on > %%s AND \
loaned_on < %%s AND type = 'normal') GROUP BY id_bibrec ORDER BY COUNT(*) DESC %s" % \
(sql_from, sql_where, limit_n)
items_sql = "SELECT id_bibrec, COUNT(*) items FROM crcITEM GROUP BY id_bibrec"
creation_date_sql = "SELECT creation_date FROM bibrec WHERE id=%s"
authors_sql = "SELECT bx.value FROM bib10x bx, bibrec_bib10x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '100__a' AND bibx.id_bibrec=%s"
title_sql = "SELECT GROUP_CONCAT(bx.value SEPARATOR ' ') value FROM bib24x bx, bibrec_bib24x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE %s AND bibx.id_bibrec=%s GROUP BY bibx.id_bibrec"
edition_sql = "SELECT bx.value FROM bib25x bx, bibrec_bib25x AS bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '250__a' AND bibx.id_bibrec=%s"
if return_sql:
return "Most loaned: %s<br \>Never loaned: %s" % \
(mldocs_sql % param, nldocs_sql % param)
mldocs = run_sql(mldocs_sql, param)
items = dict(run_sql(items_sql))
order_m = []
for mldoc in mldocs:
order_m.append([mldoc[0], mldoc[1], items[mldoc[0]], \
float(mldoc[1]) / float(items[mldoc[0]])])
order_m = sorted(order_m, key=itemgetter(3))
order_m.reverse()
# Check limit values
if limit > 0:
order_m = order_m[:limit]
res = [("", "Title", "Author", "Edition", "Number of loans",
"Number of copies", "Date of creation of the record")]
for mldoc in order_m:
res.append(("Most loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', mldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (mldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (mldoc[0], ))),
mldoc[1], mldoc[2],
_check_empty_value(run_sql(creation_date_sql, (mldoc[0], )))))
nldocs = run_sql(nldocs_sql, param)
for nldoc in nldocs:
res.append(("Not loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', nldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (nldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (nldoc[0], ))),
0, items[nldoc[0]],
_check_empty_value(run_sql(creation_date_sql, (nldoc[0], )))))
return (res)
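# Illustrative note (not executed): the result mixes two row types after a
# single header row.
#
#   rows = get_keyevent_loan_lists({'t_start': '2011-01-01',
#                                   't_end': '2012-01-01',
#                                   't_format': '%Y-%m-%d'}, limit=10)
#   # rows[0] is the header; "Most loaned documents" rows come first,
#   # then "Not loaned documents" rows with a loan count of 0.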
def get_keyevent_renewals_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most renewed items stored by decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by collection
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['collection']: collection of the record
@type args['collection']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l, crcITEM i "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
filter_coll = False
if 'collection' in args and args['collection'] != '':
filter_coll = True
recid_list = get_collection_reclist(args['collection'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.id_bibrec, SUM(number_of_renewals) %s %s \
GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC %s" \
% (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Title", "Author", "Edition", "Number of renewals")]
for rec, renewals in run_sql(sql, param):
if filter_coll and rec not in recid_list:
continue
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
return (res)
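# Illustrative usage (not executed); 'Books' is a hypothetical collection name:
#
#   rows = get_keyevent_renewals_lists({'t_start': '2011-01-01',
#                                       't_end': '2012-01-01',
#                                       'collection': 'Books',
#                                       't_format': '%Y-%m-%d'}, limit=5)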
def get_keyevent_returns_table(args, return_sql=False):
"""
Data:
- Number of overdue returns in a timespan
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Overdue returns:
sql = "SELECT COUNT(*) FROM crcLOAN l WHERE loaned_on > %s AND loaned_on < %s AND \
due_date < NOW() AND (returned_on IS NULL OR returned_on > due_date)"
if return_sql:
return sql % (lower, upper)
return ((run_sql(sql, (lower, upper))[0][0], ), )
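# Illustrative usage (not executed): the count comes back as a nested
# 1x1 tuple, mirroring the other keyevent "data" functions.
#
#   ((overdue,),) = get_keyevent_returns_table({'t_start': '2011-01-01',
#                                               't_end': '2012-01-01',
#                                               't_format': '%Y-%m-%d'})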
def get_keyevent_trend_returns_percentage(args, return_sql=False):
"""
Returns the number of overdue returns and the total number of returns
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine overdue returns:
overdue = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL \
AND (returned_on IS NULL OR returned_on > due_date)",
dates_range_param="loaned_on")
# SQL to determine all returns:
total = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL",
dates_range_param="loaned_on")
# Compute the trend for both types
o_trend = _get_keyevent_trend(args, overdue,
return_sql=return_sql, sql_text="Overdue: %s")
t_trend = _get_keyevent_trend(args, total,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (o_trend, t_trend)
return [(o_trend[i][0], (o_trend[i][1], t_trend[i][1]))
for i in range(len(o_trend))]
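# Illustrative shape of the combined trend above (values hypothetical):
# each point pairs a timestamp with an (overdue, total) tuple, ready for a
# multi-series graph, e.g. [('2011-01', (3, 40)), ('2011-02', (1, 35)), ...]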
def get_keyevent_ill_requests_statistics(args, return_sql=False):
"""
Data:
- Number of ILL requests
- Number of satisfied ILL requests 2 weeks after the date of request
creation on a timespan
- Average time between the date and the hour of the ill request
date and the date and the hour of the delivery item to the user
on a timespan
- Average time between the date and the hour the ILL request
was sent to the supplier and the date and hour of the
delivery item on a timespan
Filter by
- in a specified time span
- by type of document (book or article)
- by status of the request (= new, sent, etc.)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
requests_sql = "SELECT COUNT(*) %s %s" % (sql_from, sql_where)
satrequests_sql = "SELECT COUNT(*) %s %s \
AND arrival_date IS NOT NULL AND \
DATEDIFF(arrival_date, period_of_interest_from) < 14 " % (sql_from, sql_where)
avgdel_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, period_of_interest_from, arrival_date)) %s %s \
AND arrival_date IS NOT NULL" % (sql_from, sql_where)
avgsup_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, request_date, arrival_date)) %s %s \
AND arrival_date IS NOT NULL \
AND request_date IS NOT NULL" % (sql_from, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(requests_sql % param, satrequests_sql % param,
avgdel_sql % param, avgsup_sql % param)
# Number of requests:
requests = run_sql(requests_sql, param)[0][0]
# Number of satisfied ILL requests 2 weeks after the date of request creation:
satrequests = run_sql(satrequests_sql, param)[0][0]
# Average time between the date and the hour of the ill request date and
# the date and the hour of the delivery item to the user
avgdel = run_sql(avgdel_sql, param)[0][0]
if avgdel:
avgdel = float(avgdel)
else:
avgdel = 0
# Average time between the date and the hour the ILL request was sent to
# the supplier and the date and hour of the delivery item
avgsup = run_sql(avgsup_sql, param)[0][0]
if avgsup:
avgsup = float(avgsup)
else:
avgsup = 0
return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
def get_keyevent_ill_requests_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of ILL requests
Filter by
- in a specified time span
- by type of request (article or book)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of request (article or book)
@type args['doctype']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE status != '%s' AND request_date > %%s AND request_date < %%s " \
% CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s "
param.append(args['doctype'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT ill.id, item_info %s %s %s" % (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Id", "Title", "Author", "Edition")]
for req_id, item_info in run_sql(sql, param):
item_info = eval(item_info)
try:
res.append((req_id, item_info['title'], item_info['authors'], item_info['edition']))
except KeyError:
pass
return (res)
def get_keyevent_trend_satisfied_ill_requests_percentage(args, return_sql=False):
"""
Returns the number of satisfied ILL requests 2 weeks after the date of request
creation and the total number of ILL requests
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
sql_from = "crcILLREQUEST ill "
sql_where = ""
param = []
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append(CFG_BIBCIRCULATION_ILL_STATUS_CANCELLED)
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# SQL to determine satisfied ILL requests:
satisfied = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() AND \
(arrival_date IS NULL OR arrival_date < ADDDATE(request_date, 14)) " + sql_where)
# SQL to determine all ILL requests:
total = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() "+ sql_where)
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, satisfied, extra_param=param,
return_sql=return_sql, sql_text="Satisfied: %s")
t_trend = _get_keyevent_trend(args, total, extra_param=param,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, t_trend)
return [(s_trend[i][0], (s_trend[i][1], t_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_items_statistics(args, return_sql=False):
"""
Data:
- The total number of items
- Total number of new items added in last year
Filter by
- in a specified time span
- by collection
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'udc' in args and args['udc'] != '':
sql_where += "i." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
# Number of items:
if sql_where == "WHERE ":
sql_where = ""
items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
# Number of new items:
if sql_where == "":
sql_where = "WHERE creation_date > %s AND creation_date < %s "
else:
sql_where += " AND creation_date > %s AND creation_date < %s "
new_items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
if return_sql:
return "Total: %s <br />New: %s" % (items_sql % tuple(param), new_items_sql % tuple(param + [lower, upper]))
return ((run_sql(items_sql, tuple(param))[0][0], ), (run_sql(new_items_sql, tuple(param + [lower, upper]))[0][0], ))
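# Illustrative usage (not executed):
#
#   (total,), (new,) = get_keyevent_items_statistics(
#       {'t_start': '2011-01-01', 't_end': '2012-01-01',
#        't_format': '%Y-%m-%d'})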
def get_keyevent_items_lists(args, return_sql=False, limit=50):
"""
Lists:
- The list of items
Filter by
- by library (=physical location of the item)
- by status (=on loan, available, requested, missing...)
@param args['library']: physical location of the item
@type args['library']: str
@param args['status']: on loan, available, requested, missing...
@type args['status']: str
"""
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'library' in args and args['library'] != '':
sql_from += ", crcLIBRARY li "
sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
param.append(args['library'])
if 'status' in args and args['status'] != '':
if sql_where != "WHERE ":
sql_where += "AND "
sql_where += "i.status = %s "
param.append(args['status'])
param = tuple(param)
# Results:
res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
if sql_where == "WHERE ":
sql_where = ""
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.barcode, i.id_bibrec %s %s %s" % (sql_from, sql_where, limit)
if len(param) == 0:
sqlres = run_sql(sql)
else:
sqlres = run_sql(sql, tuple(param))
sql = sql % param
if return_sql:
return sql
for barcode, rec in sqlres:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec),
author, edition, barcode,
book_information_from_MARC(int(rec))[1]))
return (res)
def get_keyevent_loan_request_statistics(args, return_sql=False):
"""
Data:
- Number of hold requests, one week after the date of request creation
- Number of successful hold requests transactions
- Average time between the hold request date and the date of delivery document in a year
Filter by
- in a specified time span
- by item status (available, missing)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
param = tuple(param)
custom_table = get_customevent_table("loanrequest")
# Number of hold requests, one week after the date of request creation:
holds = "SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND \
DATEDIFF(ws.creation_time, lr.request_date) >= 7" % (sql_from, custom_table, sql_where)
# Number of successful hold requests transactions
succesful_holds = "SELECT COUNT(*) %s %s AND lr.status='%s'" % (sql_from, sql_where,
CFG_BIBCIRCULATION_REQUEST_STATUS_DONE)
# Average time between the hold request date and the date of delivery document in a year
avg_sql = "SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date)) \
%s, %s ws %s AND ws.request_id=lr.id" % (sql_from, custom_table, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(holds % param, succesful_holds % param, avg_sql % param)
avg = run_sql(avg_sql, param)[0][0]
if avg is int:
avg = int(avg)
else:
avg = 0
return ((run_sql(holds, param)[0][0], ),
(run_sql(succesful_holds, param)[0][0], ), (avg, ))
def get_keyevent_loan_request_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of the most requested items
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND lr." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT lr.barcode %s %s GROUP BY barcode \
ORDER BY COUNT(*) DESC %s" % (sql_from, sql_where, limit)
if return_sql:
return sql % tuple(param)
res = [("Title", "Author", "Edition", "Barcode")]
# Most requested items:
for barcode in run_sql(sql, tuple(param)):
rec = get_id_bibrec(barcode[0])
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
return (res)
def get_keyevent_user_statistics(args, return_sql=False):
"""
Data:
- Total number of active users (to be defined = at least one transaction in the past year)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
param = (lower, upper, lower, upper)
# Total number of active users:
users = "SELECT COUNT(DISTINCT user) FROM ((SELECT id_crcBORROWER user %s %s) \
UNION (SELECT id_crcBORROWER user %s %s)) res" % \
(sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan)
if return_sql:
return users % param
return ((run_sql(users, param)[0][0], ), )
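# Illustrative usage (not executed): "active" means at least one ILL request
# or loan in the period, counted once per borrower via the UNION above.
#
#   ((active_users,),) = get_keyevent_user_statistics(
#       {'t_start': '2011-01-01', 't_end': '2012-01-01',
#        't_format': '%Y-%m-%d'})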
def get_keyevent_user_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most intensive users (ILL requests + Loan)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = (lower, upper, lower, upper)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT user, SUM(trans) FROM \
((SELECT id_crcBORROWER user, COUNT(*) trans FROM crcILLREQUEST ill \
WHERE request_date > %%s AND request_date < %%s GROUP BY id_crcBORROWER) UNION \
(SELECT id_crcBORROWER user, COUNT(*) trans FROM crcLOAN l WHERE loaned_on > %%s AND \
loaned_on < %%s GROUP BY id_crcBORROWER)) res GROUP BY user ORDER BY SUM(trans) DESC \
%s" % (limit)
if return_sql:
return sql % param
res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
# List of most intensive users (ILL requests + Loan):
for borrower_id, trans in run_sql(sql, param):
name, address, mailbox, email = get_borrower_data(borrower_id)
res.append((name, address, mailbox, email, int(trans)))
return (res)
# KEY EVENT SNAPSHOT SECTION
def get_keyevent_snapshot_uptime_cmd():
"""
A specific implementation of get_current_event().
@return: The std-out from the UNIX command 'uptime'.
@type: str
"""
return _run_cmd('uptime').strip().replace('  ', ' ')
def get_keyevent_snapshot_apache_processes():
"""
A specific implementation of get_current_event().
@return: The number of Apache processes, counted via the UNIX command 'ps'.
@type: str
"""
# The number of Apache processes (root+children)
return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
def get_keyevent_snapshot_bibsched_status():
"""
A specific implementation of get_current_event().
@return: Information about the number of tasks in the different status modes.
@type: [(str, int)]
"""
sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
return [(x[0], int(x[1])) for x in run_sql(sql)]
def get_keyevent_snapshot_sessions():
"""
A specific implementation of get_current_event().
@return: The current number of website visitors (guests, logged in)
@type: (int, int)
"""
# SQL to retrieve sessions in the Guests
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email = '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
guests = run_sql(sql)[0][0]
# SQL to retrieve sessions in the Logged in users
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email <> '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
logged_ins = run_sql(sql)[0][0]
# Assemble, according to return type
return (guests, logged_ins)
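# Illustrative usage (not executed):
#
#   guests, logged_ins = get_keyevent_snapshot_sessions()
#   print "%d guests / %d logged-in users online" % (guests, logged_ins)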
def get_keyevent_bibcirculation_report(freq='yearly'):
"""
Monthly and yearly report with the total number of circulation
transactions (loans, renewals, returns, ILL requests, hold request).
@param freq: yearly or monthly
@type freq: str
@return: loans, renewals, returns, ILL requests, hold request
@type: (int, int, int, int, int)
"""
if freq == 'monthly':
datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
else: #yearly
datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
loans, renewals = run_sql("SELECT COUNT(*), \
SUM(number_of_renewals) \
FROM crcLOAN WHERE loaned_on > %s", (datefrom, ))[0]
returns = run_sql("SELECT COUNT(*) FROM crcLOAN \
WHERE returned_on!='0000-00-00 00:00:00' and loaned_on > %s", (datefrom, ))[0][0]
illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
return (loans, renewals, returns, illrequests, holdrequest)
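# Illustrative usage (not executed):
#
#   loans, renewals, returns, ills, holds = \
#       get_keyevent_bibcirculation_report(freq='monthly')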
def get_last_updates():
"""
List the date/time when the last updates were done (easy reading format).
@return: last indexing, last ranking, last sorting, last webcoll run
@type: (datetime, datetime, datetime, datetime)
"""
try:
last_index = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM idxINDEX WHERE \
name="global"')[0][0])))
last_rank = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM rnkMETHOD ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
last_sort = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT last_updated FROM bsrMETHODDATA ORDER BY \
last_updated DESC LIMIT 1')[0][0])))
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(file_coll_last_update.read())))
file_coll_last_update.close()
# database not filled
except IndexError:
return ("", "", "", "")
return (last_index, last_rank, last_sort, last_coll)
def get_list_link(process, category=None):
"""
Builds the link for the list of records not indexed, ranked, sorted or
collected.
@param process: kind of process the records are waiting for (index, rank,
sort, collect)
@type process: str
@param category: specific sub-category of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount)
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
@type category: str
@return: link text
@type: string
"""
if process == "index":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name=%s)', (category,))
elif process == "rank":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
rnkMETHOD WHERE name=%s)', (category,))
elif process == "sort":
list_registers = run_sql('SELECT id FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
bsrMETHODDATA WHERE id_bsrMETHOD=(SELECT id \
FROM bsrMETHOD WHERE name=%s))', (category,))
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
coll_last_update = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = zip(get_modified_records_since(coll_last_update).tolist())
# build the link
if len(list_registers) == 0:
return "Up to date"
link = '<a href="' + CFG_SITE_URL + '/search?p='
for register in list_registers:
link += 'recid%3A' + str(register[0]) + '+or+'
# delete the last '+or+'
link = link[:len(link)-4]
link += '">' + str(len(list_registers)) + '</a>'
return link
def get_search_link(record_id):
"""
Auxiliary function that builds the direct link for a given record.
@param record_id: record's id number
@type record_id: int
@return: link text
@type: string
"""
link = '<a href="' + CFG_SITE_URL + '/record/' + \
str(record_id) + '">Record [' + str(record_id) + ']</a>'
return link
def get_ingestion_matching_records(request=None, limit=25):
"""
Fetches all the records matching a given pattern, arranges them by last
modification date and returns a list.
@param request: requested pattern to match
@type request: str
@return: list of records matching a pattern,
(0,) if no request,
(-1,) if the request was invalid
@type: list
"""
if request is None or request == "":
return (0,)
try:
records = list(search_pattern(p=request))
except Exception:
return (-1,)
if records == []:
return records
# order by most recent modification date
query = 'SELECT id FROM bibrec WHERE '
for r in records:
query += 'id="' + str(r) + '" OR '
query = query[:len(query)-4]
query += ' ORDER BY modification_date DESC LIMIT %s'
list_records = run_sql(query, (limit,))
final_list = []
for lr in list_records:
final_list.append(lr[0])
return final_list
def get_record_ingestion_status(record_id):
"""
Returns the number of ingestion methods not yet applied to a given record.
If 0, the record is up to date.
@param record_id: record id number
@type record_id: int
@return: number of methods not updated for the record
@type: int
"""
counter = 0
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")', (record_id, ))[0][0]
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
rnkMETHOD ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT last_updated FROM \
bsrMETHODDATA ORDER BY last_updated DESC LIMIT 1)', \
(record_id, ))[0][0]
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_coll = file_coll_last_update.read()
file_coll_last_update.close()
counter += run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND \
modification_date >\
%s', (record_id, last_coll,))[0][0]
return counter
def get_specific_ingestion_status(record_id, process, method=None):
"""
Returns whether a record is or not up to date for a given
process and method.
@param record_id: identification number of the record
@type record_id: int
@param process: kind of process the records may be waiting for (index,
rank, sort, collect)
@type process: str
@param method: specific sub-method of the process.
Index: global, collection, abstract, author, keyword,
reference, reportnumber, title, fulltext, year,
journal, collaboration, affiliation, exactauthor,
caption, firstauthor, exactfirstauthor, authorcount
Rank: wrd, demo_jif, citation, citerank_citation_t,
citerank_pagerank_c, citerank_pagerank_t
Sort: latest first, title, author, report number,
most cited
Collect: Empty / None
@type method: str
@return: text: None if the record is up to date
Last time the method was updated if it is waiting
@type: date/time string
"""
exist = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s', (record_id, ))
if exist[0][0] == 0:
return "REG not in DB"
if process == "index":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM idxINDEX WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM idxINDEX WHERE \
name=%s', (method,))[0][0]
elif process == "rank":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM rnkMETHOD WHERE name=%s)',
(record_id, method,))
last_time = run_sql ('SELECT last_updated FROM rnkMETHOD WHERE \
name=%s', (method,))[0][0]
elif process == "sort":
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE \
id=%s AND modification_date > (SELECT \
last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s))', (record_id, method,))
last_time = run_sql ('SELECT last_updated FROM bsrMETHODDATA WHERE \
id_bsrMETHOD=(SELECT id FROM bsrMETHOD \
WHERE name=%s)', (method,))[0][0]
elif process == "collect":
file_coll_last_update = open(CFG_CACHE_LAST_UPDATED_TIMESTAMP_FILE, 'r')
last_time = file_coll_last_update.read()
file_coll_last_update.close()
list_registers = run_sql('SELECT COUNT(*) FROM bibrec WHERE id=%s \
AND modification_date > %s',
(record_id, last_time,))
# no results means the register is up to date
if list_registers[0][0] == 0:
return None
else:
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(last_time)))
def get_title_ingestion(record_id, last_modification):
"""
Auxiliary function that builds a direct link for a given record, with its last
modification date.
@param record_id: id number of the record
@type record_id: string
@param last_modification: date/time of the last modification
@type last_modification: string
@return: link text
@type: string
"""
return '<h3><a href="%s/record/%s">Record [%s] last modification: %s</a></h3>' \
% (CFG_SITE_URL, record_id, record_id, last_modification)
def get_record_last_modification (record_id):
"""
Returns the date/time of the last modification made to a given record.
@param record_id: id number of the record
@type record_id: int
@return: date/time of the last modification
@type: string
"""
return convert_datestruct_to_dategui(convert_datetext_to_datestruct \
(str(run_sql('SELECT modification_date FROM bibrec \
WHERE id=%s', (record_id,))[0][0])))
def get_general_status():
"""
Returns an approximate number of ingestion processes not yet applied to new or
updated records, using the "global" category.
@return: number of processes not updated
@type: int
"""
return run_sql('SELECT COUNT(*) FROM bibrec WHERE \
modification_date > (SELECT last_updated FROM \
idxINDEX WHERE name="global")')[0][0]
# ERROR LOG STATS
def update_error_log_analyzer():
"""Creates splitted files for today's errors"""
_run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
def get_invenio_error_log_ranking():
""" Returns the ranking of the errors in the invenio log"""
return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
def get_invenio_last_n_errors(nerr):
"""Returns the last nerr errors in the invenio log (without details)"""
return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
def get_invenio_error_details(error):
"""Returns the complete text of the invenio error."""
out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
return out
def get_apache_error_log_ranking():
""" Returns the ranking of the errors in the apache log"""
return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
# CUSTOM EVENT SECTION
def get_customevent_trend(args):
"""
Returns trend data for a custom event over a given
timestamp range.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
@param args['cols']: Columns and the content to filter on;
if absent or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
tbl_name = get_customevent_table(args['event_id'])
col_names = get_customevent_args(args['event_id'])
where = []
sql_param = [lower, upper]
for col_bool, col_title, col_content in args['cols']:
if col_title not in col_names:
continue
if col_content:
if col_bool == "" or not where:
where.append(wash_table_column_name(col_title))
elif col_bool == "and":
where.append("AND %s"
% wash_table_column_name(col_title))
elif col_bool == "or":
where.append("OR %s"
% wash_table_column_name(col_title))
elif col_bool == "and_not":
where.append("AND NOT %s"
% wash_table_column_name(col_title))
else:
continue
where.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql = _get_sql_query("creation_time", args['granularity'], tbl_name, " ".join(where))
return _get_trend_from_actions(run_sql(sql, tuple(sql_param)), 0,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
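# Illustrative usage (not executed); 'baskets' and its 'action' column are
# hypothetical. Each entry of args['cols'] is [bool_op, column, content],
# where bool_op is "", "and", "or" or "and_not":
#
#   args = {'event_id': 'baskets', 't_start': '2011-01-01',
#           't_end': '2011-02-01', 'granularity': 'day',
#           't_format': '%Y-%m-%d', 'cols': [["", "action", "display"]]}
#   trend = get_customevent_trend(args)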
def get_customevent_dump(args):
"""
Similar to a get_event_trend implementation, but NO refining aka frequency
handling is carried out whatsoever. This is just a dump. A dump!
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
@param args['cols']: Columns and the content to filter on;
if absent or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Get customevents
# events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
event_list = []
event_cols = {}
for event_id, i in [(args['ids'][i], str(i))
for i in range(len(args['ids']))]:
# Get all the event arguments and creation times
tbl_name = get_customevent_table(event_id)
col_names = get_customevent_args(event_id)
sql_query = ["SELECT * FROM %s WHERE creation_time > '%%s'" % wash_table_column_name(tbl_name), (lower,)] # kwalitee: disable=sql
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols' + i]:
if col_title not in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s" % \
wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s" % \
wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s" % \
wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
res = run_sql(sql, tuple(sql_param))
for row in res:
event_list.append((row[1], event_id, row[2:]))
# Get the event col names
try:
event_cols[event_id] = cPickle.loads(run_sql(
"SELECT cols FROM staEVENT WHERE id = %s",
(event_id, ))[0][0])
except TypeError:
event_cols[event_id] = ["Unnamed"]
event_list.sort()
output = []
for row in event_list:
temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
arguments = ["%s: %s" % (event_cols[row[1]][i],
row[2][i]) for i in range(len(row[2]))]
temp.extend(arguments)
output.append(tuple(temp))
return output
def get_customevent_table(event_id):
"""
Helper function that, for a certain event id, retrieves the corresponding
event table name.
"""
res = run_sql(
"SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
try:
return res[0][0]
except IndexError:
# No such event table
return None
def get_customevent_args(event_id):
"""
Helper function that, for a certain event id, retrieves the corresponding
event argument (column) names.
"""
res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
try:
if res[0][0]:
return cPickle.loads(res[0][0])
else:
return []
except IndexError:
# No such event table
return None
# CUSTOM SUMMARY SECTION
def get_custom_summary_data(query, tag):
"""Returns the annual report data for the specified year
@param query: Search query to make customized report
@type query: str
@param tag: MARC tag for the output
@type tag: str
"""
# Check arguments
if tag == '':
tag = CFG_JOURNAL_TAG.replace("%", "p")
# First get records of the year
recids = perform_request_search(p=query, of="id", wl=0)
# Then return list by tag
pub = get_most_popular_field_values(recids, tag)
if len(pub) == 0:
return []
if CFG_CERN_SITE:
total = sum([x[1] for x in pub])
else:
others = 0
total = 0
first_other = -1
for elem in pub:
total += elem[1]
if elem[1] < 2:
if first_other == -1:
first_other = pub.index(elem)
others += elem[1]
if first_other != -1:
del pub[first_other:]
if others != 0:
pub.append(('Others', others))
pub.append(('TOTAL', total))
return pub
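# Illustrative usage (not executed); the query string is hypothetical:
#
#   pub = get_custom_summary_data('year:2011', '')
#   # pub is a list of (value, count) pairs; low-frequency values are folded
#   # into ('Others', n) and a final ('TOTAL', n) entry is appended.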
def create_custom_summary_graph(data, path, title):
"""
Creates a pie chart with the information from the custom summary and
saves it in the file specified by the path argument
"""
# If no input, we don't bother about anything
if len(data) == 0:
return
os.environ['HOME'] = CFG_TMPDIR
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
return
# make a square figure and axes
matplotlib.rcParams['font.size'] = 8
labels = [x[0] for x in data]
numb_elem = len(labels)
width = 6 + float(numb_elem) / 7
gfile = plt.figure(1, figsize=(width, 6))
plt.axes([0.1, 0.1, 4.2 / width, 0.7])
numb = [x[1] for x in data]
total = sum(numb)
fracs = [x * 100.0 / total for x in numb]
colors = []
random.seed()
for i in range(numb_elem):
col = 0.5 + float(i) / (float(numb_elem) * 2.0)
rand = random.random() / 2.0
if i % 3 == 0:
red = col
green = col + rand
blue = col - rand
if green > 1.0:
green = 1
elif i % 3 == 1:
red = col - rand
green = col
blue = col + rand
if blue > 1.0:
blue = 1
elif i % 3 == 2:
red = col + rand
green = col - rand
blue = col
if red > 1.0:
red = 1
colors.append((red, green, blue))
patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
ttext = plt.title(title)
plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
legend_keywords = {"prop": {"size": "small"}}
plt.figlegend(patches, labels, 'lower right', **legend_keywords)
plt.savefig(path)
plt.close(gfile)
# GRAPHER
def create_graph_trend(trend, path, settings):
"""
Creates a graph representation out of data produced from get_event_trend.
@param trend: The trend data
@type trend: [(str, str|int|(str|int,...))]
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of graph parameters
@type settings: dict
"""
# If no input, we don't bother about anything
if not trend or len(trend) == 0:
return
# If no filename is given, we'll assume STD-out format and ASCII.
if path == '':
settings["format"] = 'asciiart'
if settings["format"] == 'asciiart':
create_graph_trend_ascii_art(trend, path, settings)
elif settings["format"] == 'gnuplot':
create_graph_trend_gnu_plot(trend, path, settings)
elif settings["format"] == "flot":
create_graph_trend_flot(trend, path, settings)
def create_graph_trend_ascii_art(trend, path, settings):
"""Creates the graph trend using ASCII art"""
out = ""
if settings["multiple"] is not None:
# Tokens that will represent the different data sets (maximum 16 sets)
# Set index (=100) to the biggest of the histogram sums
index = max([sum(x[1]) for x in trend])
# Print legend box
out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
else:
index = max([x[1] for x in trend])
width = 82
# Figure out the max length of the xtics, in order to left align
xtic_max_len = max([len(_to_datetime(x[0]).strftime(
settings["xtic_format"])) for x in trend])
for row in trend:
# Print the xtic
xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
try:
col_width = (1.0 * width / index)
except ZeroDivisionError:
col_width = 0
if settings["multiple"] is not None:
# The second value of the row-tuple, represents the n values from
# the n data sets. Each set, will be represented by a different
# ASCII character, chosen from the randomized string
# 'WEBSTAT_GRAPH_TOKENS'.
# NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
# sets are supported.
total = sum(row[1])
for i in range(len(row[1])):
col = row[1][i]
try:
out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
except ZeroDivisionError:
break
if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
out_row += out_row[-1]
else:
total = row[1]
try:
out_row += '-' * int(1.0 * total * col_width)
except ZeroDivisionError:
break
# Print sentinel, and the total
out += out_row + '>' + ' ' * (xtic_max_len + 4 +
width - len(out_row)) + str(total) + '\n'
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
def create_graph_trend_gnu_plot(trend, path, settings):
"""Creates the graph trend using the GNU plot library"""
try:
import Gnuplot
except ImportError:
return
gnup = Gnuplot.Gnuplot()
gnup('set style data steps')
if 'size' in settings:
gnup('set terminal png tiny size %s' % settings['size'])
else:
gnup('set terminal png tiny')
gnup('set output "%s"' % path)
if settings["title"] != '':
gnup.title(settings["title"].replace("\"", ""))
if settings["xlabel"] != '':
gnup.xlabel(settings["xlabel"])
if settings["ylabel"] != '':
gnup.ylabel(settings["ylabel"])
if settings["xtic_format"] != '':
xtics = 'set xtics ('
xtics += ', '.join(['"%s" %d' %
(_to_datetime(trend[i][0], '%Y-%m-%d \
%H:%M:%S').strftime(settings["xtic_format"]), i)
for i in range(len(trend))]) + ')'
gnup(xtics)
gnup('set format y "%.0f"')
# If we have multiple data sets, we need to do
# some magic to make Gnuplot eat it,
# This is basically a matrix transposition,
# and the addition of index numbers.
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
plot_items = []
y_max = 0
y_min = 0
for col in range(cols):
data = []
for row in range(rows):
data.append([row, trend[row][1][col]])
data.append([rows, trend[-1][1][col]])
plot_items.append(Gnuplot.PlotItems
.Data(data, title=settings["multiple"][col]))
tmp_max = max([x[col] for x in data])
tmp_min = min([x[col] for x in data])
if tmp_max > y_max:
y_max = tmp_max
if tmp_min < y_min:
y_min = tmp_min
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(*plot_items)
else:
data = [x[1] for x in trend]
data.append(trend[-1][1])
y_max = max(data)
y_min = min(data)
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(data)
def create_graph_trend_flot(trend, path, settings):
"""Creates the graph trend using the flot library"""
size = settings.get("size", "500,400").split(",")
title = cgi.escape(settings["title"].replace(" ", "")[:10])
out = """<!--[if IE]><script language="javascript" type="text/javascript"
src="%(site)s/js/excanvas.min.js"></script><![endif]-->
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.min.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.selection.min.js"></script>
<script id="source" language="javascript" type="text/javascript">
document.write('<div style="float:left"><div id="placeholder%(title)s" style="width:%(width)spx;height:%(height)spx"></div></div>'+
'<div id="miniature%(title)s" style="float:left;margin-left:20px;margin-top:50px">' +
'<div id="overview%(title)s" style="width:%(hwidth)dpx;height:%(hheigth)dpx"></div>' +
'<p id="overviewLegend%(title)s" style="margin-left:10px"></p>' +
'</div>');
$(function () {
function parseDate%(title)s(sdate){
var div1 = sdate.split(' ');
var day = div1[0].split('-');
var hour = div1[1].split(':');
return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime() - (new Date().getTimezoneOffset() * 60 * 1000) ;
}
function getData%(title)s() {""" % \
{'site': CFG_SITE_URL, 'width': size[0], 'height': size[1], 'hwidth': int(size[0]) / 2,
'hheigth': int(size[1]) / 2, 'title': title}
if len(trend) > 1:
granularity_td = (_to_datetime(trend[1][0], '%Y-%m-%d %H:%M:%S') -
_to_datetime(trend[0][0], '%Y-%m-%d %H:%M:%S'))
else:
granularity_td = datetime.timedelta()
# Create variables with the format dn = [[x1,y1], [x2,y2]]
minx = trend[0][0]
maxx = trend[0][0]
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
first = 0
for col in range(cols):
out += """var d%d = [""" % (col)
for row in range(rows):
if first == 0:
first = 1
else:
out += ", "
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d \
%H:%M:%S'), trend[row][1][col])
out += ", [parseDate%s('%s'), %d]];\n" % (title,
_to_datetime(maxx, '%Y-%m-%d %H:%M:%S')+ granularity_td,
trend[-1][1][col])
out += "return [\n"
first = 0
for col in range(cols):
if first == 0:
first = 1
else:
out += ", "
out += '{data : d%d, label : "%s"}' % \
(col, settings["multiple"][col])
out += "];\n}\n"
else:
out += """var d1 = ["""
rows = len(trend)
first = 0
for row in range(rows):
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
if first == 0:
first = 1
else:
out += ', '
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
trend[row][1])
out += """, [parseDate%s("%s"), %d]];
return [d1];
}
""" % (title, _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') +
granularity_td, trend[-1][1])
# Set options
tics = """yaxis: {
tickDecimals : 0
},"""
if settings["xtic_format"] != '':
current = _to_datetime(maxx, '%Y-%m-%d %H:%M:%S')
next_tic = current + granularity_td
if (granularity_td.seconds + granularity_td.days * 24 * 3600) > 2592000:
next_tic = current.replace(day=31)
tics += 'xaxis: { mode:"time",min:parseDate%s("%s"),max:parseDate%s("%s")},'\
% (title, _to_datetime(minx, '%Y-%m-%d %H:%M:%S'), title, next_tic)
out += """var options%s ={
series: {
lines: { steps: true, fill: true},
points: { show: false }
},
legend: {show: false},
%s
grid: { hoverable: true, clickable: true },
selection: { mode: "xy" }
};
""" % (title, tics, )
# Write the plot method in javascript
out += """var startData%(title)s = getData%(title)s();
var plot%(title)s = $.plot($("#placeholder%(title)s"), startData%(title)s, options%(title)s);
// setup overview
var overview%(title)s = $.plot($("#overview%(title)s"), startData%(title)s, {
legend: { show: true, container: $("#overviewLegend%(title)s") },
series: {
lines: { steps: true, fill: true, lineWidth: 1},
shadowSize: 0
},
%(tics)s
grid: { color: "#999" },
selection: { mode: "xy" }
});
""" % {"title": title, "tics": tics}
# Tooltip and zoom
out += """
function showTooltip%(title)s(x, y, contents) {
$('<div id="tooltip%(title)s">' + contents + '</div>').css( {
position: 'absolute',
display: 'none',
top: y - 5,
left: x + 10,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
opacity: 0.80
}).appendTo("body").fadeIn(200);
}
var previousPoint%(title)s = null;
$("#placeholder%(title)s").bind("plothover", function (event, pos, item) {
if (item) {
if (previousPoint%(title)s != item.datapoint) {
previousPoint%(title)s = item.datapoint;
$("#tooltip%(title)s").remove();
var y = item.datapoint[1];
showTooltip%(title)s(item.pageX, item.pageY, y);
}
}
else {
$("#tooltip%(title)s").remove();
previousPoint%(title)s = null;
}
});
$("#placeholder%(title)s").bind("plotclick", function (event, pos, item) {
if (item) {
plot%(title)s.highlight(item.series, item.datapoint);
}
});
// now connect the two
$("#placeholder%(title)s").bind("plotselected", function (event, ranges) {
// clamp the zooming to prevent eternal zoom
if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
// do the zooming
plot%(title)s = $.plot($("#placeholder%(title)s"), getData%(title)s(ranges.xaxis.from, ranges.xaxis.to),
$.extend(true, {}, options%(title)s, {
xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
}));
// don't fire event on the overview to prevent eternal loop
overview%(title)s.setSelection(ranges, true);
});
$("#overview%(title)s").bind("plotselected", function (event, ranges) {
plot%(title)s.setSelection(ranges);
});
});
</script>
<noscript>Your browser does not support JavaScript!
Please select another output format.</noscript>""" % {'title' : title}
open(path, 'w').write(out)
def get_numeric_stats(data, multiple):
""" Returns average, max and min values for data """
data = [x[1] for x in data]
if data == []:
return (0, 0, 0)
if multiple:
lists = []
for i in range(len(data[0])):
lists.append([x[i] for x in data])
return ([float(sum(x)) / len(x) for x in lists], [max(x) for x in lists],
[min(x) for x in lists])
else:
return (float(sum(data)) / len(data), max(data), min(data))
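# Illustrative example (hypothetical data): for single-value rows,
#   get_numeric_stats([('2009-01-01', 1), ('2009-01-02', 3)], False)
# returns (2.0, 3, 1) -- the average, maximum and minimum of the values.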
def create_graph_table(data, path, settings):
"""
    Creates an HTML table representation of the data.
@param data: The data
@type data: (str,...)
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of table parameters
@type settings: dict
"""
out = """<table border="1">
"""
if settings['rows'] == []:
for row in data:
out += """<tr>
"""
for value in row:
out += """<td>%s</td>
""" % value
out += "</tr>"
else:
for dta, value in zip(settings['rows'], data):
out += """<tr>
<td>%s</td>
<td>
""" % dta
for vrow in value:
out += """%s<br />
""" % vrow
out = out[:-6] + "</td></tr>"
out += "</table>"
open(path, 'w').write(out)
def create_graph_dump(dump, path):
"""
Creates a graph representation out of data produced from get_event_trend.
@param dump: The dump data
@type dump: [(str|int,...)]
@param path: Where to store the graph
@type path: str
"""
out = ""
if len(dump) == 0:
out += "No actions for this custom event " + \
"are registered in the given time range."
else:
# Make every row in dump equally long, insert None if appropriate.
max_len = max([len(x) for x in dump])
events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
cols = ["Event", "Date and time"] + ["Argument %d" % i
for i in range(max_len - 2)]
column_widths = [max([len(str(x[i])) \
for x in events + [cols]]) + 3 for i in range(len(events[0]))]
for i in range(len(cols)):
out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
out += "\n"
for i in range(len(cols)):
out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
out += "\n\n"
for action in dump:
for i in range(len(action)):
if action[i] is None:
temp = ''
else:
temp = action[i]
out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
out += "\n"
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
# EXPORT DATA TO SLS
def get_search_frequency(day=datetime.datetime.now().date()):
"""Returns the number of searches performed in the chosen day"""
searches = get_keyevent_trend_search_type_distribution(get_args(day))
return sum(searches[0][1])
def get_total_records(day=datetime.datetime.now().date()):
"""Returns the total number of records which existed in the chosen day"""
tomorrow = (datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
try:
return get_keyevent_trend_collection_population(args)[0][1]
except IndexError:
return 0
def get_new_records(day=datetime.datetime.now().date()):
"""Returns the number of new records submitted in the chosen day"""
args = {'collection': CFG_SITE_NAME,
't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
't_format': "%Y-%m-%d"}
try:
return (get_total_records(day) -
get_keyevent_trend_collection_population(args)[0][1])
except IndexError:
return 0
def get_download_frequency(day=datetime.datetime.now().date()):
"""Returns the number of downloads during the chosen day"""
return get_keyevent_trend_download_frequency(get_args(day))[0][1]
def get_comments_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
def get_loans_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
def get_web_submissions(day=datetime.datetime.now().date()):
"""Returns the number of web submissions during the chosen day"""
args = get_args(day)
args['doctype'] = 'all'
return get_keyevent_trend_web_submissions(args)[0][1]
def get_alerts(day=datetime.datetime.now().date()):
"""Returns the number of alerts during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'alerts'
return get_customevent_trend(args)[0][1]
def get_journal_views(day=datetime.datetime.now().date()):
"""Returns the number of journal displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'journals'
return get_customevent_trend(args)[0][1]
def get_basket_views(day=datetime.datetime.now().date()):
"""Returns the number of basket displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'baskets'
return get_customevent_trend(args)[0][1]
def get_args(day):
"""Returns the most common arguments for the exporting to SLS methods"""
return {'t_start': day.strftime("%Y-%m-%d"),
't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
'granularity': "day", 't_format': "%Y-%m-%d"}
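# Illustrative example: get_args(datetime.date(2009, 1, 1)) returns
#   {'t_start': '2009-01-01', 't_end': '2009-01-02',
#    'granularity': "day", 't_format': "%Y-%m-%d"}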
# EXPORTER
def export_to_python(data, req):
"""
Exports the data to Python code.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
_export("text/x-python", str(data), req)
def export_to_csv(data, req):
"""
Exports the data to CSV.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
_export('text/csv', '\n'.join(csv_list), req)
def export_to_file(data, req):
"""
Exports the data to a file.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
try:
import xlwt
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet('Sheet 1')
for row in range(0, len(data)):
for col in range(0, len(data[row])):
sheet1.write(row, col, "%s" % data[row][col])
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '') + '.xls'
book.save(filename)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
except ImportError:
csv_list = []
for row in data:
row = ['"%s"' % str(col) for col in row]
csv_list.append(",".join(row))
_export('text/csv', '\n'.join(csv_list), req)
# INTERNAL
def _export(mime, content, req):
"""
    Helper function to pass on the export call. Creates a
    temporary file in which the content is stored, then
    redirects to the export web interface.
"""
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '')
open(filename, 'w').write(content)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), mime))
def _get_trend_from_actions(action_dates, initial_value,
t_start, t_end, granularity, dt_format, acumulative=False):
"""
    Given a list of dates reflecting some sort of action/event, and some additional
    parameters, an internal data format is returned. Setting 'initial_value' to zero
    means that the frequency is counted per interval rather than accumulated.
@param action_dates: A list of dates, indicating some sort of action/event.
@type action_dates: [datetime.datetime]
@param initial_value: The numerical offset the first action's value should make use of.
@type initial_value: int
@param t_start: Start time for the time domain in dt_format
@type t_start: str
@param t_end: End time for the time domain in dt_format
@type t_end: str
@param granularity: The granularity of the time domain, span between values.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
    @param dt_format: Format of the 't_start' and 't_end' parameters
@type dt_format: str
@return: A list of tuples zipping a time-domain and a value-domain
@type: [(str, int)]
"""
    # Work on a mutable copy; datetime.max serves as a sentinel once the list is exhausted
action_dates = list(action_dates)
# Construct the datetime tuple for the stop time
stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
vector = [(None, initial_value)]
try:
upcoming_action = action_dates.pop()
        # Do not count null values (when year, month or day is 0)
if granularity in ("year", "month", "day") and upcoming_action[0] == 0:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
# Create an iterator running from the first day of activity
for current in _get_datetime_iter(t_start, granularity, dt_format):
        # Counter of action_dates in the current span; start from zero
        # unless we are accumulating.
if acumulative:
actions_here = vector[-1][1]
else:
actions_here = 0
# Check to see if there's an action date in the current span
if upcoming_action[0] == {"year": current.year,
"month": current.month,
"day": current.day,
"hour": current.hour,
"minute": current.minute,
"second": current.second
}[granularity]:
actions_here += upcoming_action[1]
try:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
vector.append((current.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
# Make sure to stop the iteration at the end time
if {"year": current.year >= stop_at.year,
"month": current.month >= stop_at.month and current.year == stop_at.year,
"day": current.day >= stop_at.day and current.month == stop_at.month,
"hour": current.hour >= stop_at.hour and current.day == stop_at.day,
"minute": current.minute >= stop_at.minute and current.hour == stop_at.hour,
"second": current.second >= stop_at.second and current.minute == stop_at.minute
}[granularity]:
break
# Remove the first bogus tuple, and return
return vector[1:]
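# Illustrative sketch (hypothetical dates): with daily granularity over three days
# and a single action on the second day, the returned vector resembles
#   [('2009-01-01 00:00:00', 0), ('2009-01-02 00:00:00', 1), ('2009-01-03 00:00:00', 0)]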
def _get_keyevent_trend(args, sql, initial_quantity=0, extra_param=[],
return_sql=False, sql_text='%s', acumulative=False):
"""
Returns the trend for the sql passed in the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = tuple([lower, upper] + extra_param)
if return_sql:
sql = sql % param
return sql_text % sql
return _get_trend_from_actions(run_sql(sql, param), initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative)
def _get_datetime_iter(t_start, granularity='day',
dt_format='%Y-%m-%d %H:%M:%S'):
"""
Returns an iterator over datetime elements starting at an arbitrary time,
with granularity of a [year,month,day,hour,minute,second].
@param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
@type t_start: str
@param granularity: The span between iterable elements, default is 'days'.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' parameter
@type dt_format: str
@return: An iterator of points in time
@type: iterator over datetime elements
"""
tim = _to_datetime(t_start, dt_format)
# Make a time increment depending on the granularity and the current time
    # (the length of years and months varies over time)
    while True:
        yield tim
        if granularity == "year":
            span = {"days": calendar.isleap(tim.year) and 366 or 365}
        elif granularity == "month":
            span = {"days": calendar.monthrange(tim.year, tim.month)[1]}
        elif granularity == "day":
            span = {"days": 1}
        elif granularity == "hour":
            span = {"hours": 1}
        elif granularity == "minute":
            span = {"minutes": 1}
        elif granularity == "second":
            span = {"seconds": 1}
        else:
            # Default just in case
            span = {"days": 1}
        # Build the increment from keyword arguments instead of eval()
        tim += datetime.timedelta(**span)
def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
"""
Transforms a string into a datetime
"""
return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
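# Illustrative example: _to_datetime('2009-01-02 03:04:05') returns
# datetime.datetime(2009, 1, 2, 3, 4, 5).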
def _run_cmd(command):
"""
    Runs a command and returns its string output. If the command is
    not found, a string saying so is returned. Use with caution!
@param command: The UNIX command to execute.
@type command: str
@return: The std-out from the command.
@type: str
"""
return commands.getoutput(command)
def _get_doctypes():
"""Returns all the possible doctypes of a new submission"""
doctypes = [("all", "All")]
for doctype in get_docid_docname_alldoctypes():
doctypes.append(doctype)
return doctypes
def _get_item_statuses():
"""Returns all the possible status of an item"""
return [(CFG_BIBCIRCULATION_ITEM_STATUS_CANCELLED, "Cancelled"),
(CFG_BIBCIRCULATION_ITEM_STATUS_CLAIMED, "Claimed"),
(CFG_BIBCIRCULATION_ITEM_STATUS_IN_PROCESS, "In process"),
(CFG_BIBCIRCULATION_ITEM_STATUS_NOT_ARRIVED, "Not arrived"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_LOAN, "On loan"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_ORDER, "On order"),
(CFG_BIBCIRCULATION_ITEM_STATUS_ON_SHELF, "On shelf")] + \
[(status, status) for status in CFG_BIBCIRCULATION_ITEM_STATUS_OPTIONAL]
def _get_item_doctype():
"""Returns all the possible types of document for an item"""
dts = []
for dat in run_sql("""SELECT DISTINCT(request_type)
FROM crcILLREQUEST ORDER BY request_type ASC"""):
dts.append((dat[0], dat[0]))
return dts
def _get_request_statuses():
"""Returns all the possible statuses for an ILL request"""
dts = []
for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_libraries():
"""Returns all the possible libraries"""
dts = []
for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
        if not CFG_CERN_SITE or "CERN" not in dat[0]:  # do not add internal libraries for CERN site
dts.append((dat[0], dat[0]))
return dts
def _get_loan_periods():
"""Returns all the possible loan periods for an item"""
dts = []
for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_tag_name(tag):
"""
For a specific MARC tag, it returns the human-readable name
"""
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag + '%',))
if res:
return res[0][0]
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag[:-1] + '%',))
if res:
return res[0][0]
return ''
def _get_collection_recids_for_sql_query(coll):
ids = get_collection_reclist(coll).tolist()
if len(ids) == 0:
return ""
return "id_bibrec IN %s" % str(ids).replace('[', '(').replace(']', ')')
def _check_udc_value_where():
return "id_bibrec IN (SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s) "
def _get_udc_truncated(udc):
if udc[-1] == '*':
return "%s%%" % udc[:-1]
if udc[0] == '*':
return "%%%s" % udc[1:]
return "%s" % udc
def _check_empty_value(value):
if len(value) == 0:
return ""
else:
return value[0][0]
def _get_granularity_sql_functions(granularity):
try:
return {
"year": ("YEAR",),
"month": ("YEAR", "MONTH",),
"day": ("MONTH", "DAY",),
"hour": ("DAY", "HOUR",),
"minute": ("HOUR", "MINUTE",),
"second": ("MINUTE", "SECOND")
}[granularity]
except KeyError:
return ("MONTH", "DAY",)
def _get_sql_query(creation_time_name, granularity, tables_from, conditions="",
extra_select="", dates_range_param="", group_by=True, count=True):
if len(dates_range_param) == 0:
dates_range_param = creation_time_name
conditions = "%s > %%s AND %s < %%s %s" % (dates_range_param, dates_range_param,
len(conditions) > 0 and "AND %s" % conditions or "")
values = {'creation_time_name': creation_time_name,
'granularity_sql_function': _get_granularity_sql_functions(granularity)[-1],
'count': count and ", COUNT(*)" or "",
'tables_from': tables_from,
'conditions': conditions,
'extra_select': extra_select,
'group_by': ""}
if group_by:
values['group_by'] = "GROUP BY "
for fun in _get_granularity_sql_functions(granularity):
values['group_by'] += "%s(%s), " % (fun, creation_time_name)
values['group_by'] = values['group_by'][:-2]
return "SELECT %(granularity_sql_function)s(%(creation_time_name)s) %(count)s %(extra_select)s \
FROM %(tables_from)s WHERE %(conditions)s \
%(group_by)s \
ORDER BY %(creation_time_name)s DESC" % values
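# Illustrative example (table and column names are hypothetical):
# _get_sql_query("creation_date", "day", "bibrec") yields, modulo whitespace,
#   SELECT DAY(creation_date), COUNT(*) FROM bibrec
#   WHERE creation_date > %s AND creation_date < %s
#   GROUP BY MONTH(creation_date), DAY(creation_date)
#   ORDER BY creation_date DESC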
|
iTeam-org/iteam-site | refs/heads/dev | iTeam/pages/views.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Adrien Chardon
# @Date: 2014-08-19 18:35:38
# @Last Modified by: Adrien Chardon
# @Last Modified time: 2014-12-04 16:35:31
# This file is part of iTeam.org.
# Copyright (C) 2014 Adrien Chardon (Nodraak).
#
# iTeam.org is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iTeam.org is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with iTeam.org. If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import render
from django.utils import timezone
from iTeam.publications.models import Publication
from iTeam.events.models import Event
def index(request):
return render(request, 'pages/index.html')
def home(request):
publications_list = Publication.objects.all().filter(pub_date__lte=timezone.now(), is_draft=False). \
order_by('-pub_date')[:5]
events_list = Event.objects.all().filter(is_draft=False). \
order_by('-date_start')[:5]
return render(request, 'home.html', {"publications_list": publications_list, 'events_list': events_list})
def apropos(request):
return render(request, 'pages/apropos.html')
def hallOfFame(request):
return render(request, 'pages/hallOfFame.html')
def cookies(request):
return render(request, 'pages/cookies.html')
def links(request):
return render(request, 'pages/links.html')
def md(request):
return render(request, 'pages/md.html')
|
slevenhagen/odoo-npg | refs/heads/8.0 | addons/base_report_designer/plugin/openerp_report_designer/bin/script/ExportToRML.py | 293 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2013 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import os
import uno
import unohelper
import string
import tempfile
import base64
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from LoginTest import *
from lib.error import *
from lib.tools import *
from lib.logreport import *
from lib.rpc import *
database="test"
uid = 3
class ExportToRML( unohelper.Base, XJobExecutor ):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
global url
self.sock=RPCSession(url)
# Read Data from sxw file
tmpsxw = tempfile.mktemp('.'+"sxw")
if not doc.hasLocation():
mytype = Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),)
doc.storeAsURL("file://"+tmpsxw,mytype)
data = read_data_from_file( get_absolute_file_path( doc.getURL()[7:] ) )
file_type = doc.getURL()[7:].split(".")[-1]
if docinfo.getUserFieldValue(2) == "":
ErrorDialog("Please Save this file on server","Use Send To Server Option in Odoo Report Menu","Error")
exit(1)
filename = self.GetAFileName()
if not filename:
exit(1)
global passwd
self.password = passwd
try:
res = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'sxwtorml',base64.encodestring(data),file_type)
if res['report_rml_content']:
write_data_to_file(get_absolute_file_path(filename), res['report_rml_content'])
except Exception,e:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ExportToRML',LOG_ERROR, info)
ErrorDialog("Cannot save the file to the hard drive.", "Exception: %s." % e, "Error" )
def GetAFileName(self):
sFilePickerArgs = Array(10)
oFileDialog = createUnoService("com.sun.star.ui.dialogs.FilePicker")
oFileDialog.initialize(sFilePickerArgs)
oFileDialog.appendFilter("Odoo Report File Save To ....","*.rml")
f_path = "OpenERP-"+ os.path.basename( tempfile.mktemp("","") ) + ".rml"
initPath = tempfile.gettempdir()
oUcb = createUnoService("com.sun.star.ucb.SimpleFileAccess")
if oUcb.exists(initPath):
oFileDialog.setDisplayDirectory('file://' + ( os.name == 'nt' and '/' or '' ) + initPath )
oFileDialog.setDefaultName(f_path )
sPath = oFileDialog.execute() == 1 and oFileDialog.Files[0] or ''
oFileDialog.dispose()
sPath = sPath[7:]
if sPath.startswith('localhost/'):
slash = int(os.name == 'nt')
sPath = sPath[9 + slash:]
return sPath
if __name__<>"package" and __name__=="__main__":
ExportToRML(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( ExportToRML, "org.openoffice.openerp.report.exporttorml", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
eugene1g/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/watchlist/filenamepattern.py | 134 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class FilenamePattern:
def __init__(self, compiled_regex):
self._regex = compiled_regex
def match(self, path, diff_file):
return self._regex.match(path)
|
jagguli/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/contrib/localflavor/cz/forms.py | 72 | """
Czech-specific form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Select, RegexField, Field
from django.utils.translation import ugettext_lazy as _
import re
birth_number = re.compile(r'^(?P<birth>\d{6})/?(?P<id>\d{3,4})$')
ic_number = re.compile(r'^(?P<number>\d{7})(?P<check>\d)$')
class CZRegionSelect(Select):
"""
    A select widget with a list of Czech regions as choices.
"""
def __init__(self, attrs=None):
from cz_regions import REGION_CHOICES
super(CZRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class CZPostalCodeField(RegexField):
"""
    A form field that validates its input as a Czech postal code.
    Valid formats are XXXXX or XXX XX, where X is a digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XXXXX or XXX XX.'),
}
def __init__(self, *args, **kwargs):
super(CZPostalCodeField, self).__init__(r'^\d{5}$|^\d{3} \d{2}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
"""
Validates the input and returns a string that contains only numbers.
Returns an empty string for empty values.
"""
v = super(CZPostalCodeField, self).clean(value)
return v.replace(' ', '')
class CZBirthNumberField(Field):
"""
Czech birth number field.
"""
default_error_messages = {
'invalid_format': _(u'Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.'),
'invalid_gender': _(u'Invalid optional parameter Gender, valid values are \'f\' and \'m\''),
'invalid': _(u'Enter a valid birth number.'),
}
def clean(self, value, gender=None):
super(CZBirthNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = re.match(birth_number, value)
if not match:
raise ValidationError(self.error_messages['invalid_format'])
birth, id = match.groupdict()['birth'], match.groupdict()['id']
        # Three digits for the verification number were used until 1 January 1954
if len(id) == 3:
return u'%s' % value
        # The birth date is in format YYMMDD. Females have the month value
        # raised by 50. When all possible numbers for a given date are
        # exhausted, the month field is additionally raised by 20.
if gender is not None:
if gender == 'f':
female_const = 50
elif gender == 'm':
female_const = 0
else:
raise ValidationError(self.error_messages['invalid_gender'])
month = int(birth[2:4]) - female_const
if (not 1 <= month <= 12):
if (not 1 <= (month - 20) <= 12):
raise ValidationError(self.error_messages['invalid'])
day = int(birth[4:6])
if not (1 <= day <= 31):
raise ValidationError(self.error_messages['invalid'])
        # The fourth digit has been added since 1 January 1954. It makes the
        # whole birth number divisible by 11. If the modulo was 10, the last
        # digit was 0 (and the whole birth number was therefore not divisible
        # by 11). Such numbers have not been issued since 1985, so the
        # 'modulo == 10' condition can be removed in 2085.
modulo = int(birth + id[:3]) % 11
if (modulo == int(id[-1])) or (modulo == 10 and id[-1] == '0'):
return u'%s' % value
else:
raise ValidationError(self.error_messages['invalid'])
class CZICNumberField(Field):
"""
Czech IC number field.
"""
default_error_messages = {
'invalid': _(u'Enter a valid IC number.'),
}
def clean(self, value):
super(CZICNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = re.match(ic_number, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
number, check = match.groupdict()['number'], int(match.groupdict()['check'])
sum = 0
weight = 8
for digit in number:
sum += int(digit)*weight
weight -= 1
remainder = sum % 11
        # the remainder determines the expected check digit:
        #   0 or 10: last digit is 1
        #   1: last digit is 0
        #   otherwise: last digit is 11 - remainder
if (not remainder % 10 and check == 1) or \
(remainder == 1 and check == 0) or \
(check == (11 - remainder)):
return u'%s' % value
raise ValidationError(self.error_messages['invalid'])
|
loumatrix/android_kernel_asus_me301t | refs/heads/android-5.1 | tools/perf/scripts/python/netdev-times.py | 11271 | # Display the processing of packets and the time each step takes.
# This helps us investigate networking and network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
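# Illustrative example: diff_msec(1000000, 2500000) returns 1.5, i.e. a
# 1500000ns gap expressed in milliseconds.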
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
ProjectFacet/facet | refs/heads/master | project/editorial/migrations/0091_auto_20190512_1710.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2019-05-13 00:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editorial', '0090_auto_20190512_1558'),
]
operations = [
migrations.RemoveField(
model_name='organizationdiscoveryprofile',
name='platforms',
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_cable_tv',
field=models.BooleanField(default=False, help_text=b'Organization airs on cable television.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_network_tv',
field=models.BooleanField(default=False, help_text=b'Organization airs on network television.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_newsletter',
field=models.BooleanField(default=False, help_text=b'Organization publishes newsletters.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_online',
field=models.BooleanField(default=False, help_text=b'Organization publishes online.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_podcast',
field=models.BooleanField(default=False, help_text=b'Organization produces podcasts.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_print',
field=models.BooleanField(default=False, help_text=b'Organization publishes in print.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_radio',
field=models.BooleanField(default=False, help_text=b'Organization airs on radio.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_social',
field=models.BooleanField(default=False, help_text=b'Organization publishes content on social platforms.'),
),
migrations.AddField(
model_name='organizationdiscoveryprofile',
name='platform_streaming_video',
field=models.BooleanField(default=False, help_text=b'Organization content airs on streaming video.'),
),
]
|
jiadaizhao/LeetCode | refs/heads/master | 0001-0100/0011-Container With Most Water/0011-Container With Most Water.py | 1 | class Solution:
def maxArea(self, height: List[int]) -> int:
left = 0
right = len(height) - 1
maxA = 0
while left < right:
maxA = max(maxA, min(height[left], height[right])*(right - left))
if height[left] <= height[right]:
left += 1
else:
right -= 1
return maxA
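# Illustrative example (classic test case):
#   Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]) returns 49,
#   the area between heights 8 (index 1) and 7 (index 8): min(8, 7) * (8 - 1).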
|
MTgeophysics/mtpy | refs/heads/develop | mtpy/gui/SmartMT/utils/file_handler.py | 1 | """
Description:
todo to be written
Usage:
    todo to be written
Author: YingzhiGou
Date: 20/06/2017
"""
import os
import mtpy.core.mt as mt
from mtpy.utils.mtpylog import MtPyLog
DEFAULT_GROUP_PREFIX = 'Group'
DEFAULT_GROUP = "Default Group"
class FileHandler():
"""
Description:
        container that holds all file references and MT objects created from the files
"""
def __init__(self):
self._station_dict = dict()
self._logger = MtPyLog.get_mtpy_logger(self.__class__.__name__)
self._file_dict = dict()
self._group_dict = dict()
self._file_to_groups = dict()
def add_file(self, file_name, group_id=None):
"""
:param file_name:
:param group_id:
:type file_name: str
:type group_id: str
:return:
"""
file_ref = mt_obj = None
if isinstance(file_name, str):
if os.path.isfile(file_name):
if file_name in self._file_dict and self._file_dict[file_name] is not None:
self._logger.warning("File %s already loaded." % file_name)
file_ref = file_name
mt_obj = self.get_MT_obj(file_name)
else:
file_ref = file_name
self._logger.info("loading %s" % file_name)
mt_obj = mt.MT(file_name)
elif isinstance(file_name, mt.MT):
mt_obj = file_name
file_ref = mt_obj.fn
else:
raise FileHandlingException("Unsupported input type %s" % type(file_name))
# add file in to container
self._logger.info("referencing %s to %s" % (file_ref, mt_obj.station))
if file_ref not in self._file_dict:
self._file_dict[file_ref] = mt_obj
if mt_obj.station in self._station_dict:
raise FileHandlingException("Station %s already loaded from %s, you could try to unload this first" %
(mt_obj.station, self.station2ref(mt_obj.station)))
else:
self._station_dict[mt_obj.station] = (file_ref)
self._file_to_groups[file_ref] = set()
# add file to group
return self.add_to_group(group_id, file_ref)
def add_files(self, file_list, group_id=None):
for file_name in file_list:
self.add_file(file_name, group_id)
return True
def station2ref(self, station):
if station in self._station_dict:
return self._station_dict[station]
else:
return None
def add_to_group(self, group_ids, file_ref):
"""
add a file ref to a group
:param group_id:
:type group_id str
:param file_ref:
:type file_ref str
:return: True
"""
if not group_ids:
group_ids = [DEFAULT_GROUP]
elif isinstance(group_ids, str):
group_ids = [group_ids]
for group_id in group_ids:
if isinstance(group_id, str):
if group_id not in self._group_dict:
self._group_dict[group_id] = set()
if file_ref in self._file_dict:
self._logger.info("adding %s to group \"%s\"" % (self._file_dict[file_ref].station, group_id))
self._group_dict[group_id].add(file_ref)
self._file_to_groups[file_ref].add(group_id)
self._logger.info("%s now in group %s" % (self._file_dict[file_ref].station, ", ".join(self._file_to_groups[file_ref])))
else:
self._logger.error("File %s has not yet been loaded." % file_ref)
return False
else:
self._logger.warning("Unsupported group ID \"%s\", add file %s to \"%s\"" % (
type(group_id), file_ref, DEFAULT_GROUP))
return self.add_to_group(DEFAULT_GROUP, file_ref)
return True
def remove_file_from_group(self, group_id, file_ref):
"""
:param group_id:
:type group_id str
:param file_ref:
:type file_ref str
:return:
"""
self._logger.info("Remove %s from group %s" % (self.get_MT_obj(file_ref).station, group_id))
if group_id in self._group_dict:
try:
self._group_dict[group_id].remove(file_ref)
self._file_to_groups[file_ref].remove(group_id)
if not self._file_to_groups[file_ref]:
self.add_to_group(DEFAULT_GROUP, file_ref)
return True
except KeyError:
return False
return False
def unload(self, file_ref):
self._logger.info("unload %s" % file_ref)
for group in self._file_to_groups[file_ref]:
self._group_dict[group].remove(file_ref)
station = self.get_MT_obj(file_ref).station
del self._station_dict[station]
del self._file_to_groups[file_ref]
del self._file_dict[file_ref]
def remove_group(self, group_id):
self._logger.info("Remove group %s" % group_id)
members = self.get_group_members(group_id)
if members is not None:
for ref in members:
self._file_to_groups[ref].remove(group_id)
if not self._file_to_groups[ref]:
self.add_to_group(DEFAULT_GROUP, ref)
del self._group_dict[group_id]
def get_groups(self):
return list(self._group_dict.keys())
# properties
def get_group_members(self, group):
if group in self._group_dict:
return self._group_dict[group]
else:
self._logger.error("Group \"%s\" does not exist." % group)
return None
def get_MT_obj(self, ref):
if ref in self._file_dict:
return self._file_dict[ref]
else:
self._logger.warning("File \"%s\" is not loaded" % ref)
return None
def get_file_refs(self):
return list(self._file_dict.keys())
def create_group(self, group_id):
if group_id not in self._group_dict:
self._group_dict[group_id] = set()
return True
else:
self._logger.warning("Group %s exists!" % group_id)
return False
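# Illustrative usage sketch (the file path is hypothetical):
#   handler = FileHandler()
#   handler.add_file("/data/edi/station01.edi", group_id="survey_a")
#   handler.get_groups()  # -> ['survey_a']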
class FileHandlingException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
|
rshipp/python-appassure | refs/heads/development | appassure/core/ICoreCallbackManagement.py | 1 | """AppAssure 5 Core API"""
from appassure.api import AppAssureAPI
class ICoreCallbackManagement(AppAssureAPI):
"""Full documentation online at
http://docs.appassure.com/display/AA50D/ICoreCallbackManagement
"""
def processAgentProtectionRequest(self, data):
"""The method is called by failover agent in order to
perform remote pairing. This method is for internal usage
only.
"""
return self.session.request('corecallback/agentprotectionrequest', 'POST',
self.getXML(data, 'coreCallbackRequest'))
def verifyConnect(self):
"""Called by agent to verify connectivity to this core."""
return self.session.request('corecallback/connect')
|
fqez/JdeRobot | refs/heads/master | src/libs/comm_py/tests/testMotors.py | 7 | #!/usr/bin/env python3
import config
import comm
import sys
import time
import signal
from jderobotTypes import CMDVel
if __name__ == '__main__':
cfg = config.load(sys.argv[1])
    jdrc = comm.init(cfg, "Test")
vel = CMDVel()
vel.vx = 1
vel.az = 0.1
client = jdrc.getMotorsClient("Test.Motors")
    for i in range(10):
client.sendVelocities(vel)
time.sleep(1)
jdrc.destroy() |
polyrabbit/WeCron | refs/heads/master | WeCron/wxhook/models.py | 1 | #coding: utf-8
from __future__ import unicode_literals, absolute_import
from django.db import models
# Create your models here.
|
kalahbrown/HueBigSQL | refs/heads/master | desktop/core/ext-py/Paste-2.0.1/tests/test_exceptions/test_httpexceptions.py | 47 | # (c) 2005 Ian Bicking, Clark C. Evans and contributors
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
WSGI Exception Middleware
Regression Test Suite
"""
from nose.tools import assert_raises
from paste.httpexceptions import *
from paste.response import header_value
import six
def test_HTTPMove():
""" make sure that location is a mandatory attribute of Redirects """
assert_raises(AssertionError, HTTPFound)
assert_raises(AssertionError, HTTPTemporaryRedirect,
headers=[('l0cation','/bing')])
assert isinstance(HTTPMovedPermanently("This is a message",
headers=[('Location','/bing')])
,HTTPRedirection)
assert isinstance(HTTPUseProxy(headers=[('LOCATION','/bing')])
,HTTPRedirection)
assert isinstance(HTTPFound('/foobar'),HTTPRedirection)
def test_badapp():
""" verify that the middleware handles previously-started responses """
def badapp(environ, start_response):
start_response("200 OK",[])
raise HTTPBadRequest("Do not do this at home.")
newapp = HTTPExceptionHandler(badapp)
assert b'Bad Request' in b''.join(newapp({'HTTP_ACCEPT': 'text/html'},
(lambda a, b, c=None: None)))
def test_unicode():
""" verify unicode output """
tstr = u"\0xCAFE"
def badapp(environ, start_response):
start_response("200 OK",[])
raise HTTPBadRequest(tstr)
newapp = HTTPExceptionHandler(badapp)
assert tstr.encode("utf-8") in b''.join(newapp({'HTTP_ACCEPT':
'text/html'},
(lambda a, b, c=None: None)))
assert tstr.encode("utf-8") in b''.join(newapp({'HTTP_ACCEPT':
'text/plain'},
(lambda a, b, c=None: None)))
def test_template():
""" verify that html() and plain() output methods work """
e = HTTPInternalServerError()
e.template = 'A %(ping)s and <b>%(pong)s</b> message.'
assert str(e).startswith("500 Internal Server Error")
assert e.plain({'ping': 'fun', 'pong': 'happy'}) == (
'500 Internal Server Error\r\n'
'A fun and happy message.\r\n')
assert '<p>A fun and <b>happy</b> message.</p>' in \
e.html({'ping': 'fun', 'pong': 'happy'})
def test_redapp():
""" check that redirect returns the correct, expected results """
saved = []
def saveit(status, headers, exc_info = None):
saved.append((status,headers))
def redapp(environ, start_response):
raise HTTPFound("/bing/foo")
app = HTTPExceptionHandler(redapp)
result = list(app({'HTTP_ACCEPT': 'text/html'},saveit))
assert b'<a href="/bing/foo">' in result[0]
assert "302 Found" == saved[0][0]
if six.PY3:
assert "text/html; charset=utf8" == header_value(saved[0][1], 'content-type')
else:
assert "text/html" == header_value(saved[0][1], 'content-type')
assert "/bing/foo" == header_value(saved[0][1],'location')
result = list(app({'HTTP_ACCEPT': 'text/plain'},saveit))
assert "text/plain; charset=utf8" == header_value(saved[1][1],'content-type')
assert "/bing/foo" == header_value(saved[1][1],'location')
def test_misc():
assert get_exception(301) == HTTPMovedPermanently
redirect = HTTPFound("/some/path")
assert isinstance(redirect,HTTPException)
assert isinstance(redirect,HTTPRedirection)
assert not isinstance(redirect,HTTPError)
notfound = HTTPNotFound()
assert isinstance(notfound,HTTPException)
assert isinstance(notfound,HTTPError)
assert isinstance(notfound,HTTPClientError)
assert not isinstance(notfound,HTTPServerError)
notimpl = HTTPNotImplemented()
assert isinstance(notimpl,HTTPException)
assert isinstance(notimpl,HTTPError)
assert isinstance(notimpl,HTTPServerError)
assert not isinstance(notimpl,HTTPClientError)
|
OCA/stock-logistics-workflow | refs/heads/12.0 | stock_picking_invoice_link/migrations/12.0.2.0.0/post-migrate.py | 1 | # Copyright 2019 Sergio Teruel <sergio.teruel@tecnativa.com>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
# Convert invoice_line_id Many2one field to Many2many field if relation
# table is empty.
    # This is due to a commit from v11 done after the v12 migration, so it is
    # possible that this field is already converted.
    sql = "SELECT COUNT(*) FROM stock_move_invoice_line_rel;"
    env.cr.execute(sql)
    # fetchone() returns a one-element tuple such as (0,), so test the count itself
    if not env.cr.fetchone()[0]:
openupgrade.m2o_to_x2m(env.cr, env['stock.move'], 'stock_move',
'invoice_line_ids', 'invoice_line_id')
|
odicraig/kodi2odi | refs/heads/master | addons/plugin.video.ZemTV-shani/websocket/tests/test_websocket.py | 57 | # -*- coding: utf-8 -*-
#
import six
import sys
sys.path[0:0] = [""]
import os
import os.path
import base64
import socket
try:
from ssl import SSLError
except ImportError:
    # Dummy SSLError class for environments without ssl support.
class SSLError(Exception):
pass
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
import unittest2 as unittest
else:
import unittest
import uuid
if six.PY3:
from base64 import decodebytes as base64decode
else:
from base64 import decodestring as base64decode
# websocket-client
import websocket as ws
from websocket._handshake import _create_sec_websocket_key
from websocket._url import parse_url, get_proxy_info
from websocket._utils import validate_utf8
from websocket._handshake import _validate as _validate_header
from websocket._http import read_headers
# Skip test to access the internet.
TEST_WITH_INTERNET = os.environ.get('TEST_WITH_INTERNET', '0') == '1'
# Skip Secure WebSocket test.
TEST_SECURE_WS = True
TRACABLE = False
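# The helper below pins the frame mask to b"abcd" so that masked frames are
# byte-for-byte reproducible in the send/recv assertions; a real client would
# draw 4 random bytes per RFC 6455.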
def create_mask_key(n):
return "abcd"
class SockMock(object):
def __init__(self):
self.data = []
self.sent = []
def add_packet(self, data):
self.data.append(data)
def recv(self, bufsize):
if self.data:
e = self.data.pop(0)
if isinstance(e, Exception):
raise e
if len(e) > bufsize:
self.data.insert(0, e[bufsize:])
return e[:bufsize]
def send(self, data):
self.sent.append(data)
return len(data)
def close(self):
pass
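# HeaderSockMock preloads the mock socket with a canned handshake response
# read from a fixture under tests/data/, letting the header-parsing tests run
# without a network connection.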
class HeaderSockMock(SockMock):
def __init__(self, fname):
SockMock.__init__(self)
path = os.path.join(os.path.dirname(__file__), fname)
with open(path, "rb") as f:
self.add_packet(f.read())
class WebSocketTest(unittest.TestCase):
def setUp(self):
ws.enableTrace(TRACABLE)
def tearDown(self):
pass
def testDefaultTimeout(self):
self.assertEqual(ws.getdefaulttimeout(), None)
ws.setdefaulttimeout(10)
self.assertEqual(ws.getdefaulttimeout(), 10)
ws.setdefaulttimeout(None)
def testParseUrl(self):
p = parse_url("ws://www.example.com/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/r/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080/")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("ws://www.example.com:8080")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/")
self.assertEqual(p[3], False)
p = parse_url("wss://www.example.com:8080/r")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://www.example.com:8080/r?key=value")
self.assertEqual(p[0], "www.example.com")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r?key=value")
self.assertEqual(p[3], True)
self.assertRaises(ValueError, parse_url, "http://www.example.com/r")
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
return
p = parse_url("ws://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 80)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("ws://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], False)
p = parse_url("wss://[2a03:4000:123:83::3]/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 443)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
p = parse_url("wss://[2a03:4000:123:83::3]:8080/r")
self.assertEqual(p[0], "2a03:4000:123:83::3")
self.assertEqual(p[1], 8080)
self.assertEqual(p[2], "/r")
self.assertEqual(p[3], True)
def testWSKey(self):
key = _create_sec_websocket_key()
        self.assertEqual(24, len(key))  # base64 encoding of a 16-byte nonce
        self.assertTrue(six.u("\n") not in key)
def testWsUtils(self):
key = "c6b8hTg4EeGb2gQMztV1/g=="
required_header = {
"upgrade": "websocket",
"connection": "upgrade",
"sec-websocket-accept": "Kxep+hNu9n51529fGidYu7a3wO0=",
}
self.assertEqual(_validate_header(required_header, key, None), (True, None))
header = required_header.copy()
header["upgrade"] = "http"
self.assertEqual(_validate_header(header, key, None), (False, None))
del header["upgrade"]
self.assertEqual(_validate_header(header, key, None), (False, None))
header = required_header.copy()
header["connection"] = "something"
self.assertEqual(_validate_header(header, key, None), (False, None))
del header["connection"]
self.assertEqual(_validate_header(header, key, None), (False, None))
header = required_header.copy()
header["sec-websocket-accept"] = "something"
self.assertEqual(_validate_header(header, key, None), (False, None))
del header["sec-websocket-accept"]
self.assertEqual(_validate_header(header, key, None), (False, None))
header = required_header.copy()
header["sec-websocket-protocol"] = "sub1"
self.assertEqual(_validate_header(header, key, ["sub1", "sub2"]), (True, "sub1"))
self.assertEqual(_validate_header(header, key, ["sub2", "sub3"]), (False, None))
header = required_header.copy()
header["sec-websocket-protocol"] = "sUb1"
self.assertEqual(_validate_header(header, key, ["Sub1", "suB2"]), (True, "sub1"))
def testReadHeader(self):
status, header = read_headers(HeaderSockMock("data/header01.txt"))
self.assertEqual(status, 101)
self.assertEqual(header["connection"], "upgrade")
HeaderSockMock("data/header02.txt")
self.assertRaises(ws.WebSocketException, read_headers, HeaderSockMock("data/header02.txt"))
def testSend(self):
# TODO: add longer frame data
sock = ws.WebSocket()
sock.set_mask_key(create_mask_key)
s = sock.sock = HeaderSockMock("data/header01.txt")
sock.send("Hello")
self.assertEqual(s.sent[0], six.b("\x81\x85abcd)\x07\x0f\x08\x0e"))
sock.send("こんにちは")
self.assertEqual(s.sent[1], six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc"))
sock.send(u"こんにちは")
self.assertEqual(s.sent[1], six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc"))
sock.send("x" * 127)
def testRecv(self):
# TODO: add longer frame data
sock = ws.WebSocket()
s = sock.sock = SockMock()
something = six.b("\x81\x8fabcd\x82\xe3\xf0\x87\xe3\xf1\x80\xe5\xca\x81\xe2\xc5\x82\xe3\xcc")
s.add_packet(something)
data = sock.recv()
self.assertEqual(data, "こんにちは")
s.add_packet(six.b("\x81\x85abcd)\x07\x0f\x08\x0e"))
data = sock.recv()
self.assertEqual(data, "Hello")
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testIter(self):
count = 2
for rsvp in ws.create_connection('ws://stream.meetup.com/2/rsvps'):
count -= 1
if count == 0:
break
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testNext(self):
sock = ws.create_connection('ws://stream.meetup.com/2/rsvps')
self.assertEqual(str, type(next(sock)))
def testInternalRecvStrict(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
s.add_packet(six.b("foo"))
s.add_packet(socket.timeout())
s.add_packet(six.b("bar"))
# s.add_packet(SSLError("The read operation timed out"))
s.add_packet(six.b("baz"))
with self.assertRaises(ws.WebSocketTimeoutException):
data = sock.frame_buffer.recv_strict(9)
# if six.PY2:
# with self.assertRaises(ws.WebSocketTimeoutException):
# data = sock._recv_strict(9)
# else:
# with self.assertRaises(SSLError):
# data = sock._recv_strict(9)
data = sock.frame_buffer.recv_strict(9)
self.assertEqual(data, six.b("foobarbaz"))
with self.assertRaises(ws.WebSocketConnectionClosedException):
data = sock.frame_buffer.recv_strict(1)
def testRecvTimeout(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
s.add_packet(six.b("\x81"))
s.add_packet(socket.timeout())
s.add_packet(six.b("\x8dabcd\x29\x07\x0f\x08\x0e"))
s.add_packet(socket.timeout())
s.add_packet(six.b("\x4e\x43\x33\x0e\x10\x0f\x00\x40"))
with self.assertRaises(ws.WebSocketTimeoutException):
data = sock.recv()
with self.assertRaises(ws.WebSocketTimeoutException):
data = sock.recv()
data = sock.recv()
self.assertEqual(data, "Hello, World!")
with self.assertRaises(ws.WebSocketConnectionClosedException):
data = sock.recv()
def testRecvWithSimpleFragmentation(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
# OPCODE=CONT, FIN=1, MSG="the soul of wit"
s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
data = sock.recv()
self.assertEqual(data, "Brevity is the soul of wit")
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
def testRecvWithFireEventOfFragmentation(self):
sock = ws.WebSocket(fire_cont_frame=True)
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x01\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
# OPCODE=CONT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x00\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
# OPCODE=CONT, FIN=1, MSG="the soul of wit"
s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
_, data = sock.recv_data()
self.assertEqual(data, six.b("Brevity is "))
_, data = sock.recv_data()
self.assertEqual(data, six.b("Brevity is "))
_, data = sock.recv_data()
self.assertEqual(data, six.b("the soul of wit"))
# OPCODE=CONT, FIN=0, MSG="Brevity is "
s.add_packet(six.b("\x80\x8babcd#\x10\x06\x12\x08\x16\x1aD\x08\x11C"))
with self.assertRaises(ws.WebSocketException):
sock.recv_data()
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
def testClose(self):
sock = ws.WebSocket()
sock.sock = SockMock()
sock.connected = True
sock.close()
self.assertEqual(sock.connected, False)
sock = ws.WebSocket()
s = sock.sock = SockMock()
sock.connected = True
s.add_packet(six.b('\x88\x80\x17\x98p\x84'))
sock.recv()
self.assertEqual(sock.connected, False)
def testRecvContFragmentation(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
# OPCODE=CONT, FIN=1, MSG="the soul of wit"
s.add_packet(six.b("\x80\x8fabcd\x15\n\x06D\x12\r\x16\x08A\r\x05D\x16\x0b\x17"))
self.assertRaises(ws.WebSocketException, sock.recv)
def testRecvWithProlongedFragmentation(self):
sock = ws.WebSocket()
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Once more unto the breach, "
s.add_packet(six.b("\x01\x9babcd.\x0c\x00\x01A\x0f\x0c\x16\x04B\x16\n\x15" \
"\rC\x10\t\x07C\x06\x13\x07\x02\x07\tNC"))
# OPCODE=CONT, FIN=0, MSG="dear friends, "
s.add_packet(six.b("\x00\x8eabcd\x05\x07\x02\x16A\x04\x11\r\x04\x0c\x07" \
"\x17MB"))
# OPCODE=CONT, FIN=1, MSG="once more"
s.add_packet(six.b("\x80\x89abcd\x0e\x0c\x00\x01A\x0f\x0c\x16\x04"))
data = sock.recv()
self.assertEqual(
data,
"Once more unto the breach, dear friends, once more")
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
def testRecvWithFragmentationAndControlFrame(self):
sock = ws.WebSocket()
sock.set_mask_key(create_mask_key)
s = sock.sock = SockMock()
# OPCODE=TEXT, FIN=0, MSG="Too much "
s.add_packet(six.b("\x01\x89abcd5\r\x0cD\x0c\x17\x00\x0cA"))
# OPCODE=PING, FIN=1, MSG="Please PONG this"
s.add_packet(six.b("\x89\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17"))
# OPCODE=CONT, FIN=1, MSG="of a good thing"
s.add_packet(six.b("\x80\x8fabcd\x0e\x04C\x05A\x05\x0c\x0b\x05B\x17\x0c" \
"\x08\x0c\x04"))
data = sock.recv()
self.assertEqual(data, "Too much of a good thing")
with self.assertRaises(ws.WebSocketConnectionClosedException):
sock.recv()
self.assertEqual(
s.sent[0],
six.b("\x8a\x90abcd1\x0e\x06\x05\x12\x07C4.,$D\x15\n\n\x17"))
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testWebSocket(self):
s = ws.create_connection("ws://echo.websocket.org/")
self.assertNotEqual(s, None)
s.send("Hello, World")
result = s.recv()
self.assertEqual(result, "Hello, World")
s.send(u"こにゃにゃちは、世界")
result = s.recv()
self.assertEqual(result, "こにゃにゃちは、世界")
s.close()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testPingPong(self):
s = ws.create_connection("ws://echo.websocket.org/")
self.assertNotEqual(s, None)
s.ping("Hello")
s.pong("Hi")
s.close()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
@unittest.skipUnless(TEST_SECURE_WS, "wss://echo.websocket.org doesn't work well.")
def testSecureWebSocket(self):
        import ssl
        s = ws.create_connection("wss://echo.websocket.org/")
        self.assertNotEqual(s, None)
        self.assertTrue(isinstance(s.sock, ssl.SSLSocket))
        s.send("Hello, World")
        result = s.recv()
        self.assertEqual(result, "Hello, World")
        s.send(u"こにゃにゃちは、世界")
        result = s.recv()
        self.assertEqual(result, "こにゃにゃちは、世界")
        s.close()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testWebSocketWihtCustomHeader(self):
s = ws.create_connection("ws://echo.websocket.org/",
headers={"User-Agent": "PythonWebsocketClient"})
self.assertNotEqual(s, None)
s.send("Hello, World")
result = s.recv()
self.assertEqual(result, "Hello, World")
s.close()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testAfterClose(self):
s = ws.create_connection("ws://echo.websocket.org/")
self.assertNotEqual(s, None)
s.close()
self.assertRaises(ws.WebSocketConnectionClosedException, s.send, "Hello")
self.assertRaises(ws.WebSocketConnectionClosedException, s.recv)
def testNonce(self):
""" WebSocket key should be a random 16-byte nonce.
"""
key = _create_sec_websocket_key()
nonce = base64decode(key.encode("utf-8"))
self.assertEqual(16, len(nonce))
class WebSocketAppTest(unittest.TestCase):
class NotSetYet(object):
""" A marker class for signalling that a value hasn't been set yet.
"""
def setUp(self):
ws.enableTrace(TRACABLE)
WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()
def tearDown(self):
WebSocketAppTest.keep_running_open = WebSocketAppTest.NotSetYet()
WebSocketAppTest.keep_running_close = WebSocketAppTest.NotSetYet()
WebSocketAppTest.get_mask_key_id = WebSocketAppTest.NotSetYet()
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testKeepRunning(self):
""" A WebSocketApp should keep running as long as its self.keep_running
is not False (in the boolean context).
"""
def on_open(self, *args, **kwargs):
""" Set the keep_running flag for later inspection and immediately
close the connection.
"""
WebSocketAppTest.keep_running_open = self.keep_running
self.close()
def on_close(self, *args, **kwargs):
""" Set the keep_running flag for the test to use.
"""
WebSocketAppTest.keep_running_close = self.keep_running
app = ws.WebSocketApp('ws://echo.websocket.org/', on_open=on_open, on_close=on_close)
app.run_forever()
self.assertFalse(isinstance(WebSocketAppTest.keep_running_open,
WebSocketAppTest.NotSetYet))
self.assertFalse(isinstance(WebSocketAppTest.keep_running_close,
WebSocketAppTest.NotSetYet))
self.assertEqual(True, WebSocketAppTest.keep_running_open)
self.assertEqual(False, WebSocketAppTest.keep_running_close)
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testSockMaskKey(self):
""" A WebSocketApp should forward the received mask_key function down
to the actual socket.
"""
def my_mask_key_func():
pass
def on_open(self, *args, **kwargs):
""" Set the value so the test can use it later on and immediately
close the connection.
"""
WebSocketAppTest.get_mask_key_id = id(self.get_mask_key)
self.close()
app = ws.WebSocketApp('ws://echo.websocket.org/', on_open=on_open, get_mask_key=my_mask_key_func)
app.run_forever()
# Note: We can't use 'is' for comparing the functions directly, need to use 'id'.
self.assertEqual(WebSocketAppTest.get_mask_key_id, id(my_mask_key_func))
class SockOptTest(unittest.TestCase):
@unittest.skipUnless(TEST_WITH_INTERNET, "Internet-requiring tests are disabled")
def testSockOpt(self):
sockopt = ((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),)
s = ws.create_connection("ws://echo.websocket.org", sockopt=sockopt)
self.assertNotEqual(s.sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY), 0)
s.close()
class UtilsTest(unittest.TestCase):
def testUtf8Validator(self):
state = validate_utf8(six.b('\xf0\x90\x80\x80'))
self.assertEqual(state, True)
state = validate_utf8(six.b('\xce\xba\xe1\xbd\xb9\xcf\x83\xce\xbc\xce\xb5\xed\xa0\x80edited'))
self.assertEqual(state, False)
state = validate_utf8(six.b(''))
self.assertEqual(state, True)
class ProxyInfoTest(unittest.TestCase):
def setUp(self):
self.http_proxy = os.environ.get("http_proxy", None)
self.https_proxy = os.environ.get("https_proxy", None)
if "http_proxy" in os.environ:
del os.environ["http_proxy"]
if "https_proxy" in os.environ:
del os.environ["https_proxy"]
def tearDown(self):
if self.http_proxy:
os.environ["http_proxy"] = self.http_proxy
elif "http_proxy" in os.environ:
del os.environ["http_proxy"]
if self.https_proxy:
os.environ["https_proxy"] = self.https_proxy
elif "https_proxy" in os.environ:
del os.environ["https_proxy"]
def testProxyFromArgs(self):
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128), ("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost"), ("localhost", 0, None))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128), ("localhost", 3128, None))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", False, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_auth=("a", "b")),
("localhost", 0, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, no_proxy=["example.com"], proxy_auth=("a", "b")),
("localhost", 3128, ("a", "b")))
self.assertEqual(get_proxy_info("echo.websocket.org", True, proxy_host="localhost", proxy_port=3128, no_proxy=["echo.websocket.org"], proxy_auth=("a", "b")),
(None, 0, None))
def testProxyFromEnv(self):
os.environ["http_proxy"] = "http://localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, None))
os.environ["http_proxy"] = "http://localhost/"
os.environ["https_proxy"] = "http://localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, None))
os.environ["http_proxy"] = "http://localhost:3128/"
os.environ["https_proxy"] = "http://localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, None))
os.environ["http_proxy"] = "http://a:b@localhost/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", False), ("localhost", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
self.assertEqual(get_proxy_info("echo.websocket.org", True), ("localhost2", 3128, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost/"
os.environ["https_proxy"] = "http://a:b@localhost2/"
os.environ["no_proxy"] = "example1.com,example2.com"
self.assertEqual(get_proxy_info("example.1.com", True), ("localhost2", None, ("a", "b")))
os.environ["http_proxy"] = "http://a:b@localhost:3128/"
os.environ["https_proxy"] = "http://a:b@localhost2:3128/"
os.environ["no_proxy"] = "example1.com,example2.com, echo.websocket.org"
self.assertEqual(get_proxy_info("echo.websocket.org", True), (None, 0, None))
if __name__ == "__main__":
unittest.main()
|
perror/checkproject | refs/heads/master | test/example/checks/check_02-codingstyle.py | 1 | from checkproject.case import CheckCase
class CheckProjectFiles3(CheckCase):
"""Checking the content of the projet."""
|
SPriyaJain/studybuddy | refs/heads/master | env/lib/python2.7/site-packages/requests/packages/urllib3/fields.py | 288 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
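# Illustrative sketch (not part of the original module): the helper defers to
# the stdlib mimetypes registry, so results can vary with the host's MIME map.
#
#   guess_content_type('photo.jpg')  # -> 'image/jpeg'
#   guess_content_type('README')     # -> 'application/octet-stream' (fallback)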
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
if not six.PY3 and isinstance(value, six.text_type): # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
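# Worked example (hedged; results shown assume a standard Python 3 build):
#
#   format_header_param('name', u'field1')
#   # -> 'name="field1"' (plain ASCII keeps the simple quoted form)
#   format_header_param('filename', u'r\xe9sum\xe9.txt')
#   # -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt" (RFC 2231 encoded form)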
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
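    # Rendering sketch (illustrative): a multipart field named "f" carrying
    # "a.txt" with Content-Type text/plain renders roughly as
    #   Content-Disposition: form-data; name="f"; filename="a.txt"\r\n
    #   Content-Type: text/plain\r\n
    # followed by the blank line that terminates the header block.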
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
        This method sets the "Content-Disposition", "Content-Type" and
        "Content-Location" headers on the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
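# Minimal usage sketch (assumed typical call pattern, not from this module):
#
#   field = RequestField.from_tuples('upload', ('a.txt', 'hello', 'text/plain'))
#   field.render_headers()  # multipart-ready header block for this field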
|
Samsung/skia | refs/heads/dev/m36_1985 | third_party/externals/gyp/test/win/gyptest-cl-default-char-is-unsigned.py | 269 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure DefaultCharIsUnsigned option is functional.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('default-char-is-unsigned.gyp', chdir=CHDIR)
test.build('default-char-is-unsigned.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
daniel-bulger/word-sense-disambiguator | refs/heads/master | disambiguator/get_similarities.py | 1 | from nltk.corpus import brown
import sys
from graph import Graph,merge_graphs
from nltk.tree import ParentedTree
from stat_parser import Parser
parser = Parser()
user_sentence = sys.argv[1]
query = sys.argv[2]
trees = []
done = 0
# for sentence in brown.sents():
# if done >= 20:
# break
# if not query in sentence:
# continue
# if len(sentence) > 20:
# continue
# try:
# trees.append(parser.parse(" ".join(sentence)))
# done += 1
# print done
# except:
# print "oops couldn't parse that one"
# trees = []
# trees.append(parser.parse("The food was on the table where the child likes to eat"))
# trees.append(parser.parse("I eat food at the table"))
# trees.append(parser.parse("I eat the food that is on the table"))
# trees.append(parser.parse("The money is on the table"))
# trees.append(parser.parse("Put the data in the table"))
# trees.append(parser.parse("Add more rows to the database table"))
# trees.append(parser.parse("Why is the database table empty It should have data in it"))
# trees.append(parser.parse("Do not put your elbows on the table while you eat"))
# trees.append(parser.parse("I like to eat at the kitchen table"))
# trees.append(parser.parse("I like to eat food at the kitchen table"))
trees = []
trees.append(parser.parse("a database table stores rows"))
trees.append(parser.parse("kitchen table"))
trees.append(parser.parse("food on kitchen table"))
trees.append(parser.parse("put rows in database table"))
trees.append(parser.parse("damaged wooden kitchen table"))
trees.append(parser.parse("wooden table"))
trees.append(parser.parse("the table that had been forgotten for years became damaged"))
trees.append(parser.parse("the database table for our favorite client, Bob's bakery, needs more rows"))
trees.append(parser.parse("the database table that stores sensitive client data should be secure"))
trees.append(parser.parse("the database table that stores passwords should be secure"))
trees.append(parser.parse("Motion to table discussion until later"))
trees.append(parser.parse("Please table the motion until a later date"))
trees.append(parser.parse("We should table, if not reject, this motion"))
# Rebinding the loop variable would not modify the list, so rebuild it.
trees = [ParentedTree.convert(tree) for tree in trees]
graphs = []
for tree in trees:
g = Graph(query)
g.update(tree)
graphs.append(g)
print "Merging graphs"
new_graph = merge_graphs(graphs)
print "Drawing graph (fake)"
new_graph.draw("new_graph_"+query)
print "Getting senses"
print new_graph.get_senses()
print "Prediction is..."
print new_graph.get_predicted_sense(ParentedTree.convert(parser.parse(user_sentence)))
#new_graph.print_relatedness_to_target_in_order() |
RogerioBorba/geonode | refs/heads/master | geonode/layers/views.py | 4 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import sys
import logging
import shutil
import traceback
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.conf import settings
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.utils import simplejson as json
from django.utils.html import escape
from django.template.defaultfilters import slugify
from django.forms.models import inlineformset_factory
from django.db.models import F
from geonode.tasks.deletion import delete_layer
from geonode.services.models import Service
from geonode.layers.forms import LayerForm, LayerUploadForm, NewLayerUploadForm, LayerAttributeForm
from geonode.base.forms import CategoryForm
from geonode.layers.models import Layer, Attribute, UploadSession
from geonode.base.enumerations import CHARSETS
from geonode.base.models import TopicCategory
from geonode.utils import default_map_config
from geonode.utils import GXPLayer
from geonode.utils import GXPMap
from geonode.layers.utils import file_upload, is_raster, is_vector
from geonode.utils import resolve_object, llbbox_to_mercator
from geonode.people.forms import ProfileForm, PocForm
from geonode.security.views import _perms_info_json
from geonode.documents.models import get_related_documents
from geonode.utils import build_social_links
from geonode.geoserver.helpers import cascading_delete, gs_catalog
CONTEXT_LOG_FILE = None
if 'geonode.geoserver' in settings.INSTALLED_APPS:
from geonode.geoserver.helpers import _render_thumbnail
from geonode.geoserver.helpers import ogc_server_settings
CONTEXT_LOG_FILE = ogc_server_settings.LOG_FILE
logger = logging.getLogger("geonode.layers.views")
DEFAULT_SEARCH_BATCH_SIZE = 10
MAX_SEARCH_BATCH_SIZE = 25
GENERIC_UPLOAD_ERROR = _("There was an error while attempting to upload your data. \
Please try again, or contact an administrator if the problem continues.")
_PERMISSION_MSG_DELETE = _("You are not permitted to delete this layer")
_PERMISSION_MSG_GENERIC = _('You do not have permissions for this layer.')
_PERMISSION_MSG_MODIFY = _("You are not permitted to modify this layer")
_PERMISSION_MSG_METADATA = _(
"You are not permitted to modify this layer's metadata")
_PERMISSION_MSG_VIEW = _("You are not permitted to view this layer")
def log_snippet(log_file):
if not os.path.isfile(log_file):
return "No log file at %s" % log_file
with open(log_file, "r") as f:
f.seek(0, 2) # Seek @ EOF
fsize = f.tell() # Get Size
f.seek(max(fsize - 10024, 0), 0) # Set pos @ last n chars
return f.read()
def _resolve_layer(request, typename, permission='base.view_resourcebase',
msg=_PERMISSION_MSG_GENERIC, **kwargs):
"""
Resolve the layer by the provided typename (which may include service name) and check the optional permission.
"""
service_typename = typename.split(":", 1)
if Service.objects.filter(name=service_typename[0]).exists():
service = Service.objects.filter(name=service_typename[0])
return resolve_object(request,
Layer,
{'service': service[0],
'typename': service_typename[1] if service[0].method != "C" else typename},
permission=permission,
permission_msg=msg,
**kwargs)
else:
return resolve_object(request,
Layer,
{'typename': typename,
'service': None},
permission=permission,
permission_msg=msg,
**kwargs)
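# Note: typenames take the form "prefix:name"; when the prefix matches a
# registered Service, resolution is scoped to that service (cascaded "C"
# services keep the full typename), otherwise the bare typename is looked up.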
# Basic Layer Views #
@login_required
def layer_upload(request, template='upload/layer_upload.html'):
if request.method == 'GET':
ctx = {
'charsets': CHARSETS,
'is_layer': True,
}
return render_to_response(template,
RequestContext(request, ctx))
elif request.method == 'POST':
form = NewLayerUploadForm(request.POST, request.FILES)
tempdir = None
errormsgs = []
out = {'success': False}
if form.is_valid():
title = form.cleaned_data["layer_title"]
# Replace dots in filename - GeoServer REST API upload bug
# and avoid any other invalid characters.
# Use the title if possible, otherwise default to the filename
if title is not None and len(title) > 0:
name_base = title
else:
name_base, __ = os.path.splitext(
form.cleaned_data["base_file"].name)
name = slugify(name_base.replace(".", "_"))
try:
# Moved this inside the try/except block because it can raise
# exceptions when unicode characters are present.
# This should be followed up in upstream Django.
tempdir, base_file = form.write_files()
saved_layer = file_upload(
base_file,
name=name,
user=request.user,
overwrite=False,
charset=form.cleaned_data["charset"],
abstract=form.cleaned_data["abstract"],
title=form.cleaned_data["layer_title"],
)
except Exception as e:
exception_type, error, tb = sys.exc_info()
logger.exception(e)
out['success'] = False
out['errors'] = str(error)
# Assign the error message to the latest UploadSession from that user.
latest_uploads = UploadSession.objects.filter(user=request.user).order_by('-date')
if latest_uploads.count() > 0:
upload_session = latest_uploads[0]
upload_session.error = str(error)
upload_session.traceback = traceback.format_exc(tb)
upload_session.context = log_snippet(CONTEXT_LOG_FILE)
upload_session.save()
out['traceback'] = upload_session.traceback
out['context'] = upload_session.context
out['upload_session'] = upload_session.id
else:
out['success'] = True
if hasattr(saved_layer, 'info'):
out['info'] = saved_layer.info
out['url'] = reverse(
'layer_detail', args=[
saved_layer.service_typename])
upload_session = saved_layer.upload_session
upload_session.processed = True
upload_session.save()
permissions = form.cleaned_data["permissions"]
if permissions is not None and len(permissions.keys()) > 0:
saved_layer.set_permissions(permissions)
finally:
if tempdir is not None:
shutil.rmtree(tempdir)
else:
for e in form.errors.values():
errormsgs.extend([escape(v) for v in e])
out['errors'] = form.errors
out['errormsgs'] = errormsgs
if out['success']:
status_code = 200
else:
status_code = 400
return HttpResponse(
json.dumps(out),
mimetype='application/json',
status=status_code)
def layer_detail(request, layername, template='layers/layer_detail.html'):
layer = _resolve_layer(
request,
layername,
'base.view_resourcebase',
_PERMISSION_MSG_VIEW)
# assert False, str(layer_bbox)
config = layer.attribute_config()
# Add required parameters for GXP lazy-loading
layer_bbox = layer.bbox
bbox = [float(coord) for coord in list(layer_bbox[0:4])]
srid = layer.srid
# Transform WGS84 to Mercator.
config["srs"] = srid if srid != "EPSG:4326" else "EPSG:900913"
config["bbox"] = llbbox_to_mercator([float(coord) for coord in bbox])
config["title"] = layer.title
config["queryable"] = True
if layer.storeType == "remoteStore":
service = layer.service
source_params = {
"ptype": service.ptype,
"remote": True,
"url": service.base_url,
"name": service.name}
maplayer = GXPLayer(
name=layer.typename,
ows_url=layer.ows_url,
layer_params=json.dumps(config),
source_params=json.dumps(source_params))
else:
maplayer = GXPLayer(
name=layer.typename,
ows_url=layer.ows_url,
layer_params=json.dumps(config))
# Update count for popularity ranking,
    # but do not include admins or resource owners
if request.user != layer.owner and not request.user.is_superuser:
Layer.objects.filter(
id=layer.id).update(popular_count=F('popular_count') + 1)
# center/zoom don't matter; the viewer will center on the layer bounds
map_obj = GXPMap(projection="EPSG:900913")
NON_WMS_BASE_LAYERS = [
la for la in default_map_config()[1] if la.ows_url is None]
metadata = layer.link_set.metadata().filter(
name__in=settings.DOWNLOAD_FORMATS_METADATA)
context_dict = {
"resource": layer,
"permissions_json": _perms_info_json(layer),
"documents": get_related_documents(layer),
"metadata": metadata,
"is_layer": True,
"wps_enabled": settings.OGC_SERVER['default']['WPS_ENABLED'],
}
context_dict["viewer"] = json.dumps(
map_obj.viewer_json(request.user, * (NON_WMS_BASE_LAYERS + [maplayer])))
context_dict["preview"] = getattr(
settings,
'LAYER_PREVIEW_LIBRARY',
'leaflet')
if request.user.has_perm('download_resourcebase', layer.get_self_resource()):
if layer.storeType == 'dataStore':
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_VECTOR)
else:
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_RASTER)
context_dict["links"] = links
if settings.SOCIAL_ORIGINS:
context_dict["social_links"] = build_social_links(request, layer)
return render_to_response(template, RequestContext(request, context_dict))
@login_required
def layer_metadata(request, layername, template='layers/layer_metadata.html'):
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase_metadata',
_PERMISSION_MSG_METADATA)
layer_attribute_set = inlineformset_factory(
Layer,
Attribute,
extra=0,
form=LayerAttributeForm,
)
topic_category = layer.category
poc = layer.poc
metadata_author = layer.metadata_author
if request.method == "POST":
layer_form = LayerForm(request.POST, instance=layer, prefix="resource")
attribute_form = layer_attribute_set(
request.POST,
instance=layer,
prefix="layer_attribute_set",
queryset=Attribute.objects.order_by('display_order'))
category_form = CategoryForm(
request.POST,
prefix="category_choice_field",
initial=int(
request.POST["category_choice_field"]) if "category_choice_field" in request.POST else None)
else:
layer_form = LayerForm(instance=layer, prefix="resource")
attribute_form = layer_attribute_set(
instance=layer,
prefix="layer_attribute_set",
queryset=Attribute.objects.order_by('display_order'))
category_form = CategoryForm(
prefix="category_choice_field",
initial=topic_category.id if topic_category else None)
if request.method == "POST" and layer_form.is_valid(
) and attribute_form.is_valid() and category_form.is_valid():
new_poc = layer_form.cleaned_data['poc']
new_author = layer_form.cleaned_data['metadata_author']
new_keywords = layer_form.cleaned_data['keywords']
if new_poc is None:
if poc is None:
poc_form = ProfileForm(
request.POST,
prefix="poc",
instance=poc)
else:
poc_form = ProfileForm(request.POST, prefix="poc")
            if poc_form.has_changed() and poc_form.is_valid():
new_poc = poc_form.save()
if new_author is None:
if metadata_author is None:
author_form = ProfileForm(request.POST, prefix="author",
instance=metadata_author)
else:
author_form = ProfileForm(request.POST, prefix="author")
            if author_form.has_changed() and author_form.is_valid():
new_author = author_form.save()
new_category = TopicCategory.objects.get(
id=category_form.cleaned_data['category_choice_field'])
for form in attribute_form.cleaned_data:
la = Attribute.objects.get(id=int(form['id'].id))
la.description = form["description"]
la.attribute_label = form["attribute_label"]
la.visible = form["visible"]
la.display_order = form["display_order"]
la.save()
if new_poc is not None and new_author is not None:
new_keywords = layer_form.cleaned_data['keywords']
layer.keywords.clear()
layer.keywords.add(*new_keywords)
the_layer = layer_form.save()
the_layer.poc = new_poc
the_layer.metadata_author = new_author
Layer.objects.filter(id=the_layer.id).update(
category=new_category
)
return HttpResponseRedirect(
reverse(
'layer_detail',
args=(
layer.service_typename,
)))
if poc is None:
poc_form = ProfileForm(instance=poc, prefix="poc")
else:
layer_form.fields['poc'].initial = poc.id
poc_form = ProfileForm(prefix="poc")
poc_form.hidden = True
if metadata_author is None:
author_form = ProfileForm(instance=metadata_author, prefix="author")
else:
layer_form.fields['metadata_author'].initial = metadata_author.id
author_form = ProfileForm(prefix="author")
author_form.hidden = True
return render_to_response(template, RequestContext(request, {
"layer": layer,
"layer_form": layer_form,
"poc_form": poc_form,
"author_form": author_form,
"attribute_form": attribute_form,
"category_form": category_form,
}))
@login_required
def layer_change_poc(request, ids, template='layers/layer_change_poc.html'):
layers = Layer.objects.filter(id__in=ids.split('_'))
if request.method == 'POST':
form = PocForm(request.POST)
if form.is_valid():
for layer in layers:
layer.poc = form.cleaned_data['contact']
layer.save()
# Process the data in form.cleaned_data
# ...
# Redirect after POST
return HttpResponseRedirect('/admin/maps/layer')
else:
form = PocForm() # An unbound form
return render_to_response(
template, RequestContext(
request, {
'layers': layers, 'form': form}))
@login_required
def layer_replace(request, layername, template='layers/layer_replace.html'):
layer = _resolve_layer(
request,
layername,
'base.change_resourcebase',
_PERMISSION_MSG_MODIFY)
if request.method == 'GET':
ctx = {
'charsets': CHARSETS,
'layer': layer,
'is_featuretype': layer.is_vector(),
'is_layer': True,
}
return render_to_response(template,
RequestContext(request, ctx))
elif request.method == 'POST':
form = LayerUploadForm(request.POST, request.FILES)
tempdir = None
out = {}
if form.is_valid():
try:
tempdir, base_file = form.write_files()
if layer.is_vector() and is_raster(base_file):
out['success'] = False
out['errors'] = _("You are attempting to replace a vector layer with a raster.")
elif (not layer.is_vector()) and is_vector(base_file):
out['success'] = False
out['errors'] = _("You are attempting to replace a raster layer with a vector.")
else:
# delete geoserver's store before upload
cat = gs_catalog
cascading_delete(cat, layer.typename)
saved_layer = file_upload(
base_file,
name=layer.name,
user=request.user,
overwrite=True,
charset=form.cleaned_data["charset"],
)
out['success'] = True
out['url'] = reverse(
'layer_detail', args=[
saved_layer.service_typename])
except Exception as e:
out['success'] = False
out['errors'] = str(e)
finally:
if tempdir is not None:
shutil.rmtree(tempdir)
else:
errormsgs = []
for e in form.errors.values():
errormsgs.append([escape(v) for v in e])
out['errors'] = form.errors
out['errormsgs'] = errormsgs
if out['success']:
status_code = 200
else:
status_code = 400
return HttpResponse(
json.dumps(out),
mimetype='application/json',
status=status_code)
@login_required
def layer_remove(request, layername, template='layers/layer_remove.html'):
layer = _resolve_layer(
request,
layername,
'base.delete_resourcebase',
_PERMISSION_MSG_DELETE)
if (request.method == 'GET'):
return render_to_response(template, RequestContext(request, {
"layer": layer
}))
if (request.method == 'POST'):
try:
delete_layer.delay(object_id=layer.id)
except Exception as e:
message = '{0}: {1}.'.format(_('Unable to delete layer'), layer.typename)
if 'referenced by layer group' in getattr(e, 'message', ''):
message = _('This layer is a member of a layer group, you must remove the layer from the group '
'before deleting.')
messages.error(request, message)
return render_to_response(template, RequestContext(request, {"layer": layer}))
return HttpResponseRedirect(reverse("layer_browse"))
else:
return HttpResponse("Not allowed", status=403)
def layer_thumbnail(request, layername):
if request.method == 'POST':
layer_obj = _resolve_layer(request, layername)
try:
image = _render_thumbnail(request.body)
if not image:
return
filename = "layer-%s-thumb.png" % layer_obj.uuid
layer_obj.save_thumbnail(filename, image)
return HttpResponse('Thumbnail saved')
        except Exception:
return HttpResponse(
content='error saving thumbnail',
status=500,
mimetype='text/plain'
)
|
andresriancho/billiard | refs/heads/master | billiard/popen_fork.py | 2 | import os
import sys
import signal
import errno
from . import util
__all__ = ['Popen']
#
# Start child process using fork
#
class Popen(object):
method = 'fork'
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self._launch(process_obj)
def duplicate_for_child(self, fd):
return fd
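    # poll() reaps the child via os.waitpid and decodes the raw status word:
    # death by signal becomes a negative returncode (-signum), a normal exit
    # becomes its exit status.  EINTR is retried; any other OSError (notably
    # ECHILD) is treated as "child not created yet" and reported as None.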
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
while True:
try:
pid, sts = os.waitpid(self.pid, flag)
except OSError as e:
if e.errno == errno.EINTR:
continue
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
else:
break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if self.returncode is None:
if timeout is not None:
from multiprocessing.connection import wait
if not wait([self.sentinel], timeout):
return None
# This shouldn't block if wait() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as exc:
if getattr(exc, 'errno', None) != errno.ESRCH:
if self.wait(timeout=0.1) is None:
raise
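    # _launch() forks: the child runs process_obj._bootstrap() and _exits with
    # its return code, while the parent keeps the read end of a pipe as a
    # sentinel that becomes readable (EOF) once the child terminates.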
def _launch(self, process_obj):
code = 1
parent_r, child_w = os.pipe()
self.pid = os.fork()
if self.pid == 0:
try:
os.close(parent_r)
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
finally:
os._exit(code)
else:
os.close(child_w)
util.Finalize(self, os.close, (parent_r,))
self.sentinel = parent_r
|
jericks/geoscript-py | refs/heads/master | geoscript/geom/multipolygon.py | 1 | from com.vividsolutions.jts.geom import MultiPolygon as _MultiPolygon
from geoscript import core
from polygon import Polygon
import geom
class MultiPolygon(_MultiPolygon):
"""
A MultiPolygon geometry.
*polygons* is a variable number of multidimensional lists of ``list``/``tuple``.
>>> MultiPolygon( [ [[1,2],[3,4],[5,6],[1,2]] ], [ [[7,8], [9,10], [11,12], [7,8]] ] )
MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((7 8, 9 10, 11 12, 7 8)))
*polygons* may also be specified as a variable number of :class:`Polygon` arguments.
>>> MultiPolygon(Polygon([[1,2], [3,4], [5,6], [1,2]]), Polygon([[7,8], [9,10], [11,12], [7,8]]))
MULTIPOLYGON (((1 2, 3 4, 5 6, 1 2)), ((7 8, 9 10, 11 12, 7 8)))
"""
def __init__(self, *polygons):
if isinstance(polygons[0], _MultiPolygon):
mp = polygons[0]
polygons = [mp.getGeometryN(i) for i in range(mp.numGeometries)]
elif isinstance(polygons[0], (list,tuple)):
polygons = [Polygon(*p) for p in polygons]
_MultiPolygon.__init__(self, polygons, geom._factory)
geom._enhance(MultiPolygon)
core.registerTypeMapping(_MultiPolygon, MultiPolygon)
|
supersven/intellij-community | refs/heads/master | python/testData/resolve/multiFile/importSubmodule/ImportSubmodule.py | 83 | import p1.m1
p1.m1
# <ref> |
etherkit/OpenBeacon2 | refs/heads/master | client/linux-x86/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-boto.py | 3 | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
#
# Boto3, the next version of Boto, is now stable and recommended for general
# use.
#
# Boto is an integrated interface to current and future infrastructural
# services offered by Amazon Web Services.
#
# http://boto.readthedocs.org/en/latest/
#
# Tested with boto 2.38.0
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('boto')
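# Hedged note: collect_data_files('boto') returns (source, dest) tuples for
# the package's non-Python data files (e.g. bundled endpoint/certificate
# data), which PyInstaller then copies into the frozen application.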
|
open-synergy/vertical-community | refs/heads/8.0 | __unreviewed__/community/__init__.py | 4 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron. Copyright Yannick Buron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import community
import res_config
|
enovance/numeter | refs/heads/master | web-app/numeter_webapp/multiviews/management/commands/skeleton.py | 2 | from django.core.management.base import BaseCommand, CommandError
from core.models import Host, Plugin, Data_Source as Source
from multiviews.models import Skeleton, View
from configuration.forms.skeleton import Skeleton_Form
from core.management.commands._utils import CommandDispatcher
from optparse import make_option
from os import devnull
import sys
class Command(CommandDispatcher):
"""Skeleton management base command."""
actions = ('list','add','delete','del','modify','mod','create_view')
    def _subcommand_names(self):
        return self.actions
def _subcommand(self, *args, **opts):
"""Dispatch in a Command by reading first argv."""
if not args or args[0] not in self.actions:
            self.stdout.write(self.usage('skeleton'))
elif args[0] == 'list':
return List_Command()
elif args[0] == 'add':
return Add_Command()
elif args[0] in ('delete','del'):
return Delete_Command()
elif args[0] in ('modify','mod'):
return Modify_Command()
elif args[0] == 'create_view':
return Create_View_Command()
ROW_FORMAT = '{id:5} | {name:40} | {plugin_pattern:20} | {source_pattern:20}'
class List_Command(BaseCommand):
option_list = BaseCommand.option_list + (
)
def handle(self, *args, **opts):
self.stdout.write(ROW_FORMAT.format(**{u'id': 'ID', 'plugin_pattern': 'Plugin pattern', 'source_pattern': 'Source Pattern', 'name': u'Name'}))
for s in Skeleton.objects.all():
self.stdout.write(ROW_FORMAT.format(**s.__dict__))
# Print count
self.stdout.write('Count: %i' % Skeleton.objects.count())
class Add_Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-n', '--name', action='store', default=None, help="Set name."),
make_option('-p', '--plugin_pattern', action='store', default=None, help="Set plugin pattern."),
make_option('-s', '--source_pattern', action='store', default=None, help="Set source pattern."),
make_option('-C', '--comment', action='store', default=None, help="Set comment."),
make_option('-q', '--quiet', action='store_true', help="Don't print info."),
)
def handle(self, *args, **opts):
if opts['quiet']: self.stdout = open(devnull, 'w')
# Use Form to valid
F = Skeleton_Form(data=opts)
if F.is_valid():
s = F.save()
self.stdout.write(ROW_FORMAT.format(**{u'id': 'ID', 'plugin_pattern': 'Plugin pattern', 'source_pattern': 'Source Pattern', 'name': u'Name'}))
self.stdout.write(ROW_FORMAT.format(**s.__dict__))
else:
for field,errors in F.errors.items():
self.stdout.write(field)
for err in errors:
self.stdout.write('\t'+err)
class Delete_Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-i', '--ids', action='store', default=None, help="Select skeletons by ID separated by comma"),
make_option('-q', '--quiet', action='store_true', help="Don't print info"),
)
def handle(self, *args, **opts):
if opts['quiet']: self.stdout = open(devnull, 'w')
# Select skeleton by ids
if opts['ids']:
ids = [ i.strip() for i in opts['ids'].split(',') ]
skeletons = Skeleton.objects.filter(id__in=ids)
else:
self.stdout.write("You must give one or more ID.")
self.print_help('skeleton', 'help')
sys.exit(1)
# Stop if no given id
if not skeletons.exists():
self.stdout.write("There's no skeleton with given ID: '%s'" % (opts['ids'] or opts['id']) )
sys.exit(1)
for s in skeletons:
s.delete()
self.stdout.write('Delete skeleton: %s' % s)
class Modify_Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-i', '--ids', action='store', help="Select skeletons by ID separated by comma"),
make_option('-n', '--name', action='store', default=None, help="Set name."),
make_option('-p', '--plugin_pattern', action='store', default=None, help="Set plugin pattern."),
make_option('-s', '--source_pattern', action='store', default=None, help="Set source pattern."),
make_option('-C', '--comment', action='store', default=None, help="Set comment."),
make_option('-q', '--quiet', action='store_true', help="Don't print info"),
)
def handle(self, *args, **opts):
if opts['quiet']: self.stdout = open(devnull, 'w')
# Select host by id or ids
ids = [ i.strip() for i in opts['ids'].split(',') ]
skeletons = Skeleton.objects.filter(id__in=ids)
# Stop if no given id
if not skeletons.exists():
self.stdout.write("There's no skeleton with given ID: '%s'" % (opts['ids'] or opts['id']) )
sys.exit(1)
# Walk on skeleton for valid or fail
modified_skeletons = []
form_error = None
for s in skeletons:
# Use Form to valid
data = s.__dict__
data.update(dict([ (k,v) for k,v in opts.items() if v ]))
F = Skeleton_Form(data=data, instance=s)
if F.is_valid():
s = F.save()
modified_skeletons.append(s)
else:
form_error = F.errors
# Walk on all list to print it
if modified_skeletons:
self.stdout.write('* Skeleton updated:')
for s in modified_skeletons:
self.stdout.write(ROW_FORMAT.format(**{u'id': 'ID', 'plugin_pattern': 'Plugin pattern', 'source_pattern': 'Source Pattern', 'name': u'Name'}))
self.stdout.write(ROW_FORMAT.format(**s.__dict__))
if form_error:
self.stdout.write('* Error:')
for field,errors in form_error.items():
self.stdout.write(field)
for err in errors:
self.stdout.write('\t'+err)
VIEW_ROW_FORMAT = '{id:5} | {name:30} | {sources:50}'
class Create_View_Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-i', '--id', action='store', help="Select skeleton by ID separated by comma"),
make_option('-I', '--hostids', action='store', help="Select hosts by ID separated by comma"),
make_option('-n', '--name', action='store', default=None, help="Set view name."),
make_option('-q', '--quiet', action='store_true', help="Don't print info"),
)
def handle(self, *args, **opts):
if opts['quiet']: self.stdout = open(devnull, 'w')
        # Select skeleton by id
if Skeleton.objects.filter(id=opts['id']).exists():
skeleton = Skeleton.objects.get(id=opts['id'])
else:
self.stdout.write("There's no skeleton with given ID: '%s'" % opts['id'])
sys.exit(1)
        # Select hosts by ids
hostids = [ i.strip() for i in opts['hostids'].split(',') ]
hosts = Host.objects.filter(hostid__in=hostids)
        # Stop if no matching host was found
        if not hosts.exists():
            self.stdout.write("There's no host with the given ID: '%s'" % opts['hostids'])
sys.exit(1)
v = skeleton.create_view(opts['name'], hosts)
v_data = v.__dict__
v_data['sources'] = ', '.join([ str(s.id) for s in v.sources.all() ])
        self.stdout.write(VIEW_ROW_FORMAT.format(**{u'id': 'ID', 'name': 'Name', 'sources': 'Sources'}))
self.stdout.write(VIEW_ROW_FORMAT.format(**v_data))
|
Nexenta/cinder | refs/heads/master | cinder/volume/flows/api/create_volume.py | 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder.common import constants
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder.objects import fields
from cinder import policy
from cinder import quota
from cinder import quota_utils
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
GB = units.Gi
QUOTAS = quota.QUOTAS
# A volume may only be created from a source volume or a source snapshot
# when that source is in one of the following statuses; other statuses
# ('error' being the common example) can not be used as a source.
SNAPSHOT_PROCEED_STATUS = (fields.SnapshotStatus.AVAILABLE,)
SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
REPLICA_PROCEED_STATUS = ('active', 'active-stopped',)
CG_PROCEED_STATUS = ('available', 'creating',)
CGSNAPSHOT_PROCEED_STATUS = ('available',)
GROUP_PROCEED_STATUS = ('available', 'creating',)
class ExtractVolumeRequestTask(flow_utils.CinderTask):
"""Processes an api request values into a validated set of values.
This tasks responsibility is to take in a set of inputs that will form
a potential volume request and validates those values against a set of
conditions and/or translates those values into a valid set and then returns
the validated/translated values for use by other tasks.
Reversion strategy: N/A
"""
# This task will produce the following outputs (said outputs can be
# saved to durable storage in the future so that the flow can be
# reconstructed elsewhere and continued).
default_provides = set(['availability_zone', 'size', 'snapshot_id',
'source_volid', 'volume_type', 'volume_type_id',
'encryption_key_id', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id',
'qos_specs', 'group_id'])
def __init__(self, image_service, availability_zones, **kwargs):
super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION],
**kwargs)
self.image_service = image_service
self.availability_zones = availability_zones
@staticmethod
def _extract_resource(resource, allowed_vals, exc, resource_name,
props=('status',)):
"""Extracts the resource id from the provided resource.
        This method validates the input resource dict: for each property named
        in the `props` argument it checks that the resource's value is in the
        corresponding list from the `allowed_vals` argument. On a mismatch an
        exception of type `exc` is raised.
:param resource: Resource dict.
:param allowed_vals: Tuple of allowed values lists.
:param exc: Exception type to raise.
:param resource_name: Name of resource - used to construct log message.
:param props: Tuple of resource properties names to validate.
:return: Id of a resource.
"""
resource_id = None
if resource:
for prop, allowed_states in zip(props, allowed_vals):
if resource[prop] not in allowed_states:
msg = _("Originating %(res)s %(prop)s must be one of "
"'%(vals)s' values")
msg = msg % {'res': resource_name,
'prop': prop,
'vals': ', '.join(allowed_states)}
# TODO(harlowja): what happens if the status changes after
# this initial resource status check occurs??? Seems like
# someone could delete the resource after this check passes
# but before the volume is officially created?
raise exc(reason=msg)
resource_id = resource['id']
return resource_id
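    # For example, _extract_snapshot below calls this as:
    #   self._extract_resource(snapshot, (SNAPSHOT_PROCEED_STATUS,),
    #                          exception.InvalidSnapshot, 'snapshot')
    # which returns snapshot['id'] only when snapshot['status'] is one of
    # the SNAPSHOT_PROCEED_STATUS values.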
def _extract_consistencygroup(self, consistencygroup):
return self._extract_resource(consistencygroup, (CG_PROCEED_STATUS,),
exception.InvalidConsistencyGroup,
'consistencygroup')
def _extract_group(self, group):
return self._extract_resource(group, (GROUP_PROCEED_STATUS,),
exception.InvalidGroup,
'group')
def _extract_cgsnapshot(self, cgsnapshot):
return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,),
exception.InvalidCgSnapshot,
                                      'cgsnapshot')
def _extract_snapshot(self, snapshot):
return self._extract_resource(snapshot, (SNAPSHOT_PROCEED_STATUS,),
exception.InvalidSnapshot, 'snapshot')
def _extract_source_volume(self, source_volume):
return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,),
exception.InvalidVolume, 'source volume')
def _extract_source_replica(self, source_replica):
return self._extract_resource(source_replica, (SRC_VOL_PROCEED_STATUS,
REPLICA_PROCEED_STATUS),
exception.InvalidVolume,
'replica', ('status',
'replication_status'))
@staticmethod
def _extract_size(size, source_volume, snapshot):
"""Extracts and validates the volume size.
        This function validates the provided size or, when it is not provided,
        fills it in from the source_volume or snapshot, then validates the
        resulting size and returns it.
"""
def validate_snap_size(size):
if snapshot and size < snapshot.volume_size:
msg = _("Volume size '%(size)s'GB cannot be smaller than"
" the snapshot size %(snap_size)sGB. "
"They must be >= original snapshot size.")
msg = msg % {'size': size,
'snap_size': snapshot.volume_size}
raise exception.InvalidInput(reason=msg)
def validate_source_size(size):
if source_volume and size < source_volume['size']:
msg = _("Volume size '%(size)s'GB cannot be smaller than "
"original volume size %(source_size)sGB. "
"They must be >= original volume size.")
msg = msg % {'size': size,
'source_size': source_volume['size']}
raise exception.InvalidInput(reason=msg)
def validate_int(size):
if not isinstance(size, int) or size <= 0:
msg = _("Volume size '%(size)s' must be an integer and"
" greater than 0") % {'size': size}
raise exception.InvalidInput(reason=msg)
# Figure out which validation functions we should be applying
# on the size value that we extract.
validator_functors = [validate_int]
if source_volume:
validator_functors.append(validate_source_size)
elif snapshot:
validator_functors.append(validate_snap_size)
# If the size is not provided then try to provide it.
if not size and source_volume:
size = source_volume['size']
elif not size and snapshot:
size = snapshot.volume_size
size = utils.as_int(size)
LOG.debug("Validating volume '%(size)s' using %(functors)s" %
{'size': size,
'functors': ", ".join([common.make_pretty_name(func)
for func in validator_functors])})
for func in validator_functors:
func(size)
return size
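    # For example, cloning a 10 GB source volume with size=None yields
    # size=10, while an explicit size=5 fails validate_source_size and
    # raises InvalidInput.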
def _check_image_metadata(self, context, image_id, size):
"""Checks image existence and validates that the image metadata."""
# Check image existence
if image_id is None:
return
# NOTE(harlowja): this should raise an error if the image does not
# exist, this is expected as it signals that the image_id is missing.
image_meta = self.image_service.show(context, image_id)
# check whether image is active
if image_meta['status'] != 'active':
msg = _('Image %(image_id)s is not active.')\
% {'image_id': image_id}
raise exception.InvalidInput(reason=msg)
# Check image size is not larger than volume size.
image_size = utils.as_int(image_meta['size'], quiet=False)
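        # Round the image size up to whole gigabytes so that, for example,
        # a one-byte image still requires a 1 GB volume.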
image_size_in_gb = (image_size + GB - 1) // GB
if image_size_in_gb > size:
msg = _('Size of specified image %(image_size)sGB'
' is larger than volume size %(volume_size)sGB.')
msg = msg % {'image_size': image_size_in_gb, 'volume_size': size}
raise exception.InvalidInput(reason=msg)
# Check image min_disk requirement is met for the particular volume
min_disk = image_meta.get('min_disk', 0)
if size < min_disk:
msg = _('Volume size %(volume_size)sGB cannot be smaller'
' than the image minDisk size %(min_disk)sGB.')
msg = msg % {'volume_size': size, 'min_disk': min_disk}
raise exception.InvalidInput(reason=msg)
def _get_image_volume_type(self, context, image_id):
"""Get cinder_img_volume_type property from the image metadata."""
# Check image existence
if image_id is None:
return None
image_meta = self.image_service.show(context, image_id)
# check whether image is active
if image_meta['status'] != 'active':
msg = (_('Image %(image_id)s is not active.') %
{'image_id': image_id})
raise exception.InvalidInput(reason=msg)
# Retrieve 'cinder_img_volume_type' property from glance image
# metadata.
image_volume_type = "cinder_img_volume_type"
properties = image_meta.get('properties')
if properties:
try:
img_vol_type = properties.get(image_volume_type)
if img_vol_type is None:
return None
volume_type = volume_types.get_volume_type_by_name(
context,
img_vol_type)
except exception.VolumeTypeNotFoundByName:
LOG.warning(_LW("Failed to retrieve volume_type from image "
"metadata. '%(img_vol_type)s' doesn't match "
"any volume types."),
{'img_vol_type': img_vol_type})
return None
LOG.debug("Retrieved volume_type from glance image metadata. "
"image_id: %(image_id)s, "
"image property: %(image_volume_type)s, "
"volume_type: %(volume_type)s." %
{'image_id': image_id,
'image_volume_type': image_volume_type,
'volume_type': volume_type})
return volume_type
def _extract_availability_zone(self, availability_zone, snapshot,
source_volume, group):
"""Extracts and returns a validated availability zone.
This function will extract the availability zone (if not provided) from
the snapshot or source_volume and then performs a set of validation
checks on the provided or extracted availability zone and then returns
the validated availability zone.
"""
        # If the volume will be created in a group, it should be placed in
        # the same availability zone as the group.
if group:
try:
availability_zone = group['availability_zone']
except (TypeError, KeyError):
pass
# Try to extract the availability zone from the corresponding snapshot
# or source volume if either is valid so that we can be in the same
# availability zone as the source.
if availability_zone is None:
if snapshot:
try:
availability_zone = snapshot['volume']['availability_zone']
except (TypeError, KeyError):
pass
if source_volume and availability_zone is None:
try:
availability_zone = source_volume['availability_zone']
except (TypeError, KeyError):
pass
if availability_zone is None:
if CONF.default_availability_zone:
availability_zone = CONF.default_availability_zone
else:
# For backwards compatibility use the storage_availability_zone
availability_zone = CONF.storage_availability_zone
if availability_zone not in self.availability_zones:
if CONF.allow_availability_zone_fallback:
original_az = availability_zone
availability_zone = (
CONF.default_availability_zone or
CONF.storage_availability_zone)
LOG.warning(_LW("Availability zone '%(s_az)s' "
"not found, falling back to "
"'%(s_fallback_az)s'."),
{'s_az': original_az,
's_fallback_az': availability_zone})
else:
msg = _("Availability zone '%(s_az)s' is invalid.")
msg = msg % {'s_az': availability_zone}
raise exception.InvalidInput(reason=msg)
# If the configuration only allows cloning to the same availability
# zone then we need to enforce that.
if CONF.cloned_volume_same_az:
snap_az = None
try:
snap_az = snapshot['volume']['availability_zone']
except (TypeError, KeyError):
pass
if snap_az and snap_az != availability_zone:
msg = _("Volume must be in the same "
"availability zone as the snapshot")
raise exception.InvalidInput(reason=msg)
source_vol_az = None
try:
source_vol_az = source_volume['availability_zone']
except (TypeError, KeyError):
pass
if source_vol_az and source_vol_az != availability_zone:
msg = _("Volume must be in the same "
"availability zone as the source volume")
raise exception.InvalidInput(reason=msg)
return availability_zone
def _get_encryption_key_id(self, key_manager, context, volume_type_id,
snapshot, source_volume):
encryption_key_id = None
if volume_types.is_encrypted(context, volume_type_id):
if snapshot is not None: # creating from snapshot
encryption_key_id = snapshot['encryption_key_id']
elif source_volume is not None: # cloning volume
encryption_key_id = source_volume['encryption_key_id']
# NOTE(joel-coffman): References to the encryption key should *not*
# be copied because the key is deleted when the volume is deleted.
# Clone the existing key and associate a separate -- but
# identical -- key with each volume.
if encryption_key_id is not None:
encryption_key_id = key_manager.store(
context, key_manager.get(context, encryption_key_id))
else:
volume_type_encryption = (
volume_types.get_volume_type_encryption(context,
volume_type_id))
cipher = volume_type_encryption.cipher
length = volume_type_encryption.key_size
# NOTE(kaitlin-farr): dm-crypt expects the cipher in a
# hyphenated format (aes-xts-plain64). The algorithm needs
# to be parsed out to pass to the key manager (aes).
algorithm = cipher.split('-')[0] if cipher else None
encryption_key_id = key_manager.create_key(context,
algorithm=algorithm,
length=length)
return encryption_key_id
def _get_volume_type_id(self, volume_type, source_volume, snapshot):
if not volume_type and source_volume:
return source_volume['volume_type_id']
elif snapshot is not None:
if volume_type:
current_volume_type_id = volume_type.get('id')
if current_volume_type_id != snapshot['volume_type_id']:
msg = _LW("Volume type will be changed to "
"be the same as the source volume.")
LOG.warning(msg)
return snapshot['volume_type_id']
else:
return volume_type.get('id')
def execute(self, context, size, snapshot, image_id, source_volume,
availability_zone, volume_type, metadata, key_manager,
source_replica, consistencygroup, cgsnapshot, group):
utils.check_exclusive_options(snapshot=snapshot,
imageRef=image_id,
source_volume=source_volume)
policy.enforce_action(context, ACTION)
# TODO(harlowja): what guarantee is there that the snapshot or source
# volume will remain available after we do this initial verification??
snapshot_id = self._extract_snapshot(snapshot)
source_volid = self._extract_source_volume(source_volume)
source_replicaid = self._extract_source_replica(source_replica)
size = self._extract_size(size, source_volume, snapshot)
consistencygroup_id = self._extract_consistencygroup(consistencygroup)
cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot)
group_id = self._extract_group(group)
self._check_image_metadata(context, image_id, size)
availability_zone = self._extract_availability_zone(availability_zone,
snapshot,
source_volume,
group)
# TODO(joel-coffman): This special handling of snapshots to ensure that
# their volume type matches the source volume is too convoluted. We
# should copy encryption metadata from the encrypted volume type to the
# volume upon creation and propagate that information to each snapshot.
# This strategy avoids any dependency upon the encrypted volume type.
def_vol_type = volume_types.get_default_volume_type()
if not volume_type and not source_volume and not snapshot:
image_volume_type = self._get_image_volume_type(context, image_id)
volume_type = (image_volume_type if image_volume_type else
def_vol_type)
# When creating a clone of a replica (replication test), we can't
# use the volume type of the replica, therefore, we use the default.
# NOTE(ronenkat): this assumes the default type is not replicated.
if source_replicaid:
volume_type = def_vol_type
volume_type_id = self._get_volume_type_id(volume_type,
source_volume, snapshot)
encryption_key_id = self._get_encryption_key_id(key_manager,
context,
volume_type_id,
snapshot,
source_volume)
specs = {}
if volume_type_id:
qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id)
if qos_specs['qos_specs']:
specs = qos_specs['qos_specs'].get('specs', {})
if not specs:
# to make sure we don't pass empty dict
specs = None
return {
'size': size,
'snapshot_id': snapshot_id,
'source_volid': source_volid,
'availability_zone': availability_zone,
'volume_type': volume_type,
'volume_type_id': volume_type_id,
'encryption_key_id': encryption_key_id,
'qos_specs': specs,
'source_replicaid': source_replicaid,
'consistencygroup_id': consistencygroup_id,
'cgsnapshot_id': cgsnapshot_id,
'group_id': group_id,
}
class EntryCreateTask(flow_utils.CinderTask):
"""Creates an entry for the given volume creation in the database.
Reversion strategy: remove the volume_id created from the database.
"""
default_provides = set(['volume_properties', 'volume_id', 'volume'])
def __init__(self, db):
requires = ['availability_zone', 'description', 'metadata',
'name', 'reservations', 'size', 'snapshot_id',
'source_volid', 'volume_type_id', 'encryption_key_id',
'source_replicaid', 'consistencygroup_id',
'cgsnapshot_id', 'multiattach', 'qos_specs',
'group_id', ]
super(EntryCreateTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, optional_args, **kwargs):
"""Creates a database entry for the given inputs and returns details.
        Accesses the database and creates a new entry for the volume to be
        created, using the given volume properties which are extracted from the
input kwargs (and associated requirements this task needs). These
requirements should be previously satisfied and validated by a
pre-cursor task.
"""
volume_properties = {
'size': kwargs.pop('size'),
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
'encryption_key_id': kwargs.pop('encryption_key_id'),
# Rename these to the internal name.
'display_description': kwargs.pop('description'),
'display_name': kwargs.pop('name'),
'replication_status': 'disabled',
'multiattach': kwargs.pop('multiattach'),
}
# Merge in the other required arguments which should provide the rest
# of the volume property fields (if applicable).
volume_properties.update(kwargs)
volume = objects.Volume(context=context, **volume_properties)
volume.create()
# FIXME(dulek): We're passing this volume_properties dict through RPC
# in request_spec. This shouldn't be needed, most data is replicated
# in both volume and other places. We should make Newton read data
# from just one correct place and leave just compatibility code.
#
# Right now - let's move it to versioned objects to be able to make
# non-backward compatible changes.
volume_properties = objects.VolumeProperties(**volume_properties)
return {
'volume_id': volume['id'],
'volume_properties': volume_properties,
# NOTE(harlowja): it appears like further usage of this volume
# result actually depend on it being a sqlalchemy object and not
# just a plain dictionary so that's why we are storing this here.
#
# In the future where this task results can be serialized and
# restored automatically for continued running we will need to
# resolve the serialization & recreation of this object since raw
# sqlalchemy objects can't be serialized.
'volume': volume,
}
def revert(self, context, result, optional_args, **kwargs):
if isinstance(result, ft.Failure):
# We never produced a result and therefore can't destroy anything.
return
if optional_args['is_quota_committed']:
# If quota got committed we shouldn't rollback as the volume has
# already been created and the quota has already been absorbed.
return
volume = result['volume']
try:
volume.destroy()
except exception.CinderException:
# We are already reverting, therefore we should silence this
# exception since a second exception being active will be bad.
#
# NOTE(harlowja): Being unable to destroy a volume is pretty
# bad though!!
LOG.exception(_LE("Failed destroying volume entry %s"), volume.id)
class QuotaReserveTask(flow_utils.CinderTask):
"""Reserves a single volume with the given size & the given volume type.
Reversion strategy: rollback the quota reservation.
    Warning: if the process that is running this reserve and commit sequence
    fails (or is killed) before the quota is rolled back or committed, the
    quota will never be rolled back. This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
taskflow has persistence built-in this should be easier to correct via
an automated or manual process.
"""
default_provides = set(['reservations'])
def __init__(self):
super(QuotaReserveTask, self).__init__(addons=[ACTION])
def execute(self, context, size, volume_type_id, optional_args):
try:
values = {'per_volume_gigabytes': size}
QUOTAS.limit_check(context, project_id=context.project_id,
**values)
except exception.OverQuota as e:
quotas = e.kwargs['quotas']
raise exception.VolumeSizeExceedsLimit(
size=size, limit=quotas['per_volume_gigabytes'])
try:
reserve_opts = {'volumes': 1, 'gigabytes': size}
QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(context, **reserve_opts)
return {
'reservations': reservations,
}
except exception.OverQuota as e:
quota_utils.process_reserve_over_quota(context, e,
resource='volumes',
size=size)
def revert(self, context, result, optional_args, **kwargs):
# We never produced a result and therefore can't destroy anything.
if isinstance(result, ft.Failure):
return
if optional_args['is_quota_committed']:
# The reservations have already been committed and can not be
# rolled back at this point.
return
# We actually produced an output that we can revert so lets attempt
# to use said output to rollback the reservation.
reservations = result['reservations']
try:
QUOTAS.rollback(context, reservations)
except exception.CinderException:
# We are already reverting, therefore we should silence this
# exception since a second exception being active will be bad.
LOG.exception(_LE("Failed rolling back quota for"
" %s reservations"), reservations)
class QuotaCommitTask(flow_utils.CinderTask):
"""Commits the reservation.
    Reversion strategy: N/A (the rollback will be handled by the task that did
    the initial reservation; see QuotaReserveTask).
    Warning: if the process that is running this reserve and commit sequence
    fails (or is killed) before the quota is rolled back or committed, the
    quota will never be rolled back. This makes
software upgrades hard (inflight operations will need to be stopped or
allowed to complete before the upgrade can occur). *In the future* when
taskflow has persistence built-in this should be easier to correct via
an automated or manual process.
"""
def __init__(self):
super(QuotaCommitTask, self).__init__(addons=[ACTION])
def execute(self, context, reservations, volume_properties,
optional_args):
QUOTAS.commit(context, reservations)
# updating is_quota_committed attribute of optional_args dictionary
optional_args['is_quota_committed'] = True
return {'volume_properties': volume_properties}
def revert(self, context, result, **kwargs):
# We never produced a result and therefore can't destroy anything.
if isinstance(result, ft.Failure):
return
volume = result['volume_properties']
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=context.project_id,
**reserve_opts)
if reservations:
QUOTAS.commit(context, reservations,
project_id=context.project_id)
except Exception:
LOG.exception(_LE("Failed to update quota for deleting "
"volume: %s"), volume['id'])
class VolumeCastTask(flow_utils.CinderTask):
"""Performs a volume create cast to the scheduler or to the volume manager.
This will signal a transition of the api workflow to another child and/or
related workflow on another component.
Reversion strategy: rollback source volume status and error out newly
created volume.
"""
def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
requires = ['image_id', 'scheduler_hints', 'snapshot_id',
'source_volid', 'volume_id', 'volume', 'volume_type',
'volume_properties', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id', 'group_id', ]
super(VolumeCastTask, self).__init__(addons=[ACTION],
requires=requires)
self.volume_rpcapi = volume_rpcapi
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
def _cast_create_volume(self, context, request_spec, filter_properties):
source_volid = request_spec['source_volid']
source_replicaid = request_spec['source_replicaid']
volume_id = request_spec['volume_id']
volume = request_spec['volume']
snapshot_id = request_spec['snapshot_id']
image_id = request_spec['image_id']
cgroup_id = request_spec['consistencygroup_id']
host = None
cgsnapshot_id = request_spec['cgsnapshot_id']
group_id = request_spec['group_id']
if cgroup_id:
            # If cgroup_id exists, cast the volume to the scheduler so it can
            # choose a pool whose backend is the same as the CG's backend.
cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
elif group_id:
            # If group_id exists, cast the volume to the scheduler so it can
            # choose a pool whose backend is the same as the group's backend.
group = objects.Group.get_by_id(context, group_id)
# FIXME(wanghao): group_backend got added before request_spec was
# converted to versioned objects. We should make sure that this
# will be handled by object version translations once we add
# RequestSpec object.
request_spec['group_backend'] = vol_utils.extract_host(group.host)
elif snapshot_id and CONF.snapshot_same_host:
# NOTE(Rongze Zhu): A simple solution for bug 1008866.
#
# If snapshot_id is set and CONF.snapshot_same_host is True, make
# the call create volume directly to the volume host where the
# snapshot resides instead of passing it through the scheduler, so
# snapshot can be copied to the new volume.
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
source_volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
host = source_volume_ref.host
elif source_volid:
source_volume_ref = objects.Volume.get_by_id(context,
source_volid)
host = source_volume_ref.host
elif source_replicaid:
source_volume_ref = objects.Volume.get_by_id(context,
source_replicaid)
host = source_volume_ref.host
if not host:
# Cast to the scheduler and let it handle whatever is needed
# to select the target host for this volume.
self.scheduler_rpcapi.create_volume(
context,
constants.VOLUME_TOPIC,
volume_id,
snapshot_id=snapshot_id,
image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties,
volume=volume)
else:
# Bypass the scheduler and send the request directly to the volume
# manager.
volume.host = host
volume.scheduled_at = timeutils.utcnow()
volume.save()
if not cgsnapshot_id:
self.volume_rpcapi.create_volume(
context,
volume,
volume.host,
request_spec,
filter_properties,
allow_reschedule=False)
def execute(self, context, **kwargs):
scheduler_hints = kwargs.pop('scheduler_hints', None)
db_vt = kwargs.pop('volume_type')
kwargs['volume_type'] = None
if db_vt:
kwargs['volume_type'] = objects.VolumeType()
objects.VolumeType()._from_db_object(context,
kwargs['volume_type'], db_vt)
request_spec = objects.RequestSpec(**kwargs)
filter_properties = {}
if scheduler_hints:
filter_properties['scheduler_hints'] = scheduler_hints
self._cast_create_volume(context, request_spec, filter_properties)
def revert(self, context, result, flow_failures, volume, **kwargs):
if isinstance(result, ft.Failure):
return
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, kwargs)
common.error_out(volume)
LOG.error(_LE("Volume %s: create failed"), volume.id)
exc_info = False
if all(flow_failures[-1].exc_info):
exc_info = flow_failures[-1].exc_info
LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def get_flow(db_api, image_service_api, availability_zones, create_what,
scheduler_rpcapi=None, volume_rpcapi=None):
"""Constructs and returns the api entrypoint flow.
This flow will do the following:
1. Inject keys & values for dependent tasks.
2. Extracts and validates the input keys & values.
3. Reserves the quota (reverts quota on any failures).
4. Creates the database entry.
5. Commits the quota.
6. Casts to volume manager or scheduler for further processing.
"""
flow_name = ACTION.replace(":", "_") + "_api"
api_flow = linear_flow.Flow(flow_name)
api_flow.add(ExtractVolumeRequestTask(
image_service_api,
availability_zones,
rebind={'size': 'raw_size',
'availability_zone': 'raw_availability_zone',
'volume_type': 'raw_volume_type'}))
api_flow.add(QuotaReserveTask(),
EntryCreateTask(db_api),
QuotaCommitTask())
if scheduler_rpcapi and volume_rpcapi:
# This will cast it out to either the scheduler or volume manager via
# the rpc apis provided.
api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(api_flow, store=create_what)
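# Minimal usage sketch (illustrative; the real caller in cinder.volume.api
# builds a fully populated `create_what` dict before running the engine):
#   engine = get_flow(db, image_service, azs, create_what,
#                     scheduler_rpcapi=sched_rpcapi, volume_rpcapi=vol_rpcapi)
#   engine.run()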
|
p0psicles/SickGear | refs/heads/master | lib/hachoir_parser/archive/zip.py | 90 | """
Zip splitter.
Status: can read most important headers
Authors: Christophe Gisquet and Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, Enum,
TimeDateMSDOS32, SubFile,
UInt8, UInt16, UInt32, UInt64,
String, PascalString16,
RawBytes)
from lib.hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from lib.hachoir_core.error import HACHOIR_ERRORS
from lib.hachoir_core.tools import makeUnicode
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.deflate import Deflate
MAX_FILESIZE = 1000 * 1024 * 1024
COMPRESSION_DEFLATE = 8
COMPRESSION_METHOD = {
0: u"no compression",
1: u"Shrunk",
2: u"Reduced (factor 1)",
3: u"Reduced (factor 2)",
4: u"Reduced (factor 3)",
5: u"Reduced (factor 4)",
6: u"Imploded",
7: u"Tokenizing",
8: u"Deflate",
9: u"Deflate64",
10: u"PKWARE Imploding",
11: u"Reserved by PKWARE",
12: u"File is compressed using BZIP2 algorithm",
13: u"Reserved by PKWARE",
14: u"LZMA (EFS)",
15: u"Reserved by PKWARE",
16: u"Reserved by PKWARE",
17: u"Reserved by PKWARE",
18: u"File is compressed using IBM TERSE (new)",
19: u"IBM LZ77 z Architecture (PFS)",
98: u"PPMd version I, Rev 1",
}
def ZipRevision(field):
return "%u.%u" % divmod(field.value, 10)
class ZipVersion(FieldSet):
static_size = 16
HOST_OS = {
0: u"FAT file system (DOS, OS/2, NT)",
1: u"Amiga",
2: u"VMS (VAX or Alpha AXP)",
3: u"Unix",
4: u"VM/CMS",
5: u"Atari",
6: u"HPFS file system (OS/2, NT 3.x)",
7: u"Macintosh",
8: u"Z-System",
9: u"CP/M",
10: u"TOPS-20",
11: u"NTFS file system (NT)",
12: u"SMS/QDOS",
13: u"Acorn RISC OS",
14: u"VFAT file system (Win95, NT)",
15: u"MVS",
16: u"BeOS (BeBox or PowerMac)",
17: u"Tandem",
}
def createFields(self):
yield textHandler(UInt8(self, "zip_version", "ZIP version"), ZipRevision)
yield Enum(UInt8(self, "host_os", "ZIP Host OS"), self.HOST_OS)
class ZipGeneralFlags(FieldSet):
static_size = 16
def createFields(self):
        # Peek at the compression method (the 16 bits that follow these
        # flags); it determines how the method-specific bits are interpreted.
method = self.stream.readBits(self.absolute_address+16, 16, LITTLE_ENDIAN)
yield Bits(self, "unused[]", 2, "Unused")
yield Bit(self, "encrypted_central_dir", "Selected data values in the Local Header are masked")
yield Bit(self, "incomplete", "Reserved by PKWARE for enhanced compression.")
yield Bit(self, "uses_unicode", "Filename and comments are in UTF-8")
yield Bits(self, "unused[]", 4, "Unused")
yield Bit(self, "strong_encrypt", "Strong encryption (version >= 50)")
yield Bit(self, "is_patched", "File is compressed with patched data?")
yield Bit(self, "enhanced_deflate", "Reserved for use with method 8")
yield Bit(self, "has_descriptor",
"Compressed data followed by descriptor?")
if method == 6:
yield Bit(self, "use_8k_sliding", "Use 8K sliding dictionary (instead of 4K)")
yield Bit(self, "use_3shannon", "Use a 3 Shannon-Fano tree (instead of 2 Shannon-Fano)")
elif method in (8, 9):
NAME = {
0: "Normal compression",
1: "Maximum compression",
2: "Fast compression",
3: "Super Fast compression"
}
yield Enum(Bits(self, "method", 2), NAME)
elif method == 14: #LZMA
yield Bit(self, "lzma_eos", "LZMA stream is ended with a EndOfStream marker")
yield Bit(self, "unused[]")
else:
yield Bits(self, "compression_info", 2)
yield Bit(self, "is_encrypted", "File is encrypted?")
class ExtraField(FieldSet):
EXTRA_FIELD_ID = {
0x0007: "AV Info",
0x0009: "OS/2 extended attributes (also Info-ZIP)",
0x000a: "PKWARE Win95/WinNT FileTimes", # undocumented!
0x000c: "PKWARE VAX/VMS (also Info-ZIP)",
0x000d: "PKWARE Unix",
0x000f: "Patch Descriptor",
0x07c8: "Info-ZIP Macintosh (old, J. Lee)",
0x2605: "ZipIt Macintosh (first version)",
0x2705: "ZipIt Macintosh v 1.3.5 and newer (w/o full filename)",
0x334d: "Info-ZIP Macintosh (new, D. Haase Mac3 field)",
0x4341: "Acorn/SparkFS (David Pilling)",
0x4453: "Windows NT security descriptor (binary ACL)",
0x4704: "VM/CMS",
0x470f: "MVS",
0x4b46: "FWKCS MD5 (third party, see below)",
0x4c41: "OS/2 access control list (text ACL)",
0x4d49: "Info-ZIP VMS (VAX or Alpha)",
0x5356: "AOS/VS (binary ACL)",
0x5455: "extended timestamp",
0x5855: "Info-ZIP Unix (original; also OS/2, NT, etc.)",
0x6542: "BeOS (BeBox, PowerMac, etc.)",
0x756e: "ASi Unix",
0x7855: "Info-ZIP Unix (new)",
0xfb4a: "SMS/QDOS",
}
def createFields(self):
yield Enum(UInt16(self, "field_id", "Extra field ID"),
self.EXTRA_FIELD_ID)
size = UInt16(self, "field_data_size", "Extra field data size")
yield size
if size.value > 0:
yield RawBytes(self, "field_data", size, "Unknown field data")
def ZipStartCommonFields(self):
yield ZipVersion(self, "version_needed", "Version needed")
yield ZipGeneralFlags(self, "flags", "General purpose flag")
yield Enum(UInt16(self, "compression", "Compression method"),
COMPRESSION_METHOD)
yield TimeDateMSDOS32(self, "last_mod", "Last modification file time")
yield textHandler(UInt32(self, "crc32", "CRC-32"), hexadecimal)
yield UInt32(self, "compressed_size", "Compressed size")
yield UInt32(self, "uncompressed_size", "Uncompressed size")
yield UInt16(self, "filename_length", "Filename length")
yield UInt16(self, "extra_length", "Extra fields length")
def zipGetCharset(self):
if self["flags/uses_unicode"].value:
return "UTF-8"
else:
return "ISO-8859-15"
class ZipCentralDirectory(FieldSet):
HEADER = 0x02014b50
def createFields(self):
yield ZipVersion(self, "version_made_by", "Version made by")
for field in ZipStartCommonFields(self):
yield field
# Check unicode status
charset = zipGetCharset(self)
yield UInt16(self, "comment_length", "Comment length")
yield UInt16(self, "disk_number_start", "Disk number start")
yield UInt16(self, "internal_attr", "Internal file attributes")
yield UInt32(self, "external_attr", "External file attributes")
yield UInt32(self, "offset_header", "Relative offset of local header")
yield String(self, "filename", self["filename_length"].value,
"Filename", charset=charset)
if 0 < self["extra_length"].value:
yield RawBytes(self, "extra", self["extra_length"].value,
"Extra fields")
if 0 < self["comment_length"].value:
yield String(self, "comment", self["comment_length"].value,
"Comment", charset=charset)
def createDescription(self):
return "Central directory: %s" % self["filename"].display
class Zip64EndCentralDirectory(FieldSet):
HEADER = 0x06064b50
def createFields(self):
yield UInt64(self, "zip64_end_size",
"Size of zip64 end of central directory record")
yield ZipVersion(self, "version_made_by", "Version made by")
yield ZipVersion(self, "version_needed", "Version needed to extract")
yield UInt32(self, "number_disk", "Number of this disk")
yield UInt32(self, "number_disk2",
"Number of the disk with the start of the central directory")
yield UInt64(self, "number_entries",
"Total number of entries in the central directory on this disk")
yield UInt64(self, "number_entries2",
"Total number of entries in the central directory")
yield UInt64(self, "size", "Size of the central directory")
yield UInt64(self, "offset", "Offset of start of central directory")
if 0 < self["zip64_end_size"].value:
yield RawBytes(self, "data_sector", self["zip64_end_size"].value,
"zip64 extensible data sector")
class ZipEndCentralDirectory(FieldSet):
HEADER = 0x06054b50
def createFields(self):
yield UInt16(self, "number_disk", "Number of this disk")
yield UInt16(self, "number_disk2", "Number in the central dir")
yield UInt16(self, "total_number_disk",
"Total number of entries in this disk")
yield UInt16(self, "total_number_disk2",
"Total number of entries in the central dir")
yield UInt32(self, "size", "Size of the central directory")
yield UInt32(self, "offset", "Offset of start of central directory")
yield PascalString16(self, "comment", "ZIP comment")
class ZipDataDescriptor(FieldSet):
HEADER_STRING = "\x50\x4B\x07\x08"
HEADER = 0x08074B50
static_size = 96
def createFields(self):
yield textHandler(UInt32(self, "file_crc32",
"Checksum (CRC32)"), hexadecimal)
yield filesizeHandler(UInt32(self, "file_compressed_size",
"Compressed size (bytes)"))
yield filesizeHandler(UInt32(self, "file_uncompressed_size",
"Uncompressed size (bytes)"))
class FileEntry(FieldSet):
HEADER = 0x04034B50
filename = None
def data(self, size):
compression = self["compression"].value
if compression == 0:
return SubFile(self, "data", size, filename=self.filename)
compressed = SubFile(self, "compressed_data", size, filename=self.filename)
if compression == COMPRESSION_DEFLATE:
return Deflate(compressed)
else:
return compressed
def resync(self):
# Non-seekable output, search the next data descriptor
size = self.stream.searchBytesLength(ZipDataDescriptor.HEADER_STRING, False,
self.absolute_address+self.current_size)
if size <= 0:
raise ParserError("Couldn't resync to %s" %
ZipDataDescriptor.HEADER_STRING)
yield self.data(size)
yield textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
data_desc = ZipDataDescriptor(self, "data_desc", "Data descriptor")
#self.info("Resynced!")
yield data_desc
        # The above could be checked at any time, but we prefer to keep
        # parsing rather than aborting
if self["crc32"].value == 0 and \
data_desc["file_compressed_size"].value != size:
raise ParserError("Bad resync: position=>%i but data_desc=>%i" %
(size, data_desc["file_compressed_size"].value))
def createFields(self):
for field in ZipStartCommonFields(self):
yield field
length = self["filename_length"].value
if length:
filename = String(self, "filename", length, "Filename",
charset=zipGetCharset(self))
yield filename
self.filename = filename.value
if self["extra_length"].value:
yield RawBytes(self, "extra", self["extra_length"].value, "Extra")
size = self["compressed_size"].value
if size > 0:
yield self.data(size)
elif self["flags/incomplete"].value:
for field in self.resync():
yield field
if self["flags/has_descriptor"].value:
yield ZipDataDescriptor(self, "data_desc", "Data descriptor")
def createDescription(self):
return "File entry: %s (%s)" % \
(self["filename"].value, self["compressed_size"].display)
def validate(self):
if self["compression"].value not in COMPRESSION_METHOD:
return "Unknown compression method (%u)" % self["compression"].value
return ""
class ZipSignature(FieldSet):
HEADER = 0x05054B50
def createFields(self):
yield PascalString16(self, "signature", "Signature")
class Zip64EndCentralDirectoryLocator(FieldSet):
HEADER = 0x07064b50
def createFields(self):
yield UInt32(self, "disk_number", \
"Number of the disk with the start of the zip64 end of central directory")
yield UInt64(self, "relative_offset", \
"Relative offset of the zip64 end of central directory record")
yield UInt32(self, "disk_total_number", "Total number of disks")
class ZipFile(Parser):
endian = LITTLE_ENDIAN
MIME_TYPES = {
# Default ZIP archive
u"application/zip": "zip",
u"application/x-zip": "zip",
# Java archive (JAR)
u"application/x-jar": "jar",
u"application/java-archive": "jar",
# OpenOffice 1.0
u"application/vnd.sun.xml.calc": "sxc",
u"application/vnd.sun.xml.draw": "sxd",
u"application/vnd.sun.xml.impress": "sxi",
u"application/vnd.sun.xml.writer": "sxw",
u"application/vnd.sun.xml.math": "sxm",
# OpenOffice 1.0 (template)
u"application/vnd.sun.xml.calc.template": "stc",
u"application/vnd.sun.xml.draw.template": "std",
u"application/vnd.sun.xml.impress.template": "sti",
u"application/vnd.sun.xml.writer.template": "stw",
u"application/vnd.sun.xml.writer.global": "sxg",
# OpenDocument
u"application/vnd.oasis.opendocument.chart": "odc",
u"application/vnd.oasis.opendocument.image": "odi",
u"application/vnd.oasis.opendocument.database": "odb",
u"application/vnd.oasis.opendocument.formula": "odf",
u"application/vnd.oasis.opendocument.graphics": "odg",
u"application/vnd.oasis.opendocument.presentation": "odp",
u"application/vnd.oasis.opendocument.spreadsheet": "ods",
u"application/vnd.oasis.opendocument.text": "odt",
u"application/vnd.oasis.opendocument.text-master": "odm",
# OpenDocument (template)
u"application/vnd.oasis.opendocument.graphics-template": "otg",
u"application/vnd.oasis.opendocument.presentation-template": "otp",
u"application/vnd.oasis.opendocument.spreadsheet-template": "ots",
u"application/vnd.oasis.opendocument.text-template": "ott",
}
PARSER_TAGS = {
"id": "zip",
"category": "archive",
"file_ext": tuple(MIME_TYPES.itervalues()),
"mime": tuple(MIME_TYPES.iterkeys()),
"magic": (("PK\3\4", 0),),
"subfile": "skip",
"min_size": (4 + 26)*8, # header + file entry
"description": "ZIP archive"
}
def validate(self):
if self["header[0]"].value != FileEntry.HEADER:
return "Invalid magic"
try:
file0 = self["file[0]"]
except HACHOIR_ERRORS, err:
return "Unable to get file #0"
err = file0.validate()
if err:
return "File #0: %s" % err
return True
def createFields(self):
# File data
self.signature = None
self.central_directory = []
while not self.eof:
header = textHandler(UInt32(self, "header[]", "Header"), hexadecimal)
yield header
header = header.value
if header == FileEntry.HEADER:
yield FileEntry(self, "file[]")
elif header == ZipDataDescriptor.HEADER:
yield ZipDataDescriptor(self, "spanning[]")
elif header == 0x30304b50:
yield ZipDataDescriptor(self, "temporary_spanning[]")
elif header == ZipCentralDirectory.HEADER:
yield ZipCentralDirectory(self, "central_directory[]")
elif header == ZipEndCentralDirectory.HEADER:
yield ZipEndCentralDirectory(self, "end_central_directory", "End of central directory")
elif header == Zip64EndCentralDirectory.HEADER:
yield Zip64EndCentralDirectory(self, "end64_central_directory", "ZIP64 end of central directory")
elif header == ZipSignature.HEADER:
yield ZipSignature(self, "signature", "Signature")
elif header == Zip64EndCentralDirectoryLocator.HEADER:
                yield Zip64EndCentralDirectoryLocator(self, "end_locator", "ZIP64 End of central directory locator")
else:
raise ParserError("Error, unknown ZIP header (0x%08X)." % header)
def createMimeType(self):
if self["file[0]/filename"].value == "mimetype":
return makeUnicode(self["file[0]/data"].value)
else:
return u"application/zip"
def createFilenameSuffix(self):
if self["file[0]/filename"].value == "mimetype":
mime = self["file[0]/compressed_data"].value
if mime in self.MIME_TYPES:
return "." + self.MIME_TYPES[mime]
return ".zip"
def createContentSize(self):
start = 0
end = MAX_FILESIZE * 8
end = self.stream.searchBytes("PK\5\6", start, end)
if end is not None:
return end + 22*8
return None
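# Typical use through hachoir (illustrative sketch; Python 2 era, matching
# this code base):
#   from lib.hachoir_parser import createParser
#   parser = createParser(u"archive.zip")  # returns a ZipFile instance
#   for field in parser:
#       print field.path, field.description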
|
mat650/metagoofil | refs/heads/master | hachoir_parser/misc/ole2_util.py | 74 | from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN
from hachoir_core.field import RawBytes, RootSeekableFieldSet, ParserError
from hachoir_parser import HachoirParser
class OLE2FragmentParser(HachoirParser,RootSeekableFieldSet):
tags = {
"description": "Microsoft Office document subfragments",
}
endian = LITTLE_ENDIAN
ENDIAN_CHECK=False
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
if self.ENDIAN_CHECK:
if self["endian"].value == "\xFF\xFE":
self.endian = BIG_ENDIAN
elif self["endian"].value == "\xFE\xFF":
self.endian = LITTLE_ENDIAN
else:
raise ParserError("OLE2: Invalid endian value")
def validate(self):
if self.ENDIAN_CHECK:
if self["endian"].value not in ["\xFF\xFE", "\xFE\xFF"]:
return "Unknown endian value %s"%self["endian"].value.encode('hex')
return True
class RawParser(OLE2FragmentParser):
ENDIAN_CHECK=False
OS_CHECK=False
def createFields(self):
yield RawBytes(self,"rawdata",self.datasize)
        if self.datasize < self.size//8:
            yield RawBytes(self, "slack_space", (self.size//8) - self.datasize)
|
silly-wacky-3-town-toon/SOURCE-COD | refs/heads/master | toontown/ai/DistributedHydrantZeroMgrAI.py | 5 | from direct.directnotify import DirectNotifyGlobal
from toontown.ai.DistributedPhaseEventMgrAI import DistributedPhaseEventMgrAI
class DistributedHydrantZeroMgrAI(DistributedPhaseEventMgrAI):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedHydrantZeroMgrAI")
|
riklaunim/django-custom-multisite | refs/heads/master | tests/modeltests/model_package/models/article.py | 150 | from django.contrib.sites.models import Site
from django.db import models
class Article(models.Model):
sites = models.ManyToManyField(Site)
headline = models.CharField(max_length=100)
publications = models.ManyToManyField("model_package.Publication", null=True, blank=True,)
class Meta:
app_label = 'model_package'
|
Dhivyap/ansible | refs/heads/devel | lib/ansible/plugins/test/__init__.py | 673 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
LibriCerule/Cerulean_Tracking | refs/heads/master | env/lib/python3.5/site-packages/requests/packages/urllib3/contrib/ntlmpool.py | 1009 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
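# Illustrative usage (host, credentials and authurl are placeholders):
#   pool = NTLMConnectionPool('DOMAIN\\user', 'secret', '/protected',
#                             host='example.com', port=443)
#   response = pool.urlopen('GET', '/protected')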
|
huongttlan/statsmodels | refs/heads/master | statsmodels/sandbox/examples/example_gam.py | 33 | '''original example for checking how far GAM works
Note: uncomment plt.show() to display graphs
'''
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
y_pred = m.results.predict(d)
plt.figure()
plt.plot(y, '.')
plt.plot(z, 'b-', label='true')
plt.plot(y_pred, 'r-', label='AdditiveModel')
plt.legend()
plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
print("binomial")
f = family.Binomial()
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
if example == 3:
print("Poisson")
f = family.Poisson()
y = y/y.max() * 3
yp = f.link.inverse(y)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
|
WillGuan105/django | refs/heads/master | django/contrib/postgres/fields/array.py | 186 | import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.utils import six
from django.utils.translation import string_concat, ugettext_lazy as _
from .utils import AttributeSetter
__all__ = ['ArrayField']
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
super(ArrayField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super(ArrayField, self).contribute_to_class(cls, name, **kwargs)
self.base_field.model = cls
def check(self, **kwargs):
errors = super(ArrayField, self).check(**kwargs)
if self.base_field.remote_field:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
hint=None,
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
hint=None,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super(ArrayField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_db_prep_value(self, value, connection, prepared=False):
        if isinstance(value, (list, tuple)):
return [self.base_field.get_db_prep_value(i, connection, prepared) for i in value]
return value
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
if path == 'django.contrib.postgres.fields.array.ArrayField':
path = 'django.contrib.postgres.fields.ArrayField'
kwargs.update({
'base_field': self.base_field,
'size': self.size,
})
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def value_to_string(self, obj):
values = []
vals = self.value_from_object(obj)
base_field = self.base_field
for val in vals:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end) # don't add one here because postgres slices are weird
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
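    # For example (illustrative queryset; `Post`/`tags` are placeholder
    # names): Post.objects.filter(tags__0='django') uses IndexTransform,
    # while tags__0_1 produces a one-element slice via SliceTransform.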
def validate(self, value, model_instance):
super(ArrayField, self).validate(value, model_instance)
for i, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as e:
raise exceptions.ValidationError(
string_concat(self.error_messages['item_invalid'], e.message),
code='item_invalid',
params={'nth': i},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def run_validators(self, value):
super(ArrayField, self).run_validators(value)
for i, part in enumerate(value):
try:
self.base_field.run_validators(part)
except exceptions.ValidationError as e:
raise exceptions.ValidationError(
string_concat(self.error_messages['item_invalid'], ' '.join(e.messages)),
code='item_invalid',
params={'nth': i},
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ArrayField, self).formfield(**defaults)
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
def as_sql(self, qn, connection):
sql, params = super(ArrayContains, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
def as_sql(self, qn, connection):
sql, params = super(ArrayContainedBy, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
def as_sql(self, qn, connection):
sql, params = super(ArrayOverlap, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
lookup_name = 'len'
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return 'array_length(%s, 1)' % lhs, params
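# A minimal sketch of the len transform, assuming the hypothetical Post model:
#   Post.objects.filter(tags__len=3)   # -> WHERE array_length("tags", 1) = 3
# Only the first array dimension is measured.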
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory(object):
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
|
patilsangram/erpnext | refs/heads/develop | erpnext/selling/doctype/lead_source/lead_source.py | 53 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class LeadSource(Document):
pass
|
walteranderson/python-koans | refs/heads/master | python3/koans/about_dictionaries.py | 91 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
def test_creating_dictionaries(self):
empty_dict = dict()
self.assertEqual(dict, type(empty_dict))
self.assertDictEqual({}, empty_dict)
self.assertEqual(__, len(empty_dict))
def test_dictionary_literals(self):
empty_dict = {}
self.assertEqual(dict, type(empty_dict))
babel_fish = { 'one': 'uno', 'two': 'dos' }
self.assertEqual(__, len(babel_fish))
def test_accessing_dictionaries(self):
babel_fish = { 'one': 'uno', 'two': 'dos' }
self.assertEqual(__, babel_fish['one'])
self.assertEqual(__, babel_fish['two'])
def test_changing_dictionaries(self):
babel_fish = { 'one': 'uno', 'two': 'dos' }
babel_fish['one'] = 'eins'
expected = { 'two': 'dos', 'one': __ }
self.assertDictEqual(expected, babel_fish)
def test_dictionary_is_unordered(self):
dict1 = { 'one': 'uno', 'two': 'dos' }
dict2 = { 'two': 'dos', 'one': 'uno' }
self.assertEqual(__, dict1 == dict2)
def test_dictionary_keys_and_values(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, len(babel_fish.keys()))
self.assertEqual(__, len(babel_fish.values()))
self.assertEqual(__, 'one' in babel_fish.keys())
self.assertEqual(__, 'two' in babel_fish.values())
self.assertEqual(__, 'uno' in babel_fish.keys())
self.assertEqual(__, 'dos' in babel_fish.values())
def test_making_a_dictionary_from_a_sequence_of_keys(self):
cards = {}.fromkeys(('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf', 'confused looking zebra'), 42)
self.assertEqual(__, len(cards))
self.assertEqual(__, cards['green elf'])
self.assertEqual(__, cards['yellow dwarf'])
|
TeamSWAP/swap | refs/heads/master | external/pyinstaller/PyInstaller/depend/impdirector.py | 10 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
ImportDirectors live on the metapath.
There's one for builtins and one for sys.path.
Windows gets one for modules found in the Registry.
There should be one for frozen modules.
Mac would have them for PY_RESOURCE modules, etc.
A generalization of Owner - their concept of "turf" is broader.
"""
import os
import sys
import imp
import marshal
from PyInstaller import depend
import PyInstaller.depend.owner
import PyInstaller.log as logging
logger = logging.getLogger(__name__)
def getDescr(fnm):
ext = os.path.splitext(fnm)[1]
for (suffix, mode, typ) in imp.get_suffixes():
if suffix == ext:
return (suffix, mode, typ)
class ImportDirector(PyInstaller.depend.owner.Owner):
pass
class BuiltinImportDirector(ImportDirector):
def __init__(self):
self.path = 'Builtins'
def getmod(self, nm, isbuiltin=imp.is_builtin):
if isbuiltin(nm):
return depend.modules.BuiltinModule(nm)
return None
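# A minimal sketch of how a director is consulted, assuming an interactive
# session on CPython:
#   BuiltinImportDirector().getmod('sys')  # -> BuiltinModule for 'sys'
#   BuiltinImportDirector().getmod('os')   # -> None ('os' is pure Python, not a builtin)
# Callers simply try each director on the metapath in turn until one
# returns a module object.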
class RegistryImportDirector(ImportDirector):
# for Windows only
def __init__(self):
self.path = "WindowsRegistry"
self.map = {}
try:
import win32api
import win32con
except ImportError:
return
subkey = r"Software\Python\PythonCore\%s\Modules" % sys.winver
for root in (win32con.HKEY_CURRENT_USER, win32con.HKEY_LOCAL_MACHINE):
try:
hkey = win32api.RegOpenKeyEx(root, subkey, 0, win32con.KEY_READ)
except Exception, e:
logger.debug('RegistryImportDirector: %s' % e)
continue
numsubkeys, numvalues, lastmodified = win32api.RegQueryInfoKey(hkey)
for i in range(numsubkeys):
subkeyname = win32api.RegEnumKey(hkey, i)
hskey = win32api.RegOpenKeyEx(hkey, subkeyname, 0, win32con.KEY_READ)
val = win32api.RegQueryValueEx(hskey, '')
desc = getDescr(val[0])
#print " RegistryImportDirector got %s %s" % (val[0], desc) #XXX
self.map[subkeyname] = (val[0], desc)
hskey.Close()
hkey.Close()
break
def getmod(self, nm, loadco=marshal.loads):
stuff = self.map.get(nm)
if stuff:
fnm, (suffix, mode, typ) = stuff
if typ == imp.C_EXTENSION:
return depend.modules.ExtensionModule(nm, fnm)
elif typ == imp.PY_SOURCE:
try:
stuff = open(fnm, 'rU').read() + '\n'
co = compile(stuff, fnm, 'exec')
except SyntaxError, e:
logger.exception(e)
raise SystemExit(10)
else:
stuff = open(fnm, 'rb').read()
co = loadco(stuff[8:])
return depend.modules.PyModule(nm, fnm, co)
return None
class PathImportDirector(ImportDirector):
def __init__(self, pathlist=None, importers=None):
if pathlist is None:
self.path = sys.path
else:
self.path = pathlist
self.ownertypes = filter(None, [
PyInstaller.depend.owner.DirOwner,
PyInstaller.depend.owner.ZipOwner,
PyInstaller.depend.owner.PYZOwner,
PyInstaller.depend.owner.Owner,
])
if importers:
self.shadowpath = importers
else:
self.shadowpath = {}
self.building = set()
def __str__(self):
return str(self.path)
def getmod(self, nm):
mod = None
for thing in self.path:
if isinstance(thing, basestring):
owner = self.shadowpath.get(thing, -1)
if owner == -1:
owner = self.shadowpath[thing] = self.__makeOwner(thing)
if owner:
mod = owner.getmod(nm)
else:
mod = thing.getmod(nm)
if mod:
break
return mod
def __makeOwner(self, path):
if path in self.building:
return None
self.building.add(path)
owner = None
for klass in self.ownertypes:
try:
# this may cause an import, which may cause recursion
# hence the protection
owner = klass(path)
except PyInstaller.depend.owner.OwnerError:
pass
except Exception, e:
#print "FIXME: Wrong exception", e
pass
else:
break
self.building.remove(path)
return owner
|
asedunov/intellij-community | refs/heads/master | python/lib/Lib/dummy_threading.py | 102 | """Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``thread`` was available which is not desired.
:Author: Brett Cannon
:Contact: brett@python.org
XXX: Try to get rid of ``_dummy_threading``.
"""
from sys import modules as sys_modules
import dummy_thread
# Declaring now so as to not have to nest ``try``s to get proper clean-up.
holding_thread = False
holding_threading = False
holding__threading_local = False
try:
# Could have checked if ``thread`` was not in sys.modules and gone
# a different route, but decided to mirror technique used with
# ``threading`` below.
if 'thread' in sys_modules:
held_thread = sys_modules['thread']
holding_thread = True
# Must have some module named ``thread`` that implements its API
# in order to initially import ``threading``.
sys_modules['thread'] = sys_modules['dummy_thread']
if 'threading' in sys_modules:
# If ``threading`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held_threading = sys_modules['threading']
holding_threading = True
del sys_modules['threading']
if '_threading_local' in sys_modules:
# If ``_threading_local`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held__threading_local = sys_modules['_threading_local']
holding__threading_local = True
del sys_modules['_threading_local']
import threading
# Need a copy of the code kept somewhere...
sys_modules['_dummy_threading'] = sys_modules['threading']
del sys_modules['threading']
sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
del sys_modules['_threading_local']
from _dummy_threading import *
from _dummy_threading import __all__
finally:
# Put back ``threading`` if we overwrote earlier
if holding_threading:
sys_modules['threading'] = held_threading
del held_threading
del holding_threading
# Put back ``_threading_local`` if we overwrote earlier
if holding__threading_local:
sys_modules['_threading_local'] = held__threading_local
del held__threading_local
del holding__threading_local
# Put back ``thread`` if we overwrote, else del the entry we made
if holding_thread:
sys_modules['thread'] = held_thread
del held_thread
else:
del sys_modules['thread']
del holding_thread
del dummy_thread
del sys_modules
|
OpenVnmrJ/OpenVnmrJ | refs/heads/master | src/ddr/adm/acq/verifyCntlrsFlash.py | 1 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
This script verifies the VNMRS Digital Controllers' flash contents.
It compares the files on each controller's flash to those in /vnmr/acq/download.
If it finds mismatches, it downloads the files from /vnmr/acq/download to
the controller's flash. Unless the -y option is given, permission to update the files is requested.
This script uses the Linux rsh facility to communicate with the controllers.
Notes:
Before running this script:
Be sure the acquisition processes are stopped
i.e. acqcomm stop
Close all remote logins to any controllers
Reflashing can take a considerable amount of time, especially for nddslib; be patient!
The VxWorks image can be reflashed (if one deletes it) as long as the controller is still running
and has not been reset/rebooted.
If for any reason the script throws an error, please rerun the script with the additional '-d' option;
the debug log file created can be used to determine the cause of the problem.
Usage: verifyCntlrsFlash.py [options] [specific controller(s) to check ]
e.g. verifyCntlrsFlash.py - this will check all discovered controllers in the Console
verifyCntlrsFlash.py ddr1 - this will check just the ddr1 controller
verifyCntlrsFlash.py rf1,rf2,ddr1 - this will check just the rf1 rf2 ddr1 controllers
or
verifyCntlrsFlash.py rf1 rf2 ddr1 - this will check just the rf1 rf2 ddr1 controllers
"""
__author__ = "Greg Brissey"
__version__ = "$Revision: 1.0 $"
__date__ = "$Date: 2012/05/22 $"
__license__ = "Python"
import sys, os, glob, datetime, subprocess
import signal
import traceback
import collections
import logging
from optparse import OptionParser
# for Progress Bar
import time, threading
#
# ====================================================================================
# terminal control Module
# Copyright: 2009 Nadia Alramli
# License: BSD
# Original Source can be found @ http://nadiana.com/animated-terminal-progress-bar-in-python
# ====================================================================================
#
#print(__name__)
MODULE = sys.modules[__name__]
#print(MODULE)
COLORS = "BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK".split()
# List of terminal controls, you can add more to the list.
CONTROLS = {
'BOL':'cr', 'UP':'cuu1', 'DOWN':'cud1', 'LEFT':'cub1', 'RIGHT':'cuf1',
'CLEAR_SCREEN':'clear', 'CLEAR_EOL':'el', 'CLEAR_BOL':'el1',
'CLEAR_EOS':'ed', 'BOLD':'bold', 'BLINK':'blink', 'DIM':'dim',
'REVERSE':'rev', 'UNDERLINE':'smul', 'NORMAL':'sgr0',
'HIDE_CURSOR':'cinvis', 'SHOW_CURSOR':'cnorm'
}
# List of numeric capabilities
VALUES = {
'COLUMNS':'cols', # Width of the terminal (None for unknown)
'LINES':'lines', # Height of the terminal (None for unknown)
'MAX_COLORS': 'colors',
}
def default():
"""Set the default attribute values"""
for color in COLORS:
setattr(MODULE, color, '')
setattr(MODULE, 'BG_%s' % color, '')
for control in CONTROLS:
setattr(MODULE, control, '')
for value in VALUES:
setattr(MODULE, value, None)
def setup():
"""Set the terminal control strings"""
# Initializing the terminal
curses.setupterm()
# Get the color escape sequence template or '' if not supported
# setab and setaf are for ANSI escape sequences
bgColorSeq = curses.tigetstr('setab') or curses.tigetstr('setb') or ''
fgColorSeq = curses.tigetstr('setaf') or curses.tigetstr('setf') or ''
for color in COLORS:
# Get the color index from curses
colorIndex = getattr(curses, 'COLOR_%s' % color)
# Set the color escape sequence after filling the template with index
setattr(MODULE, color, curses.tparm(fgColorSeq, colorIndex))
# Set background escape sequence
setattr(
MODULE, 'BG_%s' % color, curses.tparm(bgColorSeq, colorIndex)
)
for control in CONTROLS:
# Set the control escape sequence
setattr(MODULE, control, curses.tigetstr(CONTROLS[control]) or '')
for value in VALUES:
# Set terminal related values
setattr(MODULE, value, curses.tigetnum(VALUES[value]))
def render(text):
"""Helper function to apply controls easily
Example:
apply("%(GREEN)s%(BOLD)stext%(NORMAL)s") -> a bold green text
"""
return text % MODULE.__dict__
try:
import curses
setup()
except Exception as e:
# There is a failure; set all attributes to default
print('Warning: %s' % e)
default()
#
# ====================================================================================
# Animated Progress bar
# Copyright: 2009 Nadia Alramli
# License: BSD
# Original Source can be found @ http://nadiana.com/animated-terminal-progress-bar-in-python
# ====================================================================================
#
"""Draws an animated terminal progress bar
Usage:
p = ProgressBar("blue")
p.render(percentage, message)
"""
class ProgressBar(object):
"""Terminal progress bar class"""
TEMPLATE = (
'%(percent)-2s%% %(color)s%(progress)s%(normal)s%(empty)s %(message)s\n'
)
PADDING = 7
def __init__(self, color=None, width=None, block='█', empty=' '):
"""
color -- color name (BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK)
width -- bar width (optional)
block -- progress display character (default '█')
empty -- bar display character (default ' ')
"""
if color:
# self.color = getattr(terminal, color.upper())
self.color = getattr(__main__, color.upper())
else:
self.color = ''
# if width and width < terminal.COLUMNS - self.PADDING:
if width and width < COLUMNS - self.PADDING:
self.width = width
else:
# Adjust to the width of the terminal
# self.width = terminal.COLUMNS - self.PADDING
self.width = COLUMNS - self.PADDING
self.block = block
self.empty = empty
self.progress = None
self.lines = 0
def render(self, percent, message = ''):
"""Print the progress bar
percent -- the progress percentage %
message -- message string (optional)
"""
inline_msg_len = 0
if message:
# The length of the first line in the message
inline_msg_len = len(message.splitlines()[0])
#if inline_msg_len + self.width + self.PADDING > terminal.COLUMNS:
if inline_msg_len + self.width + self.PADDING > COLUMNS:
# The message is too long to fit in one line.
# Adjust the bar width to fit.
# bar_width = terminal.COLUMNS - inline_msg_len -self.PADDING
bar_width = COLUMNS - inline_msg_len -self.PADDING
else:
bar_width = self.width
# Check if render is called for the first time
if self.progress != None:
self.clear()
self.progress = (bar_width * percent) / 100
data = self.TEMPLATE % {
'percent': percent,
'color': self.color,
'progress': self.block * self.progress,
'normal': NORMAL,
'empty': self.empty * (bar_width - self.progress),
'message': message
}
sys.stdout.write(data)
sys.stdout.flush()
# The number of lines printed
self.lines = len(data.splitlines())
def clear(self):
"""Clear all printed lines"""
#sys.stdout.write(
# self.lines * (terminal.UP + terminal.BOL + terminal.CLEAR_EOL)
#)
sys.stdout.write(
self.lines * (UP + BOL + CLEAR_EOL)
)
#
# ====================================================================================
# Thread to handle the animated progress Bar
# ====================================================================================
#
class ProgressThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, time):
super(ProgressThread, self).__init__()
self.pos = 0
self.time = float(time)
self._stop = threading.Event()
def run(self):
self.p = ProgressBar( width=50, block='▣', empty='□')
# calc duration interval for 100%
self.timeincr = self.time / 100.0;
#print self.time
#print self.timeincr
self.cntdwn = self.time
for i in range(101):
self.p.render(i, '\nWriting File to Flash, Remaining Time: %4.1f sec' % (self.cntdwn))
time.sleep(self.timeincr)
self.cntdwn = self.cntdwn - self.timeincr
if self.stopped():
self.p.render(100, '\nWriting File to Flash, Remaining Time: %4.1f sec' % (0.0))
break
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
#
# ====================================================================================
# ====================================================================================
#
# return the index of the first occurrence of the substring within a list of strings
def first_substring(strings, substring):
try:
result = (i for i, string in enumerate(strings) if substring in string).next()
except Exception as e: # catch StopIteration when it doesn't find the substring, return -1
result = -1
return result
#
# ====================================================================================
# ====================================================================================
#
class BreakHandler:
"""
Trap CTRL-C, set a flag, and keep going. This is very useful for
gracefully exiting database loops while simulating transactions.
To use this, make an instance and then enable it. You can check
whether a break was trapped using the trapped property.
# Create and enable a break handler.
ih = BreakHandler()
ih.enable()
for x in big_set:
complex_operation_1()
complex_operation_2()
complex_operation_3()
# Check whether there was a break.
if ih.trapped:
# Stop the loop.
break
ih.disable()
# Back to usual operation...
"""
def __init__(self, emphatic=9):
'''
Create a new break handler.
@param emphatic: This is the number of times that the user must
press break to *disable* the handler. If you press
break this number of times, the handler is automagically
disabled, and one more break will trigger an old
style keyboard interrupt. The default is nine. This
is a Good Idea, since if you happen to lose your
connection to the handler you can *still* disable it.
'''
self._count = 0
self._enabled = False
self._emphatic = emphatic
self._oldhandler = None
return
def _reset(self):
'''
Reset the trapped status and count. You should not need to use this
directly; instead you can disable the handler and then re-enable it.
This is better, in case someone presses CTRL-C during this operation.
'''
self._count = 0
return
def enable(self):
'''
Enable trapping of the break. This action also resets the
handler count and trapped properties.
'''
if not self._enabled:
self._reset()
self._enabled = True
self._oldhandler = signal.signal(signal.SIGINT, self)
return
def disable(self):
'''
Disable trapping the break. You can check whether a break
was trapped using the count and trapped properties.
'''
if self._enabled:
self._enabled = False
signal.signal(signal.SIGINT, self._oldhandler)
self._oldhandler = None
return
def __call__(self, signame, sf):
'''
A break just occurred. Save information about it and keep
going.
'''
self._count += 1
# If we've exceeded the "emphatic" count disable this handler.
if self._count >= self._emphatic:
self.disable()
return
def __del__(self):
'''
Python is reclaiming this object, so make sure we are disabled.
'''
self.disable()
return
@property
def count(self):
'''
The number of breaks trapped.
'''
return self._count
@property
def trapped(self):
'''
Whether a break was trapped.
'''
return self._count > 0
#
# ====================================================================================
# ====================================================================================
#
#
# rshCmd Exception class
#
class rshCmdError(Exception):
"rsh command exception class"
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#
# rsh commands class for VNMR Digital Controllers
#
class rshCmd:
"issue commands via rsh"
def __init__(self,hostname):
self.dirdic = {}
self.dirinfodic = {}
self.cntlrPrompt='.*> '
self.nddstype = 'None'
self.trap = BreakHandler() # Use this class to prevent Ctrl-C from interrupting rsh commands
logger.debug(hostname);
cmd = 'rsh -l %s %s' % ('vnmr1',hostname)
logger.debug(cmd)
self.child = pexpect.spawn('rsh -l %s %s'%('vnmr1',hostname))
i = self.child.expect([self.cntlrPrompt, pexpect.EOF, pexpect.TIMEOUT])
logger.debug(self.child.after)
# print i
if i == 0: # Login good
pass
if i == 1: # EOF
raise rshCmdError('rsh Login Failed')
if i == 2: # Timeout
raise rshCmdError('rsh Login Timeout')
def __del__(self):
try:
self.outputlinelist = self.sendCmd('logout',1)
except Exception as e:
pass
def sendCmd(self,cmd,cmdtimeout=10):
self.trap.enable() # prevent Ctrl-C from interrupting rsh commands
self.child.sendline(cmd);
# self.child.expect('.*> ',timeout=cmdtimeout)
result = self.child.expect([self.cntlrPrompt, pexpect.EOF, pexpect.TIMEOUT],timeout=cmdtimeout)
if result == 0:
linelist = self.child.after.split('\r\n');
logger.debug(linelist)
# There may be extraneous strings in the result, so find the echo of the command given
# and make that the beginning string in the list
index = first_substring(linelist,cmd)
logger.debug(index)
linelist = linelist[index:]
else:
logger.debug("EOF or TIMEOUT occurred.")
linelist=[]
self.trap.disable() # disable the Ctrl-C trap
return linelist
@property
def trapped(self):
"""
Whether a break was trapped.
"""
return self.trap._count > 0
def getCpuId(self):
"""
Obtain the Unique PPC ID
"""
outputlinelist = self.sendCmd('get405ECID')
logger.debug(outputlinelist)
index = first_substring(outputlinelist, 'value =')
# print outputlinelist
logger.debug(outputlinelist[index])
d1,d2,cpuId = outputlinelist[index].split('=',2)
return cpuId.strip(' ')
def isIcatAttached(self):
"""
Obtain the Icat ID, 0=no icat, 0xa Icat present
"""
outputlinelist = self.sendCmd('getIcatId');
# if ( 'undefined symbol:' not in outputlinelist[1]):
index = first_substring(outputlinelist, 'undefined symbol:')
logger.debug('undefined symbol: index ' + str(index))
if ( index == -1 ):
# print outputlinelist
# find the answer, use that index to access value
index = first_substring(outputlinelist, 'value =')
d1,d2,icatID = outputlinelist[index].split('=',2)
else:
icatID = '0x0'
if ( icatID.strip(' ') == '0x0'):
answer = False
else:
answer = True
return answer
def getIcatDNA(self):
"""
Obtain the Icat Unique ID
"""
outputlinelist = self.sendCmd('prtIcatDNA');
logger.debug(outputlinelist)
index = first_substring(outputlinelist, 'DNA:')
logger.debug('DNA indexed to: ' + str(index))
# print outputlinelist
if (index != -1):
d1,icatDNA = outputlinelist[index].split(':',1)
else:
icatDNA="Unknown"
return icatDNA.strip(' ')
def getFPGAInfo(self): # doesn't work
outputlinelist = self.sendCmd('checkFpgaVersion');
logger.debug(outputlinelist)
# print outputlinelist
return outputlinelist[1]
def getmd5(self,file):
"""
Obtain the md5 signature of the flash file given
"""
outputlinelist = self.sendCmd('ffmd5 "' + file + '"');
logger.debug(outputlinelist)
# if ( 'undefined symbol:' not in outputlinelist[1]):
index = first_substring(outputlinelist, 'undefined symbol:')
logger.debug('undefined symbol: index ' + str(index))
if ( index == -1 ):
# if ('not found' not in outputlinelist[1]):
index = first_substring(outputlinelist, 'not found')
logger.debug('not found: index ' + str(index))
if ( index == -1 ):
md5index = first_substring(outputlinelist, 'MD5:')
logger.debug('MD5: index ' + str(md5index))
logger.debug(outputlinelist[md5index])
title,ffmd5sig = outputlinelist[md5index].split(' ',1)
else:
logger.debug(outputlinelist[index])
ffmd5sig = outputlinelist[index].strip(' ')
else:
ffmd5sig = 'unknown: Controller not fully booted'
return ffmd5sig
def getNDDSType(self):
# 1st try the command for NDDS 3.x
outputlinelist = self.sendCmd('prtnddsver')
logger.debug(outputlinelist)
# if ( 'undefined symbol:' not in outputlinelist[1]):
index = first_substring(outputlinelist, 'undefined symbol:')
if ( index == -1 ):
result = 'NDDS 4.x'
self.nddstype='4x'
else:
# 2nd try the command for NDDS 4.x
outputlinelist = self.sendCmd('NddsVersionGet')
logger.debug(outputlinelist)
# if ('undefined symbol:' not in outputlinelist[1]):
index = first_substring(outputlinelist, 'undefined symbol:')
if ( index == -1 ):
result = 'NDDS 3.x'
self.nddstype='3x'
else:
# "NDDS libraries do not appear to be loaded"
result = "Indeterminate"
self.nddstype='None'
return result
def copy2cntlr(self,file,filesize,cmdtimeout=300):
# example output of a cp2ffw command
#cp2ffw "/home/vnmr1/test.txt"
#Coping file '/home/vnmr1/test.txt' of 44 bytes to Flash file 'test.txt'
#Copy Successful, CRC match: 0x42eca34a
#Complete.
#value = 0 = 0x0
# approx. writing speed of controller flash, bytes / second
bytesPerSec = 24352.0
time2write = float(filesize) / bytesPerSec
#if self.isFileOnCntlr(file) == False:
# time2write / 2.0
if ( options.verboseflag == True ):
self.t = ProgressThread(time2write)
self.t.start() # Progress Bar in action while we wait on command to finish
outputlinelist = self.sendCmd('cp2ffw "' + file + '"',cmdtimeout);
if ( options.verboseflag == True ):
self.t.stop() # stop progress bar if not already completed
self.t.join() # wait for thread to terminate
logger.debug(outputlinelist)
# confirm successful copy
# found = outputlinelist[2].find('Successful')
found = first_substring(outputlinelist,'Successful')
logger.debug(found)
if (found != -1):
status = True
else:
status = False
return status
def reboot(self):
outputlinelist = self.sendCmd('reboot 1', 3);
def dir(self):
outputlinelist = self.sendCmd('ffdir');
logger.debug(outputlinelist)
# parse list into a dictionary key filename, tuple bytes,md5
if ( options.verboseflag == True ):
sys.stdout.write("\r\nObtaining Directory Info: ")
else:
sys.stdout.write(" Obtaining Directory Info: ")
sys.stdout.flush()
for lval in outputlinelist[1:-6]:
# print "lval: " + lval
( name, size ) = lval.split('\t ',1)
name = name.strip(' \t')
size = size.strip(' \t')
logger.debug('name: "'+name+'"')
logger.debug('size: "'+size+'"')
self.dirdic[name] = ( size,'0' )
#print self.dirdic
#
# store the directory info as well: number of files, size, and free space
#
logger.debug(outputlinelist[-5])
( name, size ) = outputlinelist[-5].split('\t',1)
self.dirinfodic['nfiles'] = ( name.strip(' '), size.strip(' \t') )
logger.debug(outputlinelist[-4])
( name, size ) = outputlinelist[-4].split('\t',1)
self.dirinfodic['freespace'] = ( name.strip(' \t'), size.strip(' \t') )
#
# get the md5 signatures for each file
#
for file, ( size, md5) in self.dirdic.items():
sys.stdout.write(".")
sys.stdout.flush()
ffmd5 = self.getmd5(file)
self.dirdic[file] = ( size, ffmd5.strip("'") )
sys.stdout.write("\r\n")
sys.stdout.flush()
logger.debug(self.dirdic)
logger.debug(self.dirinfodic)
return outputlinelist
def icatdir(self):
outputlinelist = self.sendCmd('isfdir');
logger.debug(outputlinelist)
return outputlinelist[1:-2]
def getNumberUsedAndFreeSpace(self):
"""Returns a tuple of three, number of files, total bytesused, free space"""
( number, tsize ) = self.dirinfodic['nfiles']
( dum, fsize ) = self.dirinfodic['freespace']
return ( number, tsize, fsize )
def cmpmd5(self,filename,md5):
"""Compares the filename and md5, against the controllers ffdir listing dictionary"""
try:
( size, ffmd5 ) = self.dirdic[filename]
if ( md5 == ffmd5):
result = True
else:
result = False
except Exception as e:
result = False;
return result
def isFileOnHost(self,files):
"""The list of files given is compared against the file on the controller's flash
a list of files on the flash but not on the host is returned."""
notfoundlist=[]
for filename in self.dirdic.keys():
if filename not in files:
notfoundlist.append(filename)
return notfoundlist
def isFileOnCntlr(self,filename):
"""The list of files given is compared against the file on the controller's flash
a list of files on the flash but not on the host is returned."""
filelist = self.dirdic.keys()
return filename in filelist
# The fail files pri_imagecp.fail and/or sec_imagecp.fail are created if the copy operation fails
# on the primary and/or secondary iCAT FPGA image respectively.
#
# The failure file sec_imageld.fail is created on the RF FFS if the reboot of the secondary image fails.
#
# The fail file icat_config.fail is created if the FORTH interpreter returns an error code.
#
#def getFreeSpace(self):
# ( number, size ) = self.dirinfodic['freespace']
# return size
# return ( number, size )
#
# ====================================================================================
#
def exit_with_usage():
# print globals()['__doc__']
parser.print_help()
os._exit(1)
#
# Calculate the MD5 signature for the given file
# Return the calculated MD5 signature
#
def md5sig(filepath):
md5cmd = 'md5sum ' + filepath
md5proc = subprocess.Popen(md5cmd, shell=True, stdout=subprocess.PIPE)
status = os.waitpid(md5proc.pid, 0)
md5sig,file = md5proc.stdout.read().split(' ',1)
#logger.debug(md5sig)
return md5sig
#
# Get the size in bytes of the given file
# Return the size parsed from 'ls -l' output
#
def getfilesize(filepath):
"""-rwxrwxrwx 1 greg greg 1302 2012-08-30 14:44 tst.py"""
ducmd = 'ls -l ' + filepath
md5proc = subprocess.Popen(ducmd, shell=True, stdout=subprocess.PIPE)
status = os.waitpid(md5proc.pid, 0)
# line = md5proc.stdout.read();
perms,lnks,user,grp,size,stuff = md5proc.stdout.read().split(' ',5)
return size
# ducmd = 'du -b ' + filepath
# md5proc = subprocess.Popen(ducmd, shell=True, stdout=subprocess.PIPE)
# status = os.waitpid(md5proc.pid, 0)
# size = md5proc.stdout.read()
#size,file = md5proc.stdout.read().split('\t',1)
# print size
#return size
#
#
# pingIP: ping the hostname or IP given, returns True if response received otherwise False
#
def pingIP(ip):
""" pingIP: ping the hostname or IP given, returns True if response received otherwise False"""
# ping once, time out after one second if no response
pingTest = "ping -c 1 -W 1 " + ip
# print pingTest
process = subprocess.Popen(pingTest, shell=True, stdout=subprocess.PIPE)
process.wait()
returnCodeTotal = process.returncode
# print returnCodeTotal
return (returnCodeTotal == 0)
def getEthersList():
"""getEthersList - Obtain the Controllers Names from the /etc/ethers files"""
hostlist = []
file=os.path.join(os.path.sep,'etc','ethers')
# print file
if ( os.path.exists(file) == False ) :
logger.debug("Ethers file: " + file + " - Not present.")
return hostlist
inFile = open(file, 'rb')
for line in inFile:
# print line
# logger.debug(line)
# skip blank or commented '#' lines
if ((line[0] != "#") and (line != "\n")):
#
# grab the MAC address and controller name from the line
#
(mac, cntlrname) = line.split(' ',1)
mac = mac.strip()
# logger.debug(mac)
cntlrname = cntlrname.strip(' \n')
#print cntlrname
# logger.debug(cntlrname)
hostlist.append(cntlrname);
return hostlist
def md5DownloadList():
"""md5DownloadList: create a listing of the downloadable content in the VnmrJ /vnmr/acq/download directory
This list contains the filename, size, and md5 signature"""
#dwnldlist = ( glob.glob('/vnmr/acq/download/*.o') + glob.glob('/vnmr/acq/download/*.bit') +
# glob.glob('/vnmr/acq/download/*.4th') + glob.glob('/vnmr/acq/download/*.bdx') +
# ['/vnmr/acq/download/nvScript'] )
dwnldlist = ( glob.glob(options.dwnldpath+'/*.o') + glob.glob(options.dwnldpath+'/*.bit') +
glob.glob(options.dwnldpath+'/*.4th') + glob.glob(options.dwnldpath+'/*.bdx') +
[options.dwnldpath+'/nvScript'] )
# print dwnldlist
logger.debug(dwnldlist);
dwnlddic = { }
for file in dwnldlist:
# md5 = md5sig("/vnmr/acq/download/ddrexec.o")
md5 = md5sig(file)
size = getfilesize(file)
#
# filename stripped of directory path
#
(path , filename) = os.path.split(file);
#path = path.strip()
#print path
filename = filename.strip(' \n')
dwnlddic[filename] = ( size, md5 )
# print file + " : " + filename + ': ' + md5
# logger.debug(file + " : " + filename + ': ' + md5);
logger.debug(file + " :\t" + md5);
return dwnlddic
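# A minimal sketch of the returned mapping, with hypothetical values:
#   {'ddrexec.o': ('123456', '9e107d9d372bb6826bd81d3542a419d6'), ...}
# i.e. bare filename -> (size in bytes as a string, md5 hex digest), which is
# what main() later compares against each controller's flash via cmpmd5().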
#
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
#----------------------------------------------------------------------
#
def main():
hostname = []
reboot=0
#
# Obtain the list of possible VNMRS Digital Controllers in this system
#
hostname = getEthersList()
if len(hostname) == 0:
logger.critical("No Ethers file on this system, Script Aborted")
os._exit(1)
logger.debug('number of hostnames ' + str(len(hostname)) + ' : ' + ', '.join(hostname))
md5sums = {}
md5sums = md5DownloadList()
#
# if no specific controllers were given to test, then
# ping the list of controllers obtained from the ethers file, and determine
# which are present via ping
#
if ( len(targets) == 0):
activelist = []
sys.stdout.write("Determining Active Controllers, Working: ")
sys.stdout.flush()
for host in hostname:
if ( pingIP(host) == True ):
activelist.append(host)
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\r\n")
sys.stdout.flush()
logger.info(' ')
logger.info('Controllers Found: ' + ', '.join(activelist))
else:
activelist = targets
# unfortunately collections.defaultdict is not present in Python 2.4, which is on RHEL 5.3, which we must support.
# nddstypedic = collections.defaultdict(list) # uses an initializer function for missing keys, use list() for our case
# rftypedic = collections.defaultdict(list) # uses an initializer function for missing keys, use list() for our case
nddstypedic = {}
rftypedic = {}
for cntlr in activelist:
try:
# attempt to rlogin into the controller
cntlrRsh = rshCmd(cntlr)
except rshCmdError as e:
# print 'rshCmd exception occurred, value:', e.value
logger.info("rsh to Controller %s failed, Check for an open rsh session to controller." % (cntlr))
logger.critical(e.value)
os._exit(1)
nddstype = cntlrRsh.getNDDSType()
# nddstypedic[nddstype].append(cntlr) # see above initialization that makes this work properly
nddstypedic.setdefault(nddstype,[]).append(cntlr)
logger.debug(nddstypedic)
cpuId = cntlrRsh.getCpuId()
#fpgaInfo = cntlrRsh.getFPGAInfo()
#print fpgaInfo
#os._exit(1)
if ( options.verboseflag == True ):
logger.info(' ')
logger.info('------------------------------------------------------------------ ')
logger.info('------------------------------------------------------------------ ')
logger.info(' ')
logger.info('Digital Controller: %s, Unique ID: %s, NDDS Ver: %s' %(cntlr,cpuId, nddstype))
else:
logger.info('Controller %d of %d (%s)' %(activelist.index(cntlr)+1,len(activelist),cntlr))
icatPresent = False
if ( 'rf' in cntlr ):
icatPresent = cntlrRsh.isIcatAttached()
if ( icatPresent ):
# rftypedic['ICAT RF'].append(cntlr)  # defaultdict is not in Python 2.4, which is on RHEL 5.3 or earlier
rftypedic.setdefault('ICAT RF',[]).append(cntlr)
icatDNA = cntlrRsh.getIcatDNA()
# print icatDNA
isflist = cntlrRsh.icatdir()
#print isflist
else:
#rftypedic['VNMRS RF'].append(cntlr)  # defaultdict is not in Python 2.4, which is on RHEL 5.3 or earlier
rftypedic.setdefault('VNMRS RF',[]).append(cntlr)
#
# obtain the FFS listing of the controller with md5, this takes a bit of time
#
dirlist = cntlrRsh.dir()
# logger.info('\r\n'.join(dirlist[1:-2]))
if ( options.verboseflag == True ):
if ( icatPresent ):
logger.info(" ")
logger.info(" iCAT RF attached to this RF Controller, iCAT DNA: %s" % icatDNA)
logger.info(" ")
logger.info(" ")
logger.info(" " + isflist[0])
for line in isflist[1:-2]:
( file, size , md5 ) = line.split('\t',2)
logger.info("%25s %20s %24s" %(file.strip('\t '),size.strip('\t '),md5.strip('\t ')))
logger.info(" " + isflist[-1])
logger.info(' ')
logger.info(' ')
#
# print directory listing
#
( nfiles, fsize, tsize ) = cntlrRsh.getNumberUsedAndFreeSpace()
if ( options.verboseflag == True ):
logger.info(" ")
logger.info("Controller Directory listing: %11s %20s, %20s" % (nfiles, fsize, tsize))
logger.info(" ")
for file, ( size, ffmd5) in cntlrRsh.dirdic.items():
logger.info("%25s %15s md5: %24s" %(file,size,ffmd5))
logger.info(' ')
logger.info(' ')
notfoundlist = [ ]
notfoundlist = cntlrRsh.isFileOnHost(md5sums.keys())
notfoundlist.remove('boot.ini') # remove the Visionware bootloader ini file, it's always present
if ( options.verboseflag == True ):
if ( len(notfoundlist) > 0 ):
logger.info("Non-Release files found on Controller: %s" % ', '.join(notfoundlist))
logger.info(' ')
#
# compare md5s if mismatch then download files from host to controller's flash
#
files2update= []
filesup2date= []
for file, (size,md5) in md5sums.items():
logger.debug('Cntlr: %s, file: %s, md5: %s' % (cntlr,file,md5))
result = cntlrRsh.cmpmd5(file,md5)
if (result == False):
if ( 'rf' not in cntlr ):
if ( 'icat' not in file ):
files2update.append(file)
else:
if ( options.verboseflag == True ):
logger.info(' skipping file %s' % file)
else:
files2update.append(file)
else:
filesup2date.append(file)
#logger.info('File: ' + file + ' is up to date')
if ( options.verboseflag == True ):
if ( len(filesup2date) > 0):
logger.info('Files Not Requiring Updating: ')
logger.info(" ")
for file in filesup2date:
logger.info("%30s" % (file))
else:
logger.info('Files Not Requiring Updating: None')
logger.info(" ")
if ( len(files2update) > 0):
logger.info(" ")
logger.info('Files Requiring Updating: ')
logger.info(" ")
for file in files2update:
logger.info("%30s" % (file))
else:
logger.info(" ")
logger.info('Files Requiring Updating: None')
logger.info(" ")
else:
if ( len(files2update) == 1):
logger.info(" %d file requires updating" % len(files2update))
else:
logger.info(" %d files require updating" % len(files2update))
# only ask to update if there are files to update
if ( (len(files2update) > 0) and (options.noupdateflag == False) ):
if (options.autoupdateflag == True):
update = True
else:
answer = raw_input('\r\nUpdate the files? (y/n): ')
# print answer
if ( answer in [ 'y', 'yes', 'Y', 'Yes','YES' ] ):
update = True
else:
update = False
else:
update = False
#
########################################################################################
# File Transfer Section
########################################################################################
#
# Update or Not?
#
if (update == True):
failures=False
#
# copy the files from the host to the controller for the files that need to be updated
#
for file in files2update:
filepath = os.path.join(options.dwnldpath,file)
logger.debug("download host file: " + filepath)
if ( options.verboseflag == True ):
logger.info("\r\nUpdate: " + file)
else:
logger.info(' Update file %d of %d' %(files2update.index(file)+1,len(files2update)))
(filesize,md5s) = md5sums[file]
result = cntlrRsh.copy2cntlr(filepath,filesize)
if ( result == False ):
failures = True
logger.critical(" Failed to copy '%s' to Controller" % (filepath))
# Ctrl-C pressed during transfer?
if cntlrRsh.trapped:
logger.info("Control-C was pressed during transfer, do you wish to Abort?")
answer = raw_input('\r\nAbort? (y/n): ')
# print answer
if ( answer not in [ 'n', 'N', 'No', 'NO' ] ):
logger.info("\r\nAborting further transfers.")
return
if ( failures == False):
reboot=1
if ( reboot == 1):
logger.info("\r\nRebooting Controllers")
cntlrRsh = rshCmd("master1")
cntlrRsh.reboot() # reboot controller ?
if ( len(nddstypedic.keys()) > 1):
logger.info(" ")
logger.info("W A R N I N G !, Incompatible NDDS Versions detected.")
logger.info(" ")
for key in nddstypedic:
logger.info("%27s: %s" % (key, ', '.join(nddstypedic[key])))
if ( len(rftypedic.keys()) > 1):
logger.info(" ")
logger.info("W A R N I N G !, Multiple RFs detected.")
logger.info(" ")
for key in rftypedic:
logger.info("%27s: %s" % (key, ', '.join(rftypedic[key])))
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
#
# traverse a list of lists, etc.
#
def traverse(o, tree_types=(list, tuple)):
if isinstance(o, tree_types):
for value in o:
for subvalue in traverse(value):
yield subvalue
else:
yield o
import pexpect
#
# Entry point of Script
#
if __name__ == "__main__":
# print "\n".join(sys.argv)
dateTime = datetime.datetime.today()
datetimestr = dateTime.strftime("%Y-%m-%d:%H:%M:%S")
parser = OptionParser(usage=__doc__)
parser.add_option("-f", "--file", dest="loggingFilename",
help="Logging base file name, a timestamp.log with be appended to name e.g. '_2012-05-30:21:34:13.log'", metavar="FileName")
parser.add_option("-l", "--location", action="store", dest="dwnldpath", default="/vnmr/acq/download", metavar="DirectoryPath",
help="location/directory path of controller's files on host (default: /vnmr/acq/download)")
parser.add_option("-d", "--debug",
action="store_true", dest="debugOutput", default=False,
help="Write debugging output to debug log file, e.g. 'verifyCntlrsFlash_Debug_2012-05-30:21:34:13.log'")
parser.add_option("-y", "--autoupdate",
action="store_true", dest="autoupdateflag", default=False,
help="Automaticly update files that need to be, without confirmation")
parser.add_option("-n", "--noupdates",
action="store_true", dest="noupdateflag", default=False,
help="Do not update flash file and do not ask to update files. Useful if you want just a log of the present state of the controllers.")
parser.add_option("-v", "--verbose",
action="store_true", dest="verboseflag", default=False,
help="Verbose output as in previous versions")
(options, args) = parser.parse_args()
# print(args)
# arglist is going to be a lists of lists
arglist = []
for argitem in args:
arglist.append(argitem.split(','))
# traverse the list of lists, creating a simple list of all targeted controllers
targets = []
for cntlr in traverse(arglist):
targets.append(cntlr)
#print(targets)
#print options # access via options.optionname
#print args # access as an array index; args[0] is the 1st arg, not the name of the script
#print options.loggingFilename
#print args[0]
#formatter = logging.Formatter('%(asctime)-6s: %(name)s - %(levelname)s - %(message)s')
#formatter = logging.Formatter(
# '[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s','%m-%d %H:%M:%S')
# console_formatter = logging.Formatter('%(asctime)-6s: %(message)s','%m-%d %H:%M:%S')
console_formatter = logging.Formatter('%(message)s')
consolelog_formatter = logging.Formatter('%(asctime)-6s: %(message)s','%m-%d %H:%M:%S')
debug_formatter = logging.Formatter(
'[%(asctime)s] {%(filename)s:%(lineno)d:%(funcName)s} %(levelname)s - %(message)s','%m-%d %H:%M:%S')
consoleLogger = logging.StreamHandler()
consoleLogger.setLevel(logging.INFO)
consoleLogger.setFormatter(console_formatter)
logging.getLogger('').addHandler(consoleLogger)
if ( options.loggingFilename == None):
loggingFilename = 'verifyCntlrsFlash_' + datetimestr + '.log'
else:
loggingFilename = options.loggingFilename + '_' + datetimestr + '.log'
fileLogger = logging.FileHandler(filename=loggingFilename)
fileLogger.setLevel(logging.INFO)
fileLogger.setFormatter(consolelog_formatter)
logging.getLogger('').addHandler(fileLogger)
if (options.debugOutput):
debugLogger = logging.FileHandler(filename='verifyCntlrsFlash_Debug_' + datetimestr + '.log')
debugLogger.setLevel(logging.DEBUG)
debugLogger.setFormatter(debug_formatter)
logging.getLogger('').addHandler(debugLogger)
logger = logging.getLogger('verifyCntlrsFlash logger')
logger.setLevel(logging.DEBUG)
logger.info(" ")
logger.info(" ------ " + datetimestr + " ------- ")
logger.info(" ")
logger.info('Log file: "%s"' % (loggingFilename))
logger.info(" ")
if (options.debugOutput):
logger.info(" ")
logger.info('Debug Log file: "%s"' % ('verifyCntlrsFlash_Debug_' + datetimestr + '.log'))
logger.info(" ")
try:
main()
except Exception as e:
print(str(e))
traceback.print_exc()
os._exit(1)
logger.info(" ")
logger.info(" ------ Completed: " + datetimestr + " ------- ")
logger.info(" ")
|
ryosuzuki/crowdsource-platform | refs/heads/develop2 | crowdsourcing/permissions/project.py | 3 | __author__ = 'dmorina'
from rest_framework import permissions
class IsProjectOwnerOrCollaborator(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if obj.owner == request.user.userprofile.requester:
return True
for collaborator in obj.collaborators.all():
if collaborator.profile.user == request.user:
return True
return False
class IsReviewerOrRaterOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.worker.profile.user == request.user
|
kotfic/girder | refs/heads/master | plugins/dicom_viewer/plugin_tests/dicom_viewer_test.py | 2 | import os
import json
import six
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.upload import Upload
from girder.models.user import User
import pydicom
from tests import base
from girder_dicom_viewer import _removeUniqueMetadata, _extractFileData
from girder_dicom_viewer.event_helper import _EventHelper
def setUpModule():
base.enabledPlugins.append('dicom_viewer')
base.startServer()
global _removeUniqueMetadata
global _extractFileData
def tearDownModule():
base.stopServer()
class DicomViewerTest(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
self.dataDir = os.path.join(
os.environ['GIRDER_TEST_DATA_PREFIX'], 'plugins', 'dicom_viewer')
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@u.com' % num)
for num in [0, 1]]
def testRemoveUniqueMetadata(self):
dicomMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54,
'key6': 'commonVal',
'uniqueKey1': 'commonVal'
}
additionalMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54,
'key6': 'uniqueVal',
'uniqueKey2': 'commonVal',
}
commonMeta = {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 35,
'key5': 54
}
self.assertEqual(_removeUniqueMetadata(dicomMeta, additionalMeta), commonMeta)
def testExtractFileData(self):
dicomFile = {
'_id': '599c4cf3c9c5cb11f1ff5d97',
'assetstoreId': '599c4a19c9c5cb11f1ff5d32',
'creatorId': '5984b9fec9c5cb370447068c',
'exts': ['dcm'],
'itemId': '599c4cf3c9c5cb11f1ff5d96',
'mimeType': 'application/dicom',
'name': '000000.dcm',
'size': 133356
}
dicomMeta = {
'SeriesNumber': 1,
'InstanceNumber': 1,
'SliceLocation': 0
}
result = {
'_id': '599c4cf3c9c5cb11f1ff5d97',
'name': '000000.dcm',
'dicom': {
'SeriesNumber': 1,
'InstanceNumber': 1,
'SliceLocation': 0
}
}
self.assertEqual(_extractFileData(dicomFile, dicomMeta), result)
def testFileProcessHandler(self):
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection1', admin, public=True)
folder = Folder().createFolder(collection, 'folder1', parentType='collection', public=True)
item = Item().createItem('item1', admin, folder)
# Upload non-DICOM files
self._uploadNonDicomFiles(item, admin)
nonDicomItem = Item().load(item['_id'], force=True)
self.assertIsNone(nonDicomItem.get('dicom'))
# Upload DICOM files
self._uploadDicomFiles(item, admin)
# Check if the 'dicomItem' is well processed
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
# Check that the files list contains the right keys and all the files are well sorted
for i in range(0, 4):
self.assertTrue('_id' in dicomItem['dicom']['files'][i])
self.assertTrue('name' in dicomItem['dicom']['files'][i])
self.assertEqual(dicomItem['dicom']['files'][i]['name'], 'dicomFile{}.dcm'.format(i))
self.assertTrue('SeriesNumber' in dicomItem['dicom']['files'][i]['dicom'])
self.assertTrue('InstanceNumber' in dicomItem['dicom']['files'][i]['dicom'])
self.assertTrue('SliceLocation' in dicomItem['dicom']['files'][i]['dicom'])
# Check the common metadata
self.assertIsNotNone(dicomItem['dicom']['meta'])
def testMakeDicomItem(self):
admin, user = self.users
# create a collection, folder, and item
collection = Collection().createCollection('collection2', admin, public=True)
folder = Folder().createFolder(collection, 'folder2', parentType='collection', public=True)
item = Item().createItem('item2', admin, folder)
# Upload files
self._uploadDicomFiles(item, admin)
# Check the endpoint 'parseDicom' for an admin user
dicomItem = Item().load(item['_id'], force=True)
dicomItem = self._purgeDicomItem(dicomItem)
path = '/item/%s/parseDicom' % dicomItem.get('_id')
resp = self.request(path=path, method='POST', user=admin)
self.assertStatusOk(resp)
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
# Check the endpoint 'parseDicom' for a non-admin user
dicomItem = Item().load(item['_id'], force=True)
dicomItem = self._purgeDicomItem(dicomItem)
path = '/item/%s/parseDicom' % dicomItem.get('_id')
resp = self.request(path=path, method='POST', user=user)
self.assertStatus(resp, 403)
def _uploadNonDicomFiles(self, item, user):
# Upload a fake file to check that the item is not treated as DICOM
nonDicomContent = b'hello world\n'
ndcmFile = Upload().uploadFromFile(
obj=six.BytesIO(nonDicomContent),
size=len(nonDicomContent),
name='nonDicom.txt',
parentType='item',
parent=item,
mimeType='text/plain',
user=user
)
self.assertIsNotNone(ndcmFile)
def _uploadDicomFiles(self, item, user):
# Upload the files in the reverse order to check if they're well sorted
for i in [1, 3, 0, 2]:
file = os.path.join(self.dataDir, '00000%i.dcm' % i)
with open(file, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(file),
name='dicomFile{}.dcm'.format(i),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
def _purgeDicomItem(self, item):
item.pop('dicom')
return item
def testSearchForDicomItem(self):
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection3', admin, public=True)
folder = Folder().createFolder(collection, 'folder3', parentType='collection', public=True)
item = Item().createItem('item3', admin, folder)
# Upload files
self._uploadDicomFiles(item, admin)
# Search for DICOM item with 'brain research' as common key/value
resp = self.request(path='/resource/search', params={
'q': 'brain research',
'mode': 'dicom',
'types': json.dumps(["item"])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['item']), 1)
self.assertEqual(resp.json['item'][0]['name'], 'item3')
# Search for DICOM item with substring 'in resea' as common key/value
resp = self.request(path='/resource/search', params={
'q': 'in resea',
'mode': 'dicom',
'types': json.dumps(["item"])
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json['item']), 1)
self.assertEqual(resp.json['item'][0]['name'], 'item3')
# TODO: Add test to search for a private DICOM item with another user;
# this test should not find anything
def testDicomWithIOError(self):
# One of the test files in the pydicom module will throw an IOError
# when parsing metadata. We should work around that and still be able
# to import the file
samplePath = os.path.join(os.path.dirname(os.path.abspath(
pydicom.__file__)), 'data', 'test_files', 'CT_small.dcm')
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection4', admin, public=True)
folder = Folder().createFolder(collection, 'folder4', parentType='collection', public=True)
item = Item().createItem('item4', admin, folder)
# Upload this dicom file
with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(samplePath),
name=os.path.basename(samplePath),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
# Check if the 'dicomItem' is well processed
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
def testDicomWithBinaryValues(self):
# One of the test files in the pydicom module contains binary tag
# values. We should work around that and still be able to import
# the file
samplePath = os.path.join(os.path.dirname(os.path.abspath(
pydicom.__file__)), 'data', 'test_files', 'OBXXXX1A.dcm')
admin, user = self.users
# Create a collection, folder, and item
collection = Collection().createCollection('collection5', admin, public=True)
folder = Folder().createFolder(collection, 'folder5', parentType='collection', public=True)
item = Item().createItem('item5', admin, folder)
# Upload this dicom file
with open(samplePath, 'rb') as fp, _EventHelper('dicom_viewer.upload.success') as helper:
dcmFile = Upload().uploadFromFile(
obj=fp,
size=os.path.getsize(samplePath),
name=os.path.basename(samplePath),
parentType='item',
parent=item,
mimeType='application/dicom',
user=user
)
self.assertIsNotNone(dcmFile)
# Wait for handler success event
handled = helper.wait()
self.assertTrue(handled)
# Check if the 'dicomItem' is well processed
dicomItem = Item().load(item['_id'], force=True)
self.assertIn('dicom', dicomItem)
self.assertHasKeys(dicomItem['dicom'], ['meta', 'files'])
|
nowismytime/laughing-waffle | refs/heads/master | copy3.py | 1 | import nltk
# Floyd-Warshall algorithm (all-pairs shortest paths)
def flwa(gmatrix):
N = len(gmatrix[0])
dmatrix = []
for a in range(N):
temp = []
for b in range(N):
temp.extend([0])
dmatrix.append(temp)
for a in range(N):
for b in range(N):
if (a!=b) & (gmatrix[a][b]==0):
dmatrix[a][b]=9999
else:
dmatrix[a][b]=gmatrix[a][b]
for k in range(N):
for i in range(N):
for j in range(N):
if dmatrix[i][k]+dmatrix[k][j] < dmatrix[i][j]:
dmatrix[i][j] = dmatrix[i][k]+dmatrix[k][j]
return dmatrix
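# A minimal worked example of flwa(), assuming a 3-node path graph where 0
# means "no edge" and 9999 stands in for infinity:
#   flwa([[0, 1, 0],
#         [1, 0, 1],
#         [0, 1, 0]])
#   # -> [[0, 1, 2],
#   #     [1, 0, 1],
#   #     [2, 1, 0]]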
# disjoint sets functions
# create sets for each element
def setCreate (disets, element):
nset = set()
nset.add(element)
hashmap={element: nset}
disets.append(hashmap)
# union two sets containing elements 1 and 2
def union (disets, element1, element2):
first_rep = setFind(disets, element1)
second_rep = setFind(disets, element2)
first_set = set()
second_set = set()
for index in range(len(disets)):
if first_rep in disets[index]:
first_set = disets[index][first_rep]
elif second_rep in disets[index]:
second_set = disets[index][second_rep]
if len(first_set) != 0 and len(second_set) != 0:
first_set = first_set.union(second_set)
for index in range(len(disets)):
if first_rep in disets[index]:
disets[index][first_rep] = first_set
for index in range(len(disets)):
if second_rep in disets[index]:
del disets[index][second_rep]
del disets[index]  # safe only because we break out of the loop immediately
break
# find the representative for the set containing the element
def setFind (disets, element):
for index in range(len(disets)):
keys = disets[index].keys()
for key in keys:
if element in disets[index][key]:
return key
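# Illustrative sketch (not part of the original script): basic use of the
# disjoint-set helpers above.
#
#     ds = []
#     for w in ('cat', 'dog', 'fish'):
#         setCreate(ds, w)
#     union(ds, 'cat', 'dog')
#     assert setFind(ds, 'cat') == setFind(ds, 'dog')
#     assert setFind(ds, 'fish') != setFind(ds, 'cat')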
# agglomerative clustering: repeatedly merge the closest pair of word sets
def agcluster (disets, geomatrix, words):
for i in range(len(words)):
setCreate(disets, words[i])
while len(disets) > 20:  # stop once 20 clusters remain
N = len(geomatrix[0])
imin = -1
jmin = -1
mindist = 9999  # renamed to avoid shadowing the built-in min()
for i in range(N):
for j in range(N):
if geomatrix[i][j] != 0 and geomatrix[i][j] < mindist:
imin = i
jmin = j
mindist = geomatrix[i][j]
if imin == -1:
break
first = words[imin]
second = words[jmin]
geomatrix[imin][jmin] = 9999  # mark this pair as already considered
if setFind(disets, first) != setFind(disets, second):
union(disets, first, second)
#for i in range(len(disets)):
# keys = disets[i].keys()
# for key in keys:
# temp = disets[i][key]
# for t1 in temp:
# print (t1)
# print (" ")
# print ("\n")
return disets
# main script
hm1 = {}
hm2 = {}
finalwords = []
finalwords1 = []
# getting stopwords
with open("D:\\User Libraries\\Documents\\NLP\\stopwords.txt") as f:
    stopwords = [line.rstrip('\n') for line in f]  # read from the already-open handle
# print(stopwords)
# getting input file
with open("D:\\User Libraries\\Documents\\NLP\\ii.txt") as f:
for line in f:
words = nltk.word_tokenize(line)
finalwords.extend(nltk.pos_tag(words))
# print(finalwords)
# print (len(finalwords))
# removing stopwords
for index in range(len(finalwords)):
words = finalwords[index]
temp = words[0].lower()
if temp not in stopwords:
if temp not in (".", ",", "–", ":"):
finalwords1.append(words)
# print (finalwords1)
# print (len(finalwords1))
# build word -> index (hm1) and index -> word (hm2) hashmaps
i = 0
for index in range(len(finalwords1)):
words = finalwords1[index]
temp = words[0].lower()
if temp not in hm1.keys():
hm1[temp] = i
hm2[i] = temp
i += 1
# generating adjacency matrix
gmatrix = []
for a in range(len(hm1)):
temp = []
for b in range(len(hm1)):
temp.extend([0])
gmatrix.append(temp)
for a in range(len(finalwords1)):
for b in range(a-5, a+6):  # symmetric +/-5 context window (a+6 so a+5 is included)
if (0 <= b < len(finalwords1)) and (b != a):
a1 = finalwords1[a][1]
a2 = finalwords1[b][1]
row = hm1[finalwords1[a][0].lower()]
col = hm1[finalwords1[b][0].lower()]
if a1 == a2:
gmatrix[row][col]=1
else:
gmatrix[row][col]=3
#print(gmatrix)
# getting geodesic matrix
dmatrix = flwa(gmatrix)
#print(dmatrix)
disets = []
dlist = agcluster(disets,dmatrix,hm2)
for index in range(len(dlist)):
print(dlist[index])
print("\n")
|
civisanalytics/ansible | refs/heads/civis | lib/ansible/modules/cloud/ovirt/ovirt_vms_facts.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_vms_facts
short_description: Retrieve facts about one or more oVirt virtual machines
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt virtual machines."
notes:
- "This module creates a new top-level C(ovirt_vms) fact, which
contains a list of virtual machines."
options:
pattern:
description:
- "Search term which is accepted by oVirt search backend."
- "For example to search VM X from cluster Y use following pattern:
name=X and cluster=Y"
all_content:
description:
- "If I(true) all the attributes of the virtual machines should be
included in the response."
case_sensitive:
description:
- "If I(true) performed search will take case into account."
max:
description:
- "The maximum number of results to return."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain the auth parameter for simplicity;
# look at the ovirt_auth module to see how to reuse authentication:
# Gather facts about all VMs which names start with C(centos) and
# belong to cluster C(west):
- ovirt_vms_facts:
pattern: name=centos* and cluster=west
- debug:
var: ovirt_vms
'''
RETURN = '''
ovirt_vms:
description: "List of dictionaries describing the VMs. VM attribues are mapped to dictionary keys,
all VMs attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
all_content=dict(default=False, type='bool'),
case_sensitive=dict(default=True, type='bool'),
max=dict(default=None, type='int'),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
connection = create_connection(module.params.pop('auth'))
vms_service = connection.system_service().vms_service()
vms = vms_service.list(
search=module.params['pattern'],
all_content=module.params['all_content'],
case_sensitive=module.params['case_sensitive'],
max=module.params['max'],
)
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_vms=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in vms
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=False)
if __name__ == '__main__':
main()
|
marcellodesales/svnedge-console | refs/heads/master | svn-server/lib/pygments/lexers/other.py | 54 | # -*- coding: utf-8 -*-
"""
pygments.lexers.other
~~~~~~~~~~~~~~~~~~~~~
Lexers for other languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this, do_insertions
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
from pygments.lexers.web import HtmlLexer
__all__ = ['SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'BrainfuckLexer',
'BashLexer', 'BatchLexer', 'BefungeLexer', 'RedcodeLexer',
'MOOCodeLexer', 'SmalltalkLexer', 'TcshLexer', 'LogtalkLexer',
'GnuplotLexer', 'PovrayLexer', 'AppleScriptLexer',
'BashSessionLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer',
'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer']
line_re = re.compile('.*?\n')
class SqlLexer(RegexLexer):
"""
Lexer for Structured Query Language. Currently, this lexer does
not recognize any special syntax except ANSI SQL.
"""
name = 'SQL'
aliases = ['sql']
filenames = ['*.sql']
mimetypes = ['text/x-sql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'--.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|'
r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|'
r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|'
r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|'
r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|'
r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|'
r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|'
r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|'
r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|'
r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|'
r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|'
r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|'
r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|'
r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|'
r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|'
r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|'
r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|'
r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|'
r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|'
r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|'
r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|'
r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|'
r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|'
r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|'
r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|'
r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|'
r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|'
r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|'
r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|'
r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|'
r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|'
r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|'
r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|'
r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|'
r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|'
r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|'
r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|'
r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|'
r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|'
r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|'
r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|'
r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|'
r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|'
r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|'
r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|'
r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|'
r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|'
r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|'
r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|'
r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|'
r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|'
r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|'
r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|'
r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|'
r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|'
r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|'
r'SYSTEM_USER|TABLE|TABLE_NAME| TEMP|TEMPLATE|TEMPORARY|TERMINATE|'
r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|'
r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|'
r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|'
r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|'
r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|'
r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|'
r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|'
r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|'
r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|'
r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|'
r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword),
(r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|'
r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|'
r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b',
Name.Builtin),
(r'[+*/<>=~!@#%^&|`?^-]', Operator),
(r'[0-9]+', Number.Integer),
# TODO: Backslash escapes?
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class MySqlLexer(RegexLexer):
"""
Special lexer for MySQL.
"""
name = 'MySQL'
aliases = ['mysql']
mimetypes = ['text/x-mysql']
flags = re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#|--\s+).*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'[0-9]+', Number.Integer),
(r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float),
# TODO: add backslash escapes
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r"`(``|[^`])*`", String.Symbol),
(r'[+*/<>=~!@#%^&|`?^-]', Operator),
(r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
r'precision|real|numeric|dec|decimal|timestamp|year|char|'
r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
bygroups(Keyword.Type, Text, Punctuation)),
(r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
r'character|check|collate|column|condition|constraint|continue|'
r'convert|create|cross|current_date|current_time|'
r'current_timestamp|current_user|cursor|database|databases|'
r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
r'declare|default|delayed|delete|desc|describe|deterministic|'
r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8'
r'|for|force|foreign|from|fulltext|grant|group|having|'
r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
r'minute_microsecond|minute_second|mod|modifies|natural|'
r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
r'replace|require|restrict|return|revoke|right|rlike|schema|'
r'schemas|second_microsecond|select|sensitive|separator|set|'
r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
r'varying|when|where|while|with|write|x509|xor|year_month|'
r'zerofill)\b', Keyword),
# TODO: this list is not complete
(r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
(r'(true|false|null)', Name.Constant),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable),
(r'[;:()\[\],\.]', Punctuation)
],
'multiline-comments': [
(r'/\*', Comment.Multiline, 'multiline-comments'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[^/\*]+', Comment.Multiline),
(r'[/*]', Comment.Multiline)
]
}
class SqliteConsoleLexer(Lexer):
"""
Lexer for example sessions using sqlite3.
*New in Pygments 0.11.*
"""
name = 'sqlite3con'
aliases = ['sqlite3']
filenames = ['*.sqlite3-console']
mimetypes = ['text/x-sqlite3-console']
def get_tokens_unprocessed(self, data):
sql = SqlLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(data):
line = match.group()
if line.startswith('sqlite> ') or line.startswith(' ...> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:8])]))
curcode += line[8:]
else:
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('SQL error: '):
yield (match.start(), Generic.Traceback, line)
else:
yield (match.start(), Generic.Output, line)
if curcode:
for item in do_insertions(insertions,
sql.get_tokens_unprocessed(curcode)):
yield item
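# Minimal usage sketch (illustrative, not part of this module): these lexers
# plug into pygments.highlight() like any other, e.g.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print highlight(u"SELECT * FROM users;", SqlLexer(), TerminalFormatter())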
class BrainfuckLexer(RegexLexer):
"""
Lexer for the esoteric `Brainfuck <http://www.muppetlabs.com/~breadbox/bf/>`_
language.
"""
name = 'Brainfuck'
aliases = ['brainfuck', 'bf']
filenames = ['*.bf', '*.b']
mimetypes = ['application/x-brainfuck']
tokens = {
'common': [
# use different colors for different instruction types
(r'[.,]+', Name.Tag),
(r'[+-]+', Name.Builtin),
(r'[<>]+', Name.Variable),
(r'[^.,+\-<>\[\]]+', Comment),
],
'root': [
(r'\[', Keyword, 'loop'),
(r'\]', Error),
include('common'),
],
'loop': [
(r'\[', Keyword, '#push'),
(r'\]', Keyword, '#pop'),
include('common'),
]
}
class BefungeLexer(RegexLexer):
"""
Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_
language.
*New in Pygments 0.7.*
"""
name = 'Befunge'
aliases = ['befunge']
filenames = ['*.befunge']
mimetypes = ['application/x-befunge']
tokens = {
'root': [
(r'[0-9a-f]', Number),
(r'[\+\*/%!`-]', Operator), # Traditional math
(r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives
(r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives
(r'[|_mw]', Keyword),
(r'[{}]', Name.Tag), # Befunge-98 stack ops
(r'".*?"', String.Double), # Strings don't appear to allow escapes
(r'\'.', String.Single), # Single character
(r'[#;]', Comment), # Trampoline... depends on direction hit
(r'[pg&~=@iotsy]', Keyword), # Misc
(r'[()A-Z]', Comment), # Fingerprints
(r'\s+', Text), # Whitespace doesn't matter
],
}
class BashLexer(RegexLexer):
"""
Lexer for (ba|k|)sh shell scripts.
*New in Pygments 0.6.*
"""
name = 'Bash'
aliases = ['bash', 'sh', 'ksh']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)\s*\b',
Keyword),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r';', Text),
(r'\s+', Text),
(r'[^=\s\n\[\]{}()$"\'`\\<]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
return shebang_matches(text, r'(ba|z|)sh')
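# e.g. a file starting with "#!/bin/bash" or "#!/usr/bin/env zsh" is claimed
# by this lexer via its shebang line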
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
*New in Pygments 1.1.*
"""
name = 'Bash Session'
aliases = ['console']
filenames = ['*.sh-session']
mimetypes = ['application/x-shell-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)?|\[\S+[@:]'
r'[^\n]+\].+)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
elif line.startswith('>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1])]))
curcode += line[1:]
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
*New in Pygments 0.7.*
"""
name = 'Batchfile'
aliases = ['bat']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Lines can start with @ to prevent echo
(r'^\s*@', Punctuation),
(r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
# If made more specific, make sure you still allow expansions
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
(r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
(r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
(r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
(r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
include('basic'),
(r'.', Text),
],
'echo': [
# Escapes only valid within echo args?
(r'\^\^|\^<|\^>|\^\|', String.Escape),
(r'\n', Text, '#pop'),
include('basic'),
(r'[^\'"^]+', Text),
],
'basic': [
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
(r'`.*?`', String.Backtick),
(r'-?\d+', Number),
(r',', Punctuation),
(r'=', Operator),
(r'/\S+', Name),
(r':\w+', Name.Label),
(r'\w:\w+', Text),
(r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
],
}
class RedcodeLexer(RegexLexer):
"""
A simple Redcode lexer based on ICWS'94.
Contributed by Adam Blinkinsop <blinks@acm.org>.
*New in Pygments 0.8.*
"""
name = 'Redcode'
aliases = ['redcode']
filenames = ['*.cw']
opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD',
'JMP','JMZ','JMN','DJN','CMP','SLT','SPL',
'ORG','EQU','END']
modifiers = ['A','B','AB','BA','F','X','I']
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
(r';.*$', Comment.Single),
# Lexemes:
# Identifiers
(r'\b(%s)\b' % '|'.join(opcodes), Name.Function),
(r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator),
(r'[A-Za-z_][A-Za-z_0-9]+', Name),
# Operators
(r'[-+*/%]', Operator),
(r'[#$@<>]', Operator), # mode
(r'[.,]', Punctuation), # mode
# Numbers
(r'[-+]?\d+', Number.Integer),
],
}
class MOOCodeLexer(RegexLexer):
"""
For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting
language).
*New in Pygments 0.9.*
"""
name = 'MOOCode'
filenames = ['*.moo']
aliases = ['moocode']
mimetypes = ['text/x-moocode']
tokens = {
'root' : [
# Numbers
(r'(0|[1-9][0-9_]*)', Number.Integer),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# exceptions
(r'(E_PERM|E_DIV)', Name.Exception),
# db-refs
(r'((#[-0-9]+)|(\$[a-z_A-Z0-9]+))', Name.Entity),
# Keywords
(r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while'
r'|endwhile|break|continue|return|try'
r'|except|endtry|finally|in)\b', Keyword),
# builtins
(r'(random|length)', Name.Builtin),
# special variables
(r'(player|caller|this|args)', Name.Variable.Instance),
# skip whitespace
(r'\s+', Text),
(r'\n', Text),
# other operators
(r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator),
# function call
(r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)),
# variables
(r'([a-zA-Z_0-9]+)', Text),
]
}
class SmalltalkLexer(RegexLexer):
"""
For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
Contributed by Stefan Matthias Aust.
Rewritten by Nils Winter.
*New in Pygments 0.10.*
"""
name = 'Smalltalk'
filenames = ['*.st']
aliases = ['smalltalk', 'squeak']
mimetypes = ['text/x-smalltalk']
tokens = {
'root' : [
(r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
include('squeak fileout'),
include('whitespaces'),
include('method definition'),
(r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
include('objects'),
(r'\^|\:=|\_', Operator),
# temporaries
(r'[\]({}.;!]', Text),
],
'method definition' : [
# Not perfect: can't allow whitespace at the beginning
# without breaking everything
(r'([a-zA-Z]+\w*:)(\s*)(\w+)',
bygroups(Name.Function, Text, Name.Variable)),
(r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
(r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
bygroups(Name.Function, Text, Name.Variable, Text)),
],
'blockvariables' : [
include('whitespaces'),
(r'(:)(\s*)([A-Za-z\w]+)',
bygroups(Operator, Text, Name.Variable)),
(r'\|', Operator, '#pop'),
(r'', Text, '#pop'), # else pop
],
'literals' : [
(r'\'[^\']*\'', String, 'afterobject'),
(r'\$.', String.Char, 'afterobject'),
(r'#\(', String.Symbol, 'parenth'),
(r'\)', Text, 'afterobject'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
],
'_parenth_helper' : [
include('whitespaces'),
(r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w+:]+', String.Symbol),
# literals
(r'\'[^\']*\'', String),
(r'\$.', String.Char),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth' : [
# This state is a bit tricky since
# we can't just pop this state
(r'\)', String.Symbol, ('root','afterobject')),
include('_parenth_helper'),
],
'inner_parenth': [
(r'\)', String.Symbol, '#pop'),
include('_parenth_helper'),
],
'whitespaces' : [
# skip whitespace and comments
(r'\s+', Text),
(r'"[^"]*"', Comment),
],
'objects' : [
(r'\[', Text, 'blockvariables'),
(r'\]', Text, 'afterobject'),
(r'\b(self|super|true|false|nil|thisContext)\b',
Name.Builtin.Pseudo, 'afterobject'),
(r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
(r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
(r'#("[^"]*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
String.Symbol, 'afterobject'),
include('literals'),
],
'afterobject' : [
(r'! !$', Keyword, '#pop'), # squeak chunk delimiter
include('whitespaces'),
(r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
Name.Builtin, '#pop'),
(r'\b(new\b(?!:))', Name.Builtin),
(r'\:=|\_', Operator, '#pop'),
(r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
(r'\b[a-zA-Z]+\w*', Name.Function),
(r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
(r'\.', Punctuation, '#pop'),
(r';', Punctuation),
(r'[\])}]', Text),
(r'[\[({]', Text, '#pop'),
],
'squeak fileout' : [
# Squeak fileout format (optional)
(r'^"[^"]*"!', Keyword),
(r"^'[^']*'!", Keyword),
(r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
(r'^(!)(\w+(?: class)?)( methodsFor: )(\'[^\']*\')(.*?!)',
bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
(r'^(\w+)( subclass: )(#\w+)'
r'(\s+instanceVariableNames: )(.*?)'
r'(\s+classVariableNames: )(.*?)'
r'(\s+poolDictionaries: )(.*?)'
r'(\s+category: )(.*?)(!)',
bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
String, Keyword, String, Keyword, String, Keyword)),
(r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
bygroups(Name.Class, Keyword, String, Keyword)),
(r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
(r'! !$', Keyword),
],
}
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
*New in Pygments 0.10.*
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|'
r'fg|filetest|getxvers|glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|set|shift|'
r'sched|setenv|setpath|settc|setty|setxvers|shift|source|stop|suspend|'
r'source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\n\[\]{}()$"\'`\\]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
*New in Pygments 0.10.*
"""
name = 'Logtalk'
aliases = ['logtalk']
filenames = ['*.lgt']
mimetypes = ['text/x-logtalk']
tokens = {
'root': [
# Directives
(r'^\s*:-\s',Punctuation,'directive'),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/',Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number),
(r'0o[0-7]+', Number),
(r'0x[0-9a-fA-F]+', Number),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Event handlers
(r'(after|before)(?=[(])', Keyword),
# Execution-context methods
(r'(parameter|this|se(lf|nder))(?=[(])', Keyword),
# Reflection
(r'(current_predicate|predicate_property)(?=[(])', Keyword),
# DCGs and term expansion
(r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])',
Keyword),
# Entity
(r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])',
Keyword),
(r'(object|protocol|category)_property(?=[(])', Keyword),
# Entity relations
(r'complements_object(?=[(])', Keyword),
(r'extends_(object|protocol|category)(?=[(])', Keyword),
(r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
(r'(instantiat|specializ)es_class(?=[(])', Keyword),
# Events
(r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
# Flags
(r'(current|set)_logtalk_flag(?=[(])', Keyword),
# Compiling, loading, and library paths
(r'logtalk_(compile|l(ibrary_path|oad))(?=[(])', Keyword),
# Database
(r'(clause|retract(all)?)(?=[(])', Keyword),
(r'a(bolish|ssert(a|z))(?=[(])', Keyword),
# Control
(r'(ca(ll|tch)|throw)(?=[(])', Keyword),
(r'(fail|true)\b', Keyword),
# All solutions
(r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
# Multi-threading meta-predicates
(r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])',
Keyword),
# Term unification
(r'unify_with_occurs_check(?=[(])', Keyword),
# Term creation and decomposition
(r'(functor|arg|copy_term)(?=[(])', Keyword),
# Evaluable functors
(r'(rem|mod|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
(r'(floor|truncate|round|ceiling)(?=[(])', Keyword),
# Other arithmetic functors
(r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword),
# Term testing
(r'(var|atom(ic)?|integer|float|compound|n(onvar|umber))(?=[(])',
Keyword),
# Stream selection and control
(r'(curren|se)t_(in|out)put(?=[(])', Keyword),
(r'(open|close)(?=[(])', Keyword),
(r'flush_output(?=[(])', Keyword),
(r'(at_end_of_stream|flush_output)\b', Keyword),
(r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])',
Keyword),
# Character and byte input/output
(r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
(r'\bnl\b', Keyword),
# Term input/output
(r'read(_term)?(?=[(])', Keyword),
(r'write(q|_(canonical|term))?(?=[(])', Keyword),
(r'(current_)?op(?=[(])', Keyword),
(r'(current_)?char_conversion(?=[(])', Keyword),
# Atomic term processing
(r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
(r'(char_code|sub_atom)(?=[(])', Keyword),
(r'number_c(har|ode)s(?=[(])', Keyword),
# Implementation defined hooks functions
(r'(se|curren)t_prolog_flag(?=[(])', Keyword),
(r'\bhalt\b', Keyword),
(r'halt(?=[(])', Keyword),
# Message sending operators
(r'(::|:|\^\^)', Operator),
# External call
(r'[{}]', Keyword),
# Logic and control
(r'\bonce(?=[(])', Keyword),
(r'\brepeat\b', Keyword),
# Bitwise functors
(r'(>>|<<|/\\|\\\\|\\)', Operator),
# Arithmetic evaluation
(r'\bis\b', Keyword),
# Arithmetic comparison
(r'(=:=|=\\=|<|=<|>=|>)', Operator),
# Term creation and decomposition
(r'=\.\.', Operator),
# Term unification
(r'(=|\\=)', Operator),
# Term comparison
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
# Evaluable functors
(r'(//|[-+*/])', Operator),
(r'\b(mod|rem)\b', Operator),
# Other arithmetic functors
(r'\b\*\*\b', Operator),
# DCG rules
(r'-->', Operator),
# Control constructs
(r'([!;]|->)', Operator),
# Logic and control
(r'\\+', Operator),
# Mode operators
(r'[?@]', Operator),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# Punctuation
(r'[()\[\],.|]', Text),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
],
'quoted_atom': [
(r"['][']", String),
(r"[']", String, '#pop'),
(r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
(r"[^\\'\n]+", String),
(r'\\', String),
],
'directive': [
# Conditional compilation directives
(r'(el)?if(?=[(])', Keyword, 'root'),
(r'(e(lse|ndif))[.]', Keyword, 'root'),
# Entity directives
(r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
(r'(end_(category|object|protocol))[.]',Keyword, 'root'),
# Predicate scope directives
(r'(public|protected|private)(?=[(])', Keyword, 'root'),
# Other directives
(r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
(r'in(fo|itialization)(?=[(])', Keyword, 'root'),
(r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'),
(r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|'
r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
(r'op(?=[(])', Keyword, 'root'),
(r'(calls|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
(r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
(r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'),
],
'entityrelations': [
(r'(extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])',
Keyword),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number),
(r'0o[0-7]+', Number),
(r'0x[0-9a-fA-F]+', Number),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
# Atoms
(r"[a-z][a-zA-Z0-9_]*", Text),
(r"[']", String, 'quoted_atom'),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# End of entity-opening directive
(r'([)]\.)', Text, 'root'),
# Scope operator
(r'(::)', Operator),
# Punctuation
(r'[()\[\],.|]', Text),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/',Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
]
}
def analyse_text(text):
if ':- object(' in text:
return True
if ':- protocol(' in text:
return True
if ':- category(' in text:
return True
return False
def _shortened(word):
dpos = word.find('$')
return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1)])
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
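# For example, _shortened('bi$nd') expands to r'bind\b|bin\b|bi\b', so any
# abbreviation of "bind" down to the marked prefix "bi" matches as a keyword.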
class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
*New in Pygments 0.11.*
"""
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator), 'genericargs'),
('([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)',
bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
('', Comment, '#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'sqstring': [
(r"''", String), # escaped single quote
(r"'", String, '#pop'),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # normal backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
('[,.~!%^&*+=|?:<>/-]', Operator),
('[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der",
"box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta",
"data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign",
"fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid",
"hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle",
"la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale",
"mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin",
"rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot",
"mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics",
"nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics",
"mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput",
"pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot",
"poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze",
"st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs",
"ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le",
"v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta",
"yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel",
"yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs",
"x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs",
"zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs",
"x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs",
"noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs",
"xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs",
"noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs",
"cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange",
"y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange",
"vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis",
"zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
*New in Pygments 0.11.*
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(r'#(debug|default|else|end|error|fclose|fopen|if|ifdef|ifndef|'
r'include|range|read|render|statistics|switch|undef|version|'
r'warning|while|write|define|macro|local|declare)',
Comment.Preproc),
(r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|'
r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|'
r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|'
r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|'
r'attenuating|average|background|black_hole|blue|blur_samples|'
r'bounded_by|box_mapping|bozo|break|brick|brick_size|'
r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|'
r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|'
r'color|color_map|colour|colour_map|component|composite|concat|'
r'confidence|conic_sweep|constant|control0|control1|cos|cosh|'
r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|'
r'debug|declare|default|degrees|dents|diffuse|direction|'
r'distance|distance_maximum|div|dust|dust_type|eccentricity|'
r'else|emitting|end|error|error_bound|exp|exponent|'
r'fade_distance|fade_power|falloff|falloff_angle|false|'
r'file_exists|filter|finish|fisheye|flatness|flip|floor|'
r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|'
r'global_settings|glowing|gradient|granite|gray_threshold|'
r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|'
r'if|ifdef|iff|image_map|incidence|include|int|interpolate|'
r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|'
r'linear|linear_spline|linear_sweep|location|log|looks_like|'
r'look_at|low_error_factor|mandel|map_type|marble|material_map|'
r'matrix|max|max_intersections|max_iteration|max_trace_level|'
r'max_value|metallic|min|minimum_reuse|mod|mortar|'
r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|'
r'octaves|off|offset|omega|omnimax|on|once|onion|open|'
r'orthographic|panoramic|pattern1|pattern2|pattern3|'
r'perspective|pgm|phase|phong|phong_size|pi|pigment|'
r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|'
r'precision|pwr|quadratic_spline|quaternion|quick_color|'
r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|'
r'ramp_wave|rand|range|reciprocal|recursion_limit|red|'
r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|'
r'right|ripples|rotate|roughness|samples|scale|scallop_wave|'
r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|'
r'slice|slope_map|smooth|specular|spherical_mapping|spiral|'
r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|'
r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|'
r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|'
r'test_camera_4|texture|texture_map|tga|thickness|threshold|'
r'tightness|tile2|tiles|track|transform|translate|transmit|'
r'triangle_wave|true|ttf|turbulence|turb_depth|type|'
r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|'
r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|'
r'vnormalize|volume_object|volume_rendered|vol_with_light|'
r'vrotate|v_steps|warning|warp|water_level|waves|while|width|'
r'wood|wrinkles|yes)\b', Keyword),
(r'bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|'
r'disc|height_field|intersection|julia_fractal|lathe|'
r'light_source|merge|mesh|object|plane|poly|polygon|prism|'
r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|'
r'text|torus|triangle|union', Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\s+', Text),
]
}
class AppleScriptLexer(RegexLexer):
"""
For `AppleScript source code
<http://developer.apple.com/documentation/AppleScript/
Conceptual/AppleScriptLangGuide>`_,
including `AppleScript Studio
<http://developer.apple.com/documentation/AppleScript/
Reference/StudioReference>`_.
Contributed by Andreas Amann <aamann@mac.com>.
"""
name = 'AppleScript'
aliases = ['applescript']
filenames = ['*.applescript']
flags = re.MULTILINE | re.DOTALL
Identifiers = r'[a-zA-Z]\w*'
Literals = ['AppleScript', 'current application', 'false', 'linefeed',
'missing value', 'pi','quote', 'result', 'return', 'space',
'tab', 'text item delimiters', 'true', 'version']
Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ',
'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
'real ', 'record ', 'reference ', 'RGB color ', 'script ',
'text ', 'unit types', '(Unicode )?text', 'string']
BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month',
'paragraph', 'word', 'year']
HandlerParams = ['about', 'above', 'against', 'apart from', 'around',
'aside from', 'at', 'below', 'beneath', 'beside',
'between', 'for', 'given', 'instead of', 'on', 'onto',
'out of', 'over', 'since']
Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL',
'choose application', 'choose color', 'choose file( name)?',
'choose folder', 'choose from list',
'choose remote application', 'clipboard info',
'close( access)?', 'copy', 'count', 'current date', 'delay',
'delete', 'display (alert|dialog)', 'do shell script',
'duplicate', 'exists', 'get eof', 'get volume settings',
'info for', 'launch', 'list (disks|folder)', 'load script',
'log', 'make', 'mount volume', 'new', 'offset',
'open( (for access|location))?', 'path to', 'print', 'quit',
'random number', 'read', 'round', 'run( script)?',
'say', 'scripting components',
'set (eof|the clipboard to|volume)', 'store script',
'summarize', 'system attribute', 'system info',
'the clipboard', 'time to GMT', 'write', 'quoted form']
References = ['(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
'before', 'behind', 'every', 'front', 'index', 'last',
'middle', 'some', 'that', 'through', 'thru', 'where', 'whose']
Operators = ["and", "or", "is equal", "equals", "(is )?equal to", "is not",
"isn't", "isn't equal( to)?", "is not equal( to)?",
"doesn't equal", "does not equal", "(is )?greater than",
"comes after", "is not less than or equal( to)?",
"isn't less than or equal( to)?", "(is )?less than",
"comes before", "is not greater than or equal( to)?",
"isn't greater than or equal( to)?",
"(is )?greater than or equal( to)?", "is not less than",
"isn't less than", "does not come before",
"doesn't come before", "(is )?less than or equal( to)?",
"is not greater than", "isn't greater than",
"does not come after", "doesn't come after", "starts? with",
"begins? with", "ends? with", "contains?", "does not contain",
"doesn't contain", "is in", "is contained by", "is not in",
"is not contained by", "isn't contained by", "div", "mod",
"not", "(a )?(ref( to)?|reference to)", "is", "does"]
Control = ['considering', 'else', 'error', 'exit', 'from', 'if',
'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
'try', 'until', 'using terms from', 'while', 'whith',
'with timeout( of)?', 'with transaction', 'by', 'continue',
'end', 'its?', 'me', 'my', 'return', 'of' , 'as']
Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get']
Reserved = ['but', 'put', 'returning', 'the']
StudioClasses = ['action cell', 'alert reply', 'application', 'box',
'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
'clip view', 'color well', 'color-panel',
'combo box( item)?', 'control',
'data( (cell|column|item|row|source))?', 'default entry',
'dialog reply', 'document', 'drag info', 'drawer',
'event', 'font(-panel)?', 'formatter',
'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
'movie( view)?', 'open-panel', 'outline view', 'panel',
'pasteboard', 'plugin', 'popup button',
'progress indicator', 'responder', 'save-panel',
'scroll view', 'secure text field( cell)?', 'slider',
'sound', 'split view', 'stepper', 'tab view( item)?',
'table( (column|header cell|header view|view))',
'text( (field( cell)?|view))?', 'toolbar( item)?',
'user-defaults', 'view', 'window']
StudioEvents = ['accept outline drop', 'accept table drop', 'action',
'activated', 'alert ended', 'awake from nib', 'became key',
'became main', 'begin editing', 'bounds changed',
'cell value', 'cell value changed', 'change cell value',
'change item value', 'changed', 'child of item',
'choose menu item', 'clicked', 'clicked toolbar item',
'closed', 'column clicked', 'column moved',
'column resized', 'conclude drop', 'data representation',
'deminiaturized', 'dialog ended', 'document nib name',
'double clicked', 'drag( (entered|exited|updated))?',
'drop', 'end editing', 'exposed', 'idle', 'item expandable',
'item value', 'item value changed', 'items changed',
'keyboard down', 'keyboard up', 'launched',
'load data representation', 'miniaturized', 'mouse down',
'mouse dragged', 'mouse entered', 'mouse exited',
'mouse moved', 'mouse up', 'moved',
'number of browser rows', 'number of items',
'number of rows', 'open untitled', 'opened', 'panel ended',
'parameters updated', 'plugin loaded', 'prepare drop',
'prepare outline drag', 'prepare outline drop',
'prepare table drag', 'prepare table drop',
'read from file', 'resigned active', 'resigned key',
'resigned main', 'resized( sub views)?',
'right mouse down', 'right mouse dragged',
'right mouse up', 'rows changed', 'scroll wheel',
'selected tab view item', 'selection changed',
'selection changing', 'should begin editing',
'should close', 'should collapse item',
'should end editing', 'should expand item',
'should open( untitled)?',
'should quit( after last window closed)?',
'should select column', 'should select item',
'should select row', 'should select tab view item',
'should selection change', 'should zoom', 'shown',
'update menu item', 'update parameters',
'update toolbar item', 'was hidden', 'was miniaturized',
'will become active', 'will close', 'will dismiss',
'will display browser cell', 'will display cell',
'will display item cell', 'will display outline cell',
'will finish launching', 'will hide', 'will miniaturize',
'will move', 'will open', 'will pop up', 'will quit',
'will resign active', 'will resize( sub views)?',
'will select tab view item', 'will show', 'will zoom',
'write to file', 'zoomed']
StudioCommands = ['animate', 'append', 'call method', 'center',
'close drawer', 'close panel', 'display',
'display alert', 'display dialog', 'display panel', 'go',
'hide', 'highlight', 'increment', 'item for',
'load image', 'load movie', 'load nib', 'load panel',
'load sound', 'localized string', 'lock focus', 'log',
'open drawer', 'path for', 'pause', 'perform action',
'play', 'register', 'resume', 'scroll', 'select( all)?',
'show', 'size to fit', 'start', 'step back',
'step forward', 'stop', 'synchronize', 'unlock focus',
'update']
StudioProperties = ['accepts arrow key', 'action method', 'active',
'alignment', 'allowed identifiers',
'allows branch selection', 'allows column reordering',
'allows column resizing', 'allows column selection',
'allows customization',
'allows editing text attributes',
'allows empty selection', 'allows mixed state',
'allows multiple selection', 'allows reordering',
'allows undo', 'alpha( value)?', 'alternate image',
'alternate increment value', 'alternate title',
'animation delay', 'associated file name',
'associated object', 'auto completes', 'auto display',
'auto enables items', 'auto repeat',
'auto resizes( outline column)?',
'auto save expanded items', 'auto save name',
'auto save table columns', 'auto saves configuration',
'auto scroll', 'auto sizes all columns to fit',
'auto sizes cells', 'background color', 'bezel state',
'bezel style', 'bezeled', 'border rect', 'border type',
'bordered', 'bounds( rotation)?', 'box type',
'button returned', 'button type',
'can choose directories', 'can choose files',
'can draw', 'can hide',
'cell( (background color|size|type))?', 'characters',
'class', 'click count', 'clicked( data)? column',
'clicked data item', 'clicked( data)? row',
'closeable', 'collating', 'color( (mode|panel))',
'command key down', 'configuration',
'content(s| (size|view( margins)?))?', 'context',
'continuous', 'control key down', 'control size',
'control tint', 'control view',
'controller visible', 'coordinate system',
'copies( on scroll)?', 'corner view', 'current cell',
'current column', 'current( field)? editor',
'current( menu)? item', 'current row',
'current tab view item', 'data source',
'default identifiers', 'delta (x|y|z)',
'destination window', 'directory', 'display mode',
'displayed cell', 'document( (edited|rect|view))?',
'double value', 'dragged column', 'dragged distance',
'dragged items', 'draws( cell)? background',
'draws grid', 'dynamically scrolls', 'echos bullets',
'edge', 'editable', 'edited( data)? column',
'edited data item', 'edited( data)? row', 'enabled',
'enclosing scroll view', 'ending page',
'error handling', 'event number', 'event type',
'excluded from windows menu', 'executable path',
'expanded', 'fax number', 'field editor', 'file kind',
'file name', 'file type', 'first responder',
'first visible column', 'flipped', 'floating',
'font( panel)?', 'formatter', 'frameworks path',
'frontmost', 'gave up', 'grid color', 'has data items',
'has horizontal ruler', 'has horizontal scroller',
'has parent data item', 'has resize indicator',
'has shadow', 'has sub menu', 'has vertical ruler',
'has vertical scroller', 'header cell', 'header view',
'hidden', 'hides when deactivated', 'highlights by',
'horizontal line scroll', 'horizontal page scroll',
'horizontal ruler view', 'horizontally resizable',
'icon image', 'id', 'identifier',
'ignores multiple clicks',
'image( (alignment|dims when disabled|frame style|'
'scaling))?',
'imports graphics', 'increment value',
'indentation per level', 'indeterminate', 'index',
'integer value', 'intercell spacing', 'item height',
'key( (code|equivalent( modifier)?|window))?',
'knob thickness', 'label', 'last( visible)? column',
'leading offset', 'leaf', 'level', 'line scroll',
'loaded', 'localized sort', 'location', 'loop mode',
'main( (bunde|menu|window))?', 'marker follows cell',
'matrix mode', 'maximum( content)? size',
'maximum visible columns',
'menu( form representation)?', 'miniaturizable',
'miniaturized', 'minimized image', 'minimized title',
'minimum column width', 'minimum( content)? size',
'modal', 'modified', 'mouse down state',
'movie( (controller|file|rect))?', 'muted', 'name',
'needs display', 'next state', 'next text',
'number of tick marks', 'only tick mark values',
'opaque', 'open panel', 'option key down',
'outline table column', 'page scroll', 'pages across',
'pages down', 'palette label', 'pane splitter',
'parent data item', 'parent window', 'pasteboard',
'path( (names|separator))?', 'playing',
'plays every frame', 'plays selection only', 'position',
'preferred edge', 'preferred type', 'pressure',
'previous text', 'prompt', 'properties',
'prototype cell', 'pulls down', 'rate',
'released when closed', 'repeated',
'requested print time', 'required file type',
'resizable', 'resized column', 'resource path',
'returns records', 'reuses columns', 'rich text',
'roll over', 'row height', 'rulers visible',
'save panel', 'scripts path', 'scrollable',
'selectable( identifiers)?', 'selected cell',
'selected( data)? columns?', 'selected data items?',
'selected( data)? rows?', 'selected item identifier',
'selection by rect', 'send action on arrow key',
'sends action when done editing', 'separates columns',
'separator item', 'sequence number', 'services menu',
'shared frameworks path', 'shared support path',
'sheet', 'shift key down', 'shows alpha',
'shows state by', 'size( mode)?',
'smart insert delete enabled', 'sort case sensitivity',
'sort column', 'sort order', 'sort type',
'sorted( data rows)?', 'sound', 'source( mask)?',
'spell checking enabled', 'starting page', 'state',
'string value', 'sub menu', 'super menu', 'super view',
'tab key traverses cells', 'tab state', 'tab type',
'tab view', 'table view', 'tag', 'target( printer)?',
        'text color', 'text container inset',
'text container origin', 'text returned',
'tick mark position', 'time stamp',
'title(d| (cell|font|height|position|rect))?',
'tool tip', 'toolbar', 'trailing offset', 'transparent',
'treat packages as directories', 'truncated labels',
'types', 'unmodified characters', 'update views',
'use sort indicator', 'user defaults',
'uses data source', 'uses ruler',
'uses threaded animation',
'uses title from previous column', 'value wraps',
'version',
'vertical( (line scroll|page scroll|ruler view))?',
'vertically resizable', 'view',
'visible( document rect)?', 'volume', 'width', 'window',
'windows menu', 'wraps', 'zoomable', 'zoomed']
tokens = {
'root': [
(r'\s+', Text),
(ur'¬\n', String.Escape),
(r"'s\s+", Text), # This is a possessive, consider moving
(r'(--|#).*?$', Comment),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[\(\){}!,.:]', Punctuation),
(ur'(«)([^»]+)(»)',
bygroups(Text, Name.Builtin, Text)),
(r'\b((?:considering|ignoring)\s*)'
r'(application responses|case|diacriticals|hyphens|'
r'numeric strings|punctuation|white space)',
bygroups(Keyword, Name.Builtin)),
(ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
(r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
(r'^(\s*(?:on|end)\s+)'
r'(%s)' % '|'.join(StudioEvents),
bygroups(Keyword, Name.Function)),
(r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
(r'\b(as )(%s)\b' % '|'.join(Classes),
bygroups(Keyword, Name.Class)),
(r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
(r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(Control), Keyword),
(r'\b(%s)\b' % '|'.join(Declarations), Keyword),
(r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
(r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
(r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
(r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(References), Name.Builtin),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r'\b(%s)\b' % Identifiers, Name.Variable),
(r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
(r'[-+]?\d+', Number.Integer),
],
'comment': [
('\(\*', Comment.Multiline, '#push'),
('\*\)', Comment.Multiline, '#pop'),
('[^*(]+', Comment.Multiline),
('[*(]', Comment.Multiline),
],
}
class ModelicaLexer(RegexLexer):
"""
For `Modelica <http://www.modelica.org/>`_ source code.
*New in Pygments 1.1.*
"""
name = 'Modelica'
aliases = ['modelica']
filenames = ['*.mo']
mimetypes = ['text/x-modelica']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment),
],
'statements': [
(r'"', String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\]{},.;]', Punctuation),
(r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
(r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')"
r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class),
(r"('[\w\+\-\*\/\^]+'|\w+)", Name) ],
'root': [
include('whitespace'),
include('keywords'),
include('functions'),
include('operators'),
include('classes'),
(r'("<html>|<html>)', Name.Tag, 'html-content'),
include('statements')
],
'keywords': [
(r'(algorithm|annotation|break|connect|constant|constrainedby|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
r'end|equation|exit|expandable|extends|'
r'external|false|final|flow|for|if|import|in|inner|input|'
r'loop|nondiscrete|outer|output|parameter|partial|'
r'protected|public|redeclare|replaceable|stream|time|then|true|'
r'when|while|within)\b', Keyword)
],
'functions': [
(r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|'
r'cross|div|exp|floor|log|log10|mod|rem|sign|sin|sinh|size|'
r'sqrt|tan|tanh|zeros)\b', Name.Function)
],
'operators': [
(r'(and|assert|cardinality|change|delay|der|edge|initial|'
r'noEvent|not|or|pre|reinit|return|sample|smooth|'
r'terminal|terminate)\b', Name.Builtin)
],
'classes': [
(r'(block|class|connector|function|model|package|'
r'record|type)\b', Name.Class)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})',
String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String) # stray backslash
],
'html-content': [
(r'<\s*/\s*html\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)),
]
}
class RebolLexer(RegexLexer):
"""
A `REBOL <http://www.rebol.com/>`_ lexer.
*New in Pygments 1.1.*
"""
name = 'REBOL'
aliases = ['rebol']
filenames = ['*.r', '*.r3']
mimetypes = ['text/x-rebol']
flags = re.IGNORECASE | re.MULTILINE
escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)'
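    # Single-pass word classifier: word_callback below inspects each matched
    # REBOL word and yields one token, bucketing set-words, natives, actions,
    # helper functions, operators, lit-words, issues and files, and falling
    # back to Name.Variable for everything else.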
def word_callback(lexer, match):
word = match.group()
if re.match(".*:$", word):
yield match.start(), Generic.Subheading, word
elif re.match(
r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|'
r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|'
r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|'
r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|'
r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|'
r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|'
r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|'
r'while|compress|decompress|secure|open|close|read|read-io|'
r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|'
r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|'
r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|'
r'browse|launch|stats|get-modes|set-modes|to-local-file|'
r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|'
r'hide|draw|show|size-text|textinfo|offset-to-caret|'
r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|'
r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|'
r'dsa-make-key|dsa-generate-key|dsa-make-signature|'
r'dsa-verify-signature|rsa-make-key|rsa-generate-key|'
r'rsa-encrypt)$', word):
yield match.start(), Name.Builtin, word
elif re.match(
r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|'
r'minimum|maximum|negate|complement|absolute|random|head|tail|'
r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|'
r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|'
r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|'
r'copy)$', word):
yield match.start(), Name.Function, word
elif re.match(
r'(error|source|input|license|help|install|echo|Usage|with|func|'
r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|'
r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|'
r'remold|charset|array|replace|move|extract|forskip|forall|alter|'
r'first+|also|take|for|forever|dispatch|attempt|what-dir|'
r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|'
r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|'
r'build-tag|process-source|build-markup|decode-cgi|read-cgi|'
r'write-user|save-user|set-user-name|protect-system|parse-xml|'
r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|'
r'scroll-para|get-face|alert|set-face|uninstall|unfocus|'
r'request-dir|center-face|do-events|net-error|decode-url|'
r'parse-header|parse-header-date|parse-email-addrs|import-email|'
r'send|build-attach-body|resend|show-popup|hide-popup|open-events|'
r'find-key-face|do-face|viewtop|confine|find-window|'
r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|'
r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|'
r'read-thru|load-thru|do-thru|launch-thru|load-image|'
r'request-download|do-face-alt|set-font|set-para|get-style|'
r'set-style|make-face|stylize|choose|hilight-text|hilight-all|'
r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|'
r'resize-face|load-stock|load-stock-block|notify|request|flash|'
r'request-color|request-pass|request-text|request-list|'
r'request-date|request-file|dbug|editor|link-relative-path|'
r'emailer|parse-error)$', word):
yield match.start(), Keyword.Namespace, word
elif re.match(
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|'
r'return|exit|break)$', word):
yield match.start(), Name.Exception, word
elif re.match('REBOL$', word):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
word):
yield match.start(), Operator, word
elif re.match(".*\?$", word):
yield match.start(), Keyword, word
elif re.match(".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
elif re.match("#.*", word):
yield match.start(), Name.Label, word # issue
elif re.match("%.*", word):
yield match.start(), Name.Decorator, word # file
else:
yield match.start(), Name.Variable, word
tokens = {
'root': [
(r'\s+', Text),
(r'#"', String.Char, 'char'),
(r'#{[0-9a-fA-F]*}', Number.Hex),
(r'2#{', Number.Hex, 'bin2'),
(r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex),
(r'"', String, 'string'),
(r'{', String, 'string2'),
(r';#+.*\n', Comment.Special),
(r';\*+.*\n', Comment.Preproc),
(r';.*\n', Comment),
(r'%"', Name.Decorator, 'stringFile'),
(r'%[^(\^{^")\s\[\]]+', Name.Decorator),
(r'<[a-zA-Z0-9:._-]*>', Name.Tag),
(r'<[^(<>\s")]+', Name.Tag, 'tag'),
(r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
(r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?'
r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+[xX]\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float),
(r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float),
(r'[+-]?\d+(\'\d+)?', Number),
(r'[\[\]\(\)]', Generic.Strong),
(r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator), # url
(r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # url
(r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # email
(r'comment\s', Comment, 'comment'),
(r'/[^(\^{^")\s/[\]]*', Name.Attribute),
(r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback),
(r'([^(\^{^")\s]+)', Text),
],
'string': [
(r'[^(\^")]+', String),
(escape_re, String.Escape),
(r'[\(|\)]+', String),
(r'\^.', String.Escape),
(r'"', String, '#pop'),
],
'string2': [
(r'[^(\^{^})]+', String),
(escape_re, String.Escape),
(r'[\(|\)]+', String),
(r'\^.', String.Escape),
(r'{', String, '#push'),
(r'}', String, '#pop'),
],
'stringFile': [
(r'[^(\^")]+', Name.Decorator),
(escape_re, Name.Decorator),
(r'\^.', Name.Decorator),
(r'"', Name.Decorator, '#pop'),
],
'char': [
(escape_re + '"', String.Char, '#pop'),
(r'\^."', String.Char, '#pop'),
(r'."', String.Char, '#pop'),
],
'tag': [
(escape_re, Name.Tag),
(r'"', Name.Tag, 'tagString'),
(r'[^(<>\r\n")]+', Name.Tag),
(r'>', Name.Tag, '#pop'),
],
'tagString': [
(r'[^(\^")]+', Name.Tag),
(escape_re, Name.Tag),
(r'[\(|\)]+', Name.Tag),
(r'\^.', Name.Tag),
(r'"', Name.Tag, '#pop'),
],
'tuple': [
(r'(\d+\.)+', Keyword.Constant),
(r'\d+', Keyword.Constant, '#pop'),
],
'bin2': [
(r'\s+', Number.Hex),
(r'([0-1]\s*){8}', Number.Hex),
(r'}', Number.Hex, '#pop'),
],
'comment': [
(r'"', Comment, 'commentString1'),
(r'{', Comment, 'commentString2'),
(r'\[', Comment, 'commentBlock'),
(r'[^(\s{\"\[]+', Comment, '#pop'),
],
'commentString1': [
(r'[^(\^")]+', Comment),
(escape_re, Comment),
(r'[\(|\)]+', Comment),
(r'\^.', Comment),
(r'"', Comment, '#pop'),
],
'commentString2': [
(r'[^(\^{^})]+', Comment),
(escape_re, Comment),
(r'[\(|\)]+', Comment),
(r'\^.', Comment),
(r'{', Comment, '#push'),
(r'}', Comment, '#pop'),
],
'commentBlock': [
(r'\[',Comment, '#push'),
(r'\]',Comment, '#pop'),
(r'[^(\[\])]*', Comment),
],
}
class ABAPLexer(RegexLexer):
"""
Lexer for ABAP, SAP's integrated language.
*New in Pygments 1.1.*
"""
name = 'ABAP'
aliases = ['abap']
filenames = ['*.abap']
mimetypes = ['text/x-abap']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'common': [
(r'\s+', Text),
(r'^\*.*$', Comment.Single),
(r'\".*?\n', Comment.Single),
],
'variable-names': [
(r'<[\S_]+>', Name.Variable),
(r'[\w][\w_~]*(?:(\[\])|->\*)?', Name.Variable),
],
'root': [
include('common'),
            # function calls
(r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)',
bygroups(Keyword, Text, Name.Function)),
(r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|'
r'TRANSACTION|TRANSFORMATION))\b',
Keyword),
(r'(FORM|PERFORM)(\s+)([\w_]+)',
bygroups(Keyword, Text, Name.Function)),
(r'(PERFORM)(\s+)(\()([\w_]+)(\))',
bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )),
(r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)',
bygroups(Keyword, Text, Name.Function, Text, Keyword)),
# method implementation
(r'(METHOD)(\s+)([\w_~]+)',
bygroups(Keyword, Text, Name.Function)),
# method calls
(r'(\s+)([\w_\-]+)([=\-]>)([\w_\-~]+)',
bygroups(Text, Name.Variable, Operator, Name.Function)),
# call methodnames returning style
(r'(?<=(=|-)>)([\w_\-~]+)(?=\()', Name.Function),
# keywords with dashes in them.
# these need to be first, because for instance the -ID part
# of MESSAGE-ID wouldn't get highlighted if MESSAGE was
# first in the list of keywords.
(r'(ADD-CORRESPONDING|AUTHORITY-CHECK|'
r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|'
r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|'
r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|'
r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|'
r'INTERFACE-POOL|INVERTED-DATE|'
r'LOAD-OF-PROGRAM|LOG-POINT|'
r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|'
r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|'
r'OUTPUT-LENGTH|PRINT-CONTROL|'
r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|'
r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|'
r'TYPE-POOL|TYPE-POOLS'
r')\b', Keyword),
            # keyword combinations
(r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|'
r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|'
r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|'
r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|'
r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|'
r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|'
r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|'
r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|'
r'RUN\s+TIME|TIME\s+(STAMP)?)?|'
r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|'
r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|'
r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|'
r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|'
            r'TITLEBAR|UPDATE\s+TASK\s+LOCAL|USER-COMMAND)|'
r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|'
r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|'
r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|'
r'DATABASE|SHARED\s+(MEMORY|BUFFER))|'
r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|'
r'FREE\s(MEMORY|OBJECT)?|'
r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|'
r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|'
r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|'
r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|'
r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|'
r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|'
r'SCREEN)|COMMENT|FUNCTION\s+KEY|'
r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|'
r'SKIP|ULINE)|'
r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|'
            r'TO LIST-PROCESSING|TO TRANSACTION)|'
r'(ENDING|STARTING)\s+AT|'
r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|'
r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|'
r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|'
r'(BEGIN|END)\s+OF|'
r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|'
r'COMPARING(\s+ALL\s+FIELDS)?|'
r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|'
r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|'
r'END-OF-(DEFINITION|PAGE|SELECTION)|'
r'WITH\s+FRAME(\s+TITLE)|'
            # simple combinations
r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|'
r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|'
r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|'
r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|'
r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|'
r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|'
r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword),
# single word keywords.
(r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|'
r'ASSIGN(ING)?|AT(\s+FIRST)?|'
r'BACK|BLOCK|BREAK-POINT|'
r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|'
r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|'
r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|'
r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|'
r'DETAIL|DIRECTORY|DIVIDE|DO|'
r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|'
r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|'
r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|'
r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|'
r'HIDE|'
r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|'
r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|'
r'LENGTH|LINES|LOAD|LOCAL|'
r'JOIN|'
r'KEY|'
r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|'
r'NODES|'
r'OBLIGATORY|OF|OFF|ON|OVERLAY|'
r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|'
r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|'
r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|'
r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|'
r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|'
r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|'
r'ULINE|UNDER|UNPACK|UPDATE|USING|'
r'VALUE|VALUES|VIA|'
r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword),
# builtins
(r'(abs|acos|asin|atan|'
r'boolc|boolx|bit_set|'
r'char_off|charlen|ceil|cmax|cmin|condense|contains|'
r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|'
r'count|count_any_of|count_any_not_of|'
r'dbmaxlen|distance|'
r'escape|exp|'
r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|'
r'insert|'
r'lines|log|log10|'
r'match|matches|'
r'nmax|nmin|numofchar|'
r'repeat|replace|rescale|reverse|round|'
r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|'
r'substring|substring_after|substring_from|substring_before|substring_to|'
r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|'
r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)),
(r'&[0-9]', Name),
(r'[0-9]+', Number.Integer),
# operators which look like variable names before
# parsing variable names.
(r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|'
r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|'
r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator),
include('variable-names'),
            # standard operators after variable names,
# because < and > are part of field symbols.
(r'[?*<>=\-+]', Operator),
(r"'(''|[^'])*'", String.Single),
(r'[/;:()\[\],\.]', Punctuation)
],
}
class NewspeakLexer(RegexLexer):
"""
    For `Newspeak <http://newspeaklanguage.org/>`_ syntax.
"""
name = 'Newspeak'
filenames = ['*.ns2']
aliases = ['newspeak', ]
mimetypes = ['text/x-newspeak']
tokens = {
'root' : [
(r'\b(Newsqueak2)\b',Keyword.Declaration),
(r"'[^']*'",String),
(r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)',
bygroups(Keyword.Declaration,Text,Name.Class,Text)),
(r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
Keyword),
(r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)',
bygroups(Name.Function,Text,Name.Variable)),
(r'([a-zA-Z0-9_]+)(\s*)(=)',
bygroups(Name.Attribute,Text,Operator)),
(r'<[a-zA-Z0-9_]+>', Comment.Special),
include('expressionstat'),
include('whitespace')
],
'expressionstat': [
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'\d+', Number.Integer),
(r':\w+',Name.Variable),
(r'(\w+)(::)', bygroups(Name.Variable, Operator)),
(r'\w+:', Name.Function),
(r'\w+', Name.Variable),
(r'\(|\)', Punctuation),
(r'\[|\]', Punctuation),
(r'\{|\}', Punctuation),
(r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
(r'\.|;', Punctuation),
include('whitespace'),
include('literals'),
],
'literals': [
(r'\$.', String),
(r"'[^']*'", String),
(r"#'[^']*'", String.Symbol),
(r"#\w+:?", String.Symbol),
(r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
],
'whitespace' : [
(r'\s+', Text),
(r'"[^"]*"', Comment)
]
}
class GherkinLexer(RegexLexer):
"""
    For `Gherkin <http://cukes.info/>`_ syntax.
*New in Pygments 1.2.*
"""
name = 'Gherkin'
aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin']
filenames = ['*.feature']
mimetypes = ['text/x-gherkin']
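    # The giant alternations below enumerate Gherkin's localized keywords,
    # one branch per supported language; they appear to be generated from
    # Gherkin's i18n keyword tables rather than maintained by hand.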
feature_keywords_regexp = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функционалност|Функционал|Особина|Могућност|Özellik|Właściwość|Tính năng|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
scenario_keywords_regexp = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарио|Сценарий структураси|Сценарий|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Основа|Концепт|Контекст|Założenia|Tình huống|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
examples_regexp = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
step_keywords_regexp = ur'^(\s*)(하지만|조건|만일|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Унда |То |Онда |Но |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Агар |А |Și |És |anrhegedig a |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Yna |Ya know how |Ya gotta |Y |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Donat |Donada |Diyelim ki |Dengan |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |But y\'all |But |Biết |Bet |BUT |Atunci |And y\'all |And |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'
tokens = {
'comments': [
(r'#.*$', Comment)
],
'multiline_descriptions' : [
(step_keywords_regexp, Keyword, "#pop"),
include('comments'),
(r"(\s|.)", Name.Constant)
],
'multiline_descriptions_on_stack' : [
(step_keywords_regexp, Keyword, "#pop:2"),
include('comments'),
(r"(\s|.)", Name.Constant)
],
'scenario_table_description': [
(r"\s+\|", Text, 'scenario_table_header'),
include('comments'),
(r"(\s|.)", Name.Constant)
],
'scenario_table_header': [
(r"\s+\|\s*$", Text, "#pop:2"),
(r"(\s+\|\s*)(#.*)$", bygroups(Text, Comment), "#pop:2"),
include('comments'),
(r"\s+\|", Text),
(r"[^\|]", Name.Variable)
],
'scenario_sections_on_stack': [
(scenario_keywords_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"multiline_descriptions_on_stack")
],
'narrative': [
include('scenario_sections_on_stack'),
(r"(\s|.)", Name.Builtin)
],
'table_vars': [
(r'(<[^>]*>)', bygroups(Name.Variable))
],
'string': [
include('table_vars'),
(r'(\s|.)', String),
],
'py_string': [
(r'"""', String, "#pop"),
include('string'),
],
'double_string': [
(r'"', String, "#pop"),
include('string'),
],
'root': [
(r'\n', Text),
include('comments'),
(r'"""', String, "py_string"),
(r'"', String, "double_string"),
include('table_vars'),
(r'@[^@\s]+', Name.Namespace),
(step_keywords_regexp, bygroups(Text, Keyword)),
(feature_keywords_regexp,
bygroups(Name.Class, Name.Class, Name.Constant), 'narrative'),
(scenario_keywords_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"multiline_descriptions"),
(examples_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"scenario_table_description"),
(r'(\s|.)', Text)
]
}
class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
*New in Pygments 1.2.*
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment),
],
'statements': [
# simple string (TeX friendly)
(r'"(\\\\|\\"|[^"])*"', String),
# C style string (with character escapes)
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy-type-name can also be an asy-function-name,
            # in the following we test whether the string " [a-zA-Z]" follows
            # the Keyword.Type.
            # Of course it is not perfect!
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type),
            # Now the asy-type-names which are not asy-function-names
            # (except any you define yourself!)
            # Perhaps useless
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
('', Text, 'statement'),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
('{', Punctuation, '#push'),
('}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\n', String),
(r'\\', String), # stray backslash
]
}
def get_tokens_unprocessed(self, text):
from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME
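        # Second pass over the base lexer's output: promote plain Name tokens
        # that appear in the generated builtin tables to Name.Function or
        # Name.Variable.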
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
|
pombredanne/SmartNotes | refs/heads/master | appengine_django/management/commands/console.py | 49 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import code
import getpass
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from google.appengine.ext.remote_api import remote_api_stub
def auth_func():
return raw_input('Username:'), getpass.getpass('Password:')
class Command(BaseCommand):
""" Start up an interactive console backed by your app using remote_api """
help = 'Start up an interactive console backed by your app using remote_api.'
def run_from_argv(self, argv):
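    # argv layout as dispatched by manage.py:
    #   ['manage.py', 'console', app_id] or
    #   ['manage.py', 'console', app_id, host]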
app_id = argv[2]
if len(argv) > 3:
host = argv[3]
else:
host = '%s.appspot.com' % app_id
remote_api_stub.ConfigureRemoteDatastore(app_id,
'/remote_api',
auth_func,
host)
code.interact('App Engine interactive console for %s' % (app_id,),
None,
locals())
|
drglove/SickRage | refs/heads/master | lib/unidecode/x05b.py | 252 | data = (
'Gui ', # 0x00
'Deng ', # 0x01
'Zhi ', # 0x02
'Xu ', # 0x03
'Yi ', # 0x04
'Hua ', # 0x05
'Xi ', # 0x06
'Hui ', # 0x07
'Rao ', # 0x08
'Xi ', # 0x09
'Yan ', # 0x0a
'Chan ', # 0x0b
'Jiao ', # 0x0c
'Mei ', # 0x0d
'Fan ', # 0x0e
'Fan ', # 0x0f
'Xian ', # 0x10
'Yi ', # 0x11
'Wei ', # 0x12
'Jiao ', # 0x13
'Fu ', # 0x14
'Shi ', # 0x15
'Bi ', # 0x16
'Shan ', # 0x17
'Sui ', # 0x18
'Qiang ', # 0x19
'Lian ', # 0x1a
'Huan ', # 0x1b
'Xin ', # 0x1c
'Niao ', # 0x1d
'Dong ', # 0x1e
'Yi ', # 0x1f
'Can ', # 0x20
'Ai ', # 0x21
'Niang ', # 0x22
'Neng ', # 0x23
'Ma ', # 0x24
'Tiao ', # 0x25
'Chou ', # 0x26
'Jin ', # 0x27
'Ci ', # 0x28
'Yu ', # 0x29
'Pin ', # 0x2a
'Yong ', # 0x2b
'Xu ', # 0x2c
'Nai ', # 0x2d
'Yan ', # 0x2e
'Tai ', # 0x2f
'Ying ', # 0x30
'Can ', # 0x31
'Niao ', # 0x32
'Wo ', # 0x33
'Ying ', # 0x34
'Mian ', # 0x35
'Kaka ', # 0x36
'Ma ', # 0x37
'Shen ', # 0x38
'Xing ', # 0x39
'Ni ', # 0x3a
'Du ', # 0x3b
'Liu ', # 0x3c
'Yuan ', # 0x3d
'Lan ', # 0x3e
'Yan ', # 0x3f
'Shuang ', # 0x40
'Ling ', # 0x41
'Jiao ', # 0x42
'Niang ', # 0x43
'Lan ', # 0x44
'Xian ', # 0x45
'Ying ', # 0x46
'Shuang ', # 0x47
'Shuai ', # 0x48
'Quan ', # 0x49
'Mi ', # 0x4a
'Li ', # 0x4b
'Luan ', # 0x4c
'Yan ', # 0x4d
'Zhu ', # 0x4e
'Lan ', # 0x4f
'Zi ', # 0x50
'Jie ', # 0x51
'Jue ', # 0x52
'Jue ', # 0x53
'Kong ', # 0x54
'Yun ', # 0x55
'Zi ', # 0x56
'Zi ', # 0x57
'Cun ', # 0x58
'Sun ', # 0x59
'Fu ', # 0x5a
'Bei ', # 0x5b
'Zi ', # 0x5c
'Xiao ', # 0x5d
'Xin ', # 0x5e
'Meng ', # 0x5f
'Si ', # 0x60
'Tai ', # 0x61
'Bao ', # 0x62
'Ji ', # 0x63
'Gu ', # 0x64
'Nu ', # 0x65
'Xue ', # 0x66
'[?] ', # 0x67
'Zhuan ', # 0x68
'Hai ', # 0x69
'Luan ', # 0x6a
'Sun ', # 0x6b
'Huai ', # 0x6c
'Mie ', # 0x6d
'Cong ', # 0x6e
'Qian ', # 0x6f
'Shu ', # 0x70
'Chan ', # 0x71
'Ya ', # 0x72
'Zi ', # 0x73
'Ni ', # 0x74
'Fu ', # 0x75
'Zi ', # 0x76
'Li ', # 0x77
'Xue ', # 0x78
'Bo ', # 0x79
'Ru ', # 0x7a
'Lai ', # 0x7b
'Nie ', # 0x7c
'Nie ', # 0x7d
'Ying ', # 0x7e
'Luan ', # 0x7f
'Mian ', # 0x80
'Zhu ', # 0x81
'Rong ', # 0x82
'Ta ', # 0x83
'Gui ', # 0x84
'Zhai ', # 0x85
'Qiong ', # 0x86
'Yu ', # 0x87
'Shou ', # 0x88
'An ', # 0x89
'Tu ', # 0x8a
'Song ', # 0x8b
'Wan ', # 0x8c
'Rou ', # 0x8d
'Yao ', # 0x8e
'Hong ', # 0x8f
'Yi ', # 0x90
'Jing ', # 0x91
'Zhun ', # 0x92
'Mi ', # 0x93
'Zhu ', # 0x94
'Dang ', # 0x95
'Hong ', # 0x96
'Zong ', # 0x97
'Guan ', # 0x98
'Zhou ', # 0x99
'Ding ', # 0x9a
'Wan ', # 0x9b
'Yi ', # 0x9c
'Bao ', # 0x9d
'Shi ', # 0x9e
'Shi ', # 0x9f
'Chong ', # 0xa0
'Shen ', # 0xa1
'Ke ', # 0xa2
'Xuan ', # 0xa3
'Shi ', # 0xa4
'You ', # 0xa5
'Huan ', # 0xa6
'Yi ', # 0xa7
'Tiao ', # 0xa8
'Shi ', # 0xa9
'Xian ', # 0xaa
'Gong ', # 0xab
'Cheng ', # 0xac
'Qun ', # 0xad
'Gong ', # 0xae
'Xiao ', # 0xaf
'Zai ', # 0xb0
'Zha ', # 0xb1
'Bao ', # 0xb2
'Hai ', # 0xb3
'Yan ', # 0xb4
'Xiao ', # 0xb5
'Jia ', # 0xb6
'Shen ', # 0xb7
'Chen ', # 0xb8
'Rong ', # 0xb9
'Huang ', # 0xba
'Mi ', # 0xbb
'Kou ', # 0xbc
'Kuan ', # 0xbd
'Bin ', # 0xbe
'Su ', # 0xbf
'Cai ', # 0xc0
'Zan ', # 0xc1
'Ji ', # 0xc2
'Yuan ', # 0xc3
'Ji ', # 0xc4
'Yin ', # 0xc5
'Mi ', # 0xc6
'Kou ', # 0xc7
'Qing ', # 0xc8
'Que ', # 0xc9
'Zhen ', # 0xca
'Jian ', # 0xcb
'Fu ', # 0xcc
'Ning ', # 0xcd
'Bing ', # 0xce
'Huan ', # 0xcf
'Mei ', # 0xd0
'Qin ', # 0xd1
'Han ', # 0xd2
'Yu ', # 0xd3
'Shi ', # 0xd4
'Ning ', # 0xd5
'Qin ', # 0xd6
'Ning ', # 0xd7
'Zhi ', # 0xd8
'Yu ', # 0xd9
'Bao ', # 0xda
'Kuan ', # 0xdb
'Ning ', # 0xdc
'Qin ', # 0xdd
'Mo ', # 0xde
'Cha ', # 0xdf
'Ju ', # 0xe0
'Gua ', # 0xe1
'Qin ', # 0xe2
'Hu ', # 0xe3
'Wu ', # 0xe4
'Liao ', # 0xe5
'Shi ', # 0xe6
'Zhu ', # 0xe7
'Zhai ', # 0xe8
'Shen ', # 0xe9
'Wei ', # 0xea
'Xie ', # 0xeb
'Kuan ', # 0xec
'Hui ', # 0xed
'Liao ', # 0xee
'Jun ', # 0xef
'Huan ', # 0xf0
'Yi ', # 0xf1
'Yi ', # 0xf2
'Bao ', # 0xf3
'Qin ', # 0xf4
'Chong ', # 0xf5
'Bao ', # 0xf6
'Feng ', # 0xf7
'Cun ', # 0xf8
'Dui ', # 0xf9
'Si ', # 0xfa
'Xun ', # 0xfb
'Dao ', # 0xfc
'Lu ', # 0xfd
'Dui ', # 0xfe
'Shou ', # 0xff
)
|
pombredanne/dynd-python | refs/heads/master | dynd/tests/test_nd_groupby.py | 3 | import sys
import unittest
from dynd import nd, ndt
"""
Todo: Fix this
class TestGroupBy(unittest.TestCase):
def test_immutable(self):
a = nd.array([
('x', 0),
('y', 1),
('x', 2),
('x', 3),
('y', 4)],
dtype='{A: string, B: int32}').eval_immutable()
gb = nd.groupby(a, nd.fields(a, 'A'))
self.assertEqual(nd.as_py(gb.groups), [{'A': 'x'}, {'A': 'y'}])
# TODO: This test fails since we modernized comparisons
# self.assertEqual(nd.as_py(gb), [
# [{'A': 'x', 'B': 0},
# {'A': 'x', 'B': 2},
# {'A': 'x', 'B': 3}],
# [{'A': 'y', 'B': 1},
# {'A': 'y', 'B': 4}]])
def test_grouped_slices(self):
a = nd.asarray([[1, 2, 3], [1, 4, 5]])
gb = nd.groupby(a[:, 1:], a[:, 0])
self.assertEqual(nd.as_py(gb.groups), [1])
self.assertEqual(nd.as_py(gb), [[[2, 3], [4, 5]]])
a = nd.asarray([[1, 2, 3], [3, 1, 7], [1, 4, 5], [2, 6, 7], [3, 2, 5]])
gb = nd.groupby(a[:, 1:], a[:, 0])
self.assertEqual(nd.as_py(gb.groups), [1, 2, 3])
self.assertEqual(nd.as_py(gb), [[[2, 3], [4, 5]],
[[6, 7]],
[[1, 7], [2, 5]]])
"""
if __name__ == '__main__':
unittest.main()
|
renaelectronics/linuxcnc | refs/heads/master | tests/motion-logger/mountaindew/test-ui.py | 16 | #!/usr/bin/env python
import linuxcnc
import hal
import time
import sys
#
# connect to LinuxCNC
#
c = linuxcnc.command()
s = linuxcnc.stat()
e = linuxcnc.error_channel()
#
# Come out of E-stop, turn the machine on, home, and switch to Auto mode.
#
c.state(linuxcnc.STATE_ESTOP_RESET)
c.state(linuxcnc.STATE_ON)
c.mode(linuxcnc.MODE_AUTO)
#
# run the .ngc test file, starting from the special line
#
c.program_open('mountaindew.ngc')
c.auto(linuxcnc.AUTO_RUN, 4)
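# (the second argument to c.auto() is the source line at which execution
# starts, matching the "special line" mentioned above)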
c.wait_complete()
sys.exit(0)
|
google/google-ctf | refs/heads/master | third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/profile.py | 50 | #! /usr/bin/env python
#
# Class for profiling python code. rev 1.0 6/2/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
"""Class for profiling Python code."""
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import os
import time
import marshal
from optparse import OptionParser
__all__ = ["run", "runctx", "help", "Profile"]
# Sample timer for use with the profiler, kept for reference:
#i_count = 0
#def integer_timer():
# global i_count
# i_count = i_count + 1
# return i_count
#itimes = integer_timer # replace with C coded timer returning integers
#**************************************************************************
# The following are the static member functions for the profiler class
# Note that an instance of Profile() is *not* needed to call them.
#**************************************************************************
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
try:
prof = prof.run(statement)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
def runctx(statement, globals, locals, filename=None, sort=-1):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
prof = Profile()
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
# Backwards compatibility.
def help():
print "Documentation for the profile module can be found "
print "in the Python Library Reference, section 'The Python Profiler'."
if hasattr(os, "times"):
def _get_time_times(timer=os.times):
t = timer()
return t[0] + t[1]
# Using getrusage(3) is better than clock(3) if available:
# on some systems (e.g. FreeBSD), getrusage has a higher resolution
# Furthermore, on a POSIX system, clock(3) returns microseconds, which
# wrap around after about 36 minutes.
_has_res = 0
try:
import resource
resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)
def _get_time_resource(timer=resgetrusage):
t = timer()
return t[0] + t[1]
_has_res = 1
except ImportError:
pass
class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
    used to write into the frame's local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact (frame and previous tuple). In case an internal error is
detected, the -3 element is used as the function name.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions (this latter is tallied in cur[2]).
[ 2] = Total time spent in subfunctions, excluding time executing the
frame's function (this latter is tallied in cur[1]).
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling).
[-1] = Our parent 6-tuple (corresponds to frame.f_back).
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[-3].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[4] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
bias = 0 # calibration constant
def __init__(self, timer=None, bias=None):
self.timings = {}
self.cur = None
self.cmd = ""
self.c_func_name = ""
if bias is None:
bias = self.bias
self.bias = bias # Materialize in local dict for lookup speed.
if not timer:
if _has_res:
self.timer = resgetrusage
self.dispatcher = self.trace_dispatch
self.get_time = _get_time_resource
elif hasattr(time, 'clock'):
self.timer = self.get_time = time.clock
self.dispatcher = self.trace_dispatch_i
elif hasattr(os, 'times'):
self.timer = os.times
self.dispatcher = self.trace_dispatch
self.get_time = _get_time_times
else:
self.timer = self.get_time = time.time
self.dispatcher = self.trace_dispatch_i
else:
self.timer = timer
t = self.timer() # test out timer function
try:
length = len(t)
except TypeError:
self.get_time = timer
self.dispatcher = self.trace_dispatch_i
else:
if length == 2:
self.dispatcher = self.trace_dispatch
else:
self.dispatcher = self.trace_dispatch_l
# This get_time() implementation needs to be defined
# here to capture the passed-in timer in the parameter
# list (for performance). Note that we can't assume
# the timer() result contains two values in all
# cases.
def get_time_timer(timer=timer, sum=sum):
return sum(timer())
self.get_time = get_time_timer
self.t = self.get_time()
self.simulate_call('profiler')
# Heavily optimized dispatch routine for os.times() timer
def trace_dispatch(self, frame, event, arg):
timer = self.timer
t = timer()
t = t[0] + t[1] - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame,t):
t = timer()
self.t = t[0] + t[1]
else:
r = timer()
self.t = r[0] + r[1] - t # put back unrecorded delta
# Dispatch routine for best timer program (return = scalar, fastest if
# an integer but float works too -- and time.clock() relies on that).
def trace_dispatch_i(self, frame, event, arg):
timer = self.timer
t = timer() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()
else:
self.t = timer() - t # put back unrecorded delta
# Dispatch routine for macintosh (timer returns time in ticks of
# 1/60th second)
def trace_dispatch_mac(self, frame, event, arg):
timer = self.timer
t = timer()/60.0 - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = timer()/60.0
else:
self.t = timer()/60.0 - t # put back unrecorded delta
# SLOW generic dispatch routine for timer returning lists of numbers
def trace_dispatch_l(self, frame, event, arg):
get_time = self.get_time
t = get_time() - self.t - self.bias
if event == "c_call":
self.c_func_name = arg.__name__
if self.dispatch[event](self, frame, t):
self.t = get_time()
else:
self.t = get_time() - t # put back unrecorded delta
# In the event handlers, the first 3 elements of self.cur are unpacked
# into vrbls w/ 3-letter names. The last two characters are meant to be
# mnemonic:
# _pt self.cur[0] "parent time" time to be charged to parent frame
# _it self.cur[1] "internal time" time spent directly in the function
# _et self.cur[2] "external time" time spent in subfunctions
def trace_dispatch_exception(self, frame, t):
rpt, rit, ret, rfn, rframe, rcur = self.cur
if (rframe is not frame) and rcur:
return self.trace_dispatch_return(rframe, t)
self.cur = rpt, rit+t, ret, rfn, rframe, rcur
return 1
def trace_dispatch_call(self, frame, t):
if self.cur and frame.f_back is not self.cur[-2]:
rpt, rit, ret, rfn, rframe, rcur = self.cur
if not isinstance(rframe, Profile.fake_frame):
assert rframe.f_back is frame.f_back, ("Bad call", rfn,
rframe, rframe.f_back,
frame, frame.f_back)
self.trace_dispatch_return(rframe, 0)
assert (self.cur is None or \
frame.f_back is self.cur[-2]), ("Bad call",
self.cur[-3])
fcode = frame.f_code
fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns + 1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_c_call (self, frame, t):
fn = ("", 0, self.c_func_name)
self.cur = (t, 0, 0, fn, frame, self.cur)
timings = self.timings
if fn in timings:
cc, ns, tt, ct, callers = timings[fn]
timings[fn] = cc, ns+1, tt, ct, callers
else:
timings[fn] = 0, 0, 0, 0, {}
return 1
def trace_dispatch_return(self, frame, t):
if frame is not self.cur[-2]:
assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
self.trace_dispatch_return(self.cur[-2], 0)
# Prefix "r" means part of the Returning or exiting frame.
# Prefix "p" means part of the Previous or Parent or older frame.
rpt, rit, ret, rfn, frame, rcur = self.cur
rit = rit + t
frame_total = rit + ret
ppt, pit, pet, pfn, pframe, pcur = rcur
self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
timings = self.timings
cc, ns, tt, ct, callers = timings[rfn]
if not ns:
# This is the only occurrence of the function on the stack.
# Else this is a (directly or indirectly) recursive call, and
# its cumulative time will get updated when the topmost call to
# it returns.
ct = ct + frame_total
cc = cc + 1
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
# stats such as the amount of time added to ct courtesy
# of this specific call, and the contribution to cc
# courtesy of this call.
else:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
return 1
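    # Event-name -> handler table consulted by the trace_dispatch_* front
    # ends above once they have adjusted the timer.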
dispatch = {
"call": trace_dispatch_call,
"exception": trace_dispatch_exception,
"return": trace_dispatch_return,
"c_call": trace_dispatch_c_call,
"c_exception": trace_dispatch_return, # the C function returned
"c_return": trace_dispatch_return,
}
# The next few functions play with self.cmd. By carefully preloading
# our parallel stack, we can force the profiled result to include
# an arbitrary string as the name of the calling function.
# We use self.cmd as that string, and the resulting stats look
# very nice :-).
def set_cmd(self, cmd):
if self.cur[-1]: return # already set
self.cmd = cmd
self.simulate_call(cmd)
class fake_code:
def __init__(self, filename, line, name):
self.co_filename = filename
self.co_line = line
self.co_name = name
self.co_firstlineno = 0
def __repr__(self):
return repr((self.co_filename, self.co_line, self.co_name))
class fake_frame:
def __init__(self, code, prior):
self.f_code = code
self.f_back = prior
def simulate_call(self, name):
code = self.fake_code('profile', 0, name)
if self.cur:
pframe = self.cur[-2]
else:
pframe = None
frame = self.fake_frame(code, pframe)
self.dispatch['call'](self, frame, 0)
# collect stats from pending stack, including getting final
# timings for self.cmd frame.
def simulate_cmd_complete(self):
get_time = self.get_time
t = get_time() - self.t
while self.cur[-1]:
# We *can* cause assertion errors here if
# dispatch_trace_return checks for a frame match!
self.dispatch['return'](self, self.cur[-2], t)
t = 0
self.t = get_time() - t
def print_stats(self, sort=-1):
import pstats
pstats.Stats(self).strip_dirs().sort_stats(sort). \
print_stats()
def dump_stats(self, file):
f = open(file, 'wb')
self.create_stats()
marshal.dump(self.stats, f)
f.close()
def create_stats(self):
self.simulate_cmd_complete()
self.snapshot_stats()
def snapshot_stats(self):
self.stats = {}
for func, (cc, ns, tt, ct, callers) in self.timings.iteritems():
callers = callers.copy()
nc = 0
for callcnt in callers.itervalues():
nc += callcnt
self.stats[func] = cc, nc, tt, ct, callers
# The following two methods can be called by clients to use
# a profiler to profile a statement, given as a string.
def run(self, cmd):
import __main__
dict = __main__.__dict__
return self.runctx(cmd, dict, dict)
def runctx(self, cmd, globals, locals):
self.set_cmd(cmd)
sys.setprofile(self.dispatcher)
try:
exec cmd in globals, locals
finally:
sys.setprofile(None)
return self
# This method is more useful to profile a single function call.
def runcall(self, func, *args, **kw):
self.set_cmd(repr(func))
sys.setprofile(self.dispatcher)
try:
return func(*args, **kw)
finally:
sys.setprofile(None)
#******************************************************************
# The following calculates the overhead for using a profiler. The
# problem is that it takes a fair amount of time for the profiler
# to stop the stopwatch (from the time it receives an event).
# Similarly, there is a delay from the time that the profiler
# re-starts the stopwatch before the user's code really gets to
# continue. The following code tries to measure the difference on
# a per-event basis.
#
# Note that this difference is only significant if there are a lot of
# events, and relatively little user code per event. For example,
# code with small functions will typically benefit from having the
# profiler calibrated for the current platform. This *could* be
# done on the fly during init() time, but it is not worth the
    # effort. Also note that if too large a value is specified, then
# execution time on some functions will actually appear as a
# negative number. It is *normal* for some functions (with very
# low call counts) to have such negative stats, even if the
# calibration figure is "correct."
#
# One alternative to profile-time calibration adjustments (i.e.,
# adding in the magic little delta during each event) is to track
# more carefully the number of events (and cumulatively, the number
# of events during sub functions) that are seen. If this were
# done, then the arithmetic could be done after the fact (i.e., at
# display time). Currently, we track only call/return events.
# These values can be deduced by examining the callees and callers
# vectors for each functions. Hence we *can* almost correct the
# internal time figure at print time (note that we currently don't
# track exception event processing counts). Unfortunately, there
# is currently no similar information for cumulative sub-function
# time. It would not be hard to "get all this info" at profiler
# time. Specifically, we would have to extend the tuples to keep
# counts of this in each frame, and then extend the defs of timing
# tuples to include the significant two figures. I'm a bit fearful
# that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, for a very
# low "value added" feature.)
#**************************************************************
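    # Illustrative calibration (the figure is machine-specific):
    #   pr = Profile()
    #   Profile.bias = pr.calibrate(100000)  # install as the class default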
def calibrate(self, m, verbose=0):
if self.__class__ is not Profile:
raise TypeError("Subclasses must override .calibrate().")
saved_bias = self.bias
self.bias = 0
try:
return self._calibrate_inner(m, verbose)
finally:
self.bias = saved_bias
def _calibrate_inner(self, m, verbose):
get_time = self.get_time
# Set up a test case to be run with and without profiling. Include
# lots of calls, because we're trying to quantify stopwatch overhead.
# Do not raise any exceptions, though, because we want to know
# exactly how many profile events are generated (one call event, +
# one return event, per Python-level call).
def f1(n):
for i in range(n):
x = 1
def f(m, f1=f1):
for i in range(m):
f1(100)
f(m) # warm up the cache
# elapsed_noprofile <- time f(m) takes without profiling.
t0 = get_time()
f(m)
t1 = get_time()
elapsed_noprofile = t1 - t0
if verbose:
print "elapsed time without profiling =", elapsed_noprofile
# elapsed_profile <- time f(m) takes with profiling. The difference
# is profiling overhead, only some of which the profiler subtracts
# out on its own.
p = Profile()
t0 = get_time()
p.runctx('f(m)', globals(), locals())
t1 = get_time()
elapsed_profile = t1 - t0
if verbose:
print "elapsed time with profiling =", elapsed_profile
# reported_time <- "CPU seconds" the profiler charged to f and f1.
total_calls = 0.0
reported_time = 0.0
for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
p.timings.items():
if funcname in ("f", "f1"):
total_calls += cc
reported_time += tt
if verbose:
print "'CPU seconds' profiler reported =", reported_time
print "total # calls =", total_calls
if total_calls != m + 1:
raise ValueError("internal error: total calls = %d" % total_calls)
# reported_time - elapsed_noprofile = overhead the profiler wasn't
# able to measure. Divide by twice the number of calls (since there
# are two profiler events per call in this test) to get the hidden
# overhead per event.
mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
if verbose:
print "mean stopwatch overhead per profile event =", mean
return mean
#****************************************************************************
def Stats(*args):
print 'Report generating functions are in the "pstats" module\a'
def main():
usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
runctx(code, globs, None, options.outfile, options.sort)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
|
markgw/jazzparser | refs/heads/master | annotator/annotator/bin/count_chord_type_use.py | 1 | """
Prints a count of the uses of each chord type in the database.
"""
import sys
from django.db.models import Count
from apps.sequences.models import Chord, ChordType
from optparse import OptionParser
from jazzparser.tableprint import pprint_table
def count_chords(options, arguments):
table = [["Chord type", "Count"]]
for ctype in ChordType.objects.all():
table.append(["%s" % ctype, "%s" % ctype.chord_set.count()])
# Justification of columns
justs = [False, True]
# Print out the table
print
pprint_table(sys.stdout, table, justs, "|")
print "Total chords: %s" % Chord.objects.count()
return 0
def main():
parser = OptionParser()
options, arguments = parser.parse_args()
sys.exit(count_chords(options, arguments))
if __name__ == "__main__":
main()
|
saurabh6790/ON-RISAPP | refs/heads/master | stock/report/delivery_note_trends/delivery_note_trends.py | 30 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from controllers.trends import get_columns,get_data
def execute(filters=None):
if not filters: filters = {}
data = []
conditions = get_columns(filters, "Delivery Note")
data = get_data(filters, conditions)
return conditions["columns"], data |
amenonsen/ansible | refs/heads/devel | lib/ansible/modules/cloud/google/gcp_sql_user_info.py | 5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_sql_user_info
description:
- Gather info for GCP User
- This module was called C(gcp_sql_user_facts) before Ansible 2.9. The usage has not
changed.
short_description: Gather info for GCP User
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
- 'This field represents a link to an Instance resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''name'' and value of
your resource''s name. Alternatively, you can add `register: name-of-resource`
to a gcp_sql_instance task and then set this instance field to "{{ name-of-resource
}}"'
required: true
type: dict
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: get info on a user
gcp_sql_user_info:
instance: "{{ instance }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
host:
description:
- The host name from which the user can connect. For insert operations, host
defaults to an empty string. For update operations, host is specified as part
of the request URL. The host name cannot be updated after insertion.
returned: success
type: str
name:
description:
- The name of the user in the Cloud SQL instance.
returned: success
type: str
instance:
description:
- The name of the Cloud SQL instance. This does not include the project ID.
returned: success
type: dict
password:
description:
- The password for the user.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(instance=dict(required=True, type='dict')))
if module._name == 'gcp_sql_user_facts':
module.deprecate("The 'gcp_sql_user_facts' module has been renamed to 'gcp_sql_user_info'", version='2.13')
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/sqlservice.admin']
return_value = {'resources': fetch_list(module, collection(module))}
module.exit_json(**return_value)
def collection(module):
res = {'project': module.params['project'], 'instance': replace_resource_dict(module.params['instance'], 'name')}
return "https://www.googleapis.com/sql/v1beta4/projects/{project}/instances/{instance}/users".format(**res)
def fetch_list(module, link):
auth = GcpSession(module, 'sql')
return auth.list(link, return_if_object, array_name='items')
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
|
vvv1559/intellij-community | refs/heads/master | python/lib/Lib/distutils/command/install_data.py | 138 | """distutils.command.install_data
Implements the Distutils 'install_data' command, for installing
platform-independent data files."""
# contributed by Bastian Kleineidam
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_data.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from types import StringType
from distutils.core import Command
from distutils.util import change_root, convert_path
class install_data (Command):
description = "install data files"
user_options = [
('install-dir=', 'd',
"base directory for installing data files "
"(default: installation base dir)"),
('root=', None,
"install everything relative to this alternate root directory"),
('force', 'f', "force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options (self):
self.install_dir = None
self.outfiles = []
self.root = None
self.force = 0
self.data_files = self.distribution.data_files
self.warn_dir = 1
def finalize_options (self):
self.set_undefined_options('install',
('install_data', 'install_dir'),
('root', 'root'),
('force', 'force'),
)
def run (self):
self.mkpath(self.install_dir)
for f in self.data_files:
if type(f) is StringType:
# it's a simple file, so copy it
f = convert_path(f)
if self.warn_dir:
self.warn("setup script did not provide a directory for "
"'%s' -- installing right in '%s'" %
(f, self.install_dir))
(out, _) = self.copy_file(f, self.install_dir)
self.outfiles.append(out)
else:
# it's a tuple with path to install to and a list of files
dir = convert_path(f[0])
if not os.path.isabs(dir):
dir = os.path.join(self.install_dir, dir)
elif self.root:
dir = change_root(self.root, dir)
self.mkpath(dir)
if f[1] == []:
# If there are no files listed, the user must be
# trying to create an empty directory, so add the
# directory to the list of output files.
self.outfiles.append(dir)
else:
# Copy files, adding them to the list of output files.
for data in f[1]:
data = convert_path(data)
(out, _) = self.copy_file(data, dir)
self.outfiles.append(out)
def get_inputs (self):
return self.data_files or []
def get_outputs (self):
return self.outfiles
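# A hedged sketch of the two 'data_files' shapes that run() accepts
# (names are illustrative only): bare filenames are copied straight into
# install_dir, while (directory, [files]) tuples install their files
# under that directory (absolute, or joined onto install_dir).
_EXAMPLE_DATA_FILES = [
'config.cfg',
('share/icons', ['icon.png', 'icon2.png']),
]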
|
zbal/ansible | refs/heads/devel | lib/ansible/playbook/__init__.py | 108 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins import get_all_plugin_loaders
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Playbook']
class Playbook:
def __init__(self, loader):
# Entries in the datastructure of a playbook may
# be either a play or an include statement
self._entries = []
self._basedir = os.getcwd()
self._loader = loader
@staticmethod
def load(file_name, variable_manager=None, loader=None):
pb = Playbook(loader=loader)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
return pb
def _load_playbook_data(self, file_name, variable_manager):
if os.path.isabs(file_name):
self._basedir = os.path.dirname(file_name)
else:
self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
# set the loaders basedir
self._loader.set_basedir(self._basedir)
# dynamically load any plugins from the playbook directory
for name, obj in get_all_plugin_loaders():
if obj.subdir:
plugin_path = os.path.join(self._basedir, obj.subdir)
if os.path.isdir(plugin_path):
obj.add_directory(plugin_path)
ds = self._loader.load_from_file(os.path.basename(file_name))
if not isinstance(ds, list):
raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
# Parse the playbook entries. For plays, we simply parse them
# using the Play() object, and includes are parsed using the
# PlaybookInclude() object
for entry in ds:
if not isinstance(entry, dict):
raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
if 'include' in entry:
pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
if pb is not None:
self._entries.extend(pb._entries)
else:
display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan')
else:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
self._entries.append(entry_obj)
def get_loader(self):
return self._loader
def get_plays(self):
return self._entries[:]
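def _example_usage(loader):
# Hedged sketch, never called: Playbook.load() is the intended entry
# point, and 'site.yml' is a hypothetical playbook path.
pb = Playbook.load('site.yml', loader=loader)
return pb.get_plays()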
|
odyaka341/pyglet | refs/heads/master | contrib/spryte/tilemap.py | 29 | from pyglet import image
import spryte
class Map(spryte.SpriteBatch):
'''Rectangular map.
"cells" argument must be a row-major list of lists of Sprite instances.
'''
def set_cells(self, cell_width, cell_height, cells, origin=None):
self.cell_width, self.cell_height = cell_width, cell_height
if origin is None:
origin = (0, 0)
self.x, self.y = origin
self.cells = cells
self.pixel_width = len(cells[0]) * cell_width
self.pixel_height = len(cells) * cell_height
def get_cell(self, x, y):
''' Return Cell at cell pos=(x,y).
Return None if out of bounds.'''
if x < 0 or y < 0:
return None
try:
return self.cells[y][x]
except IndexError:
return None
def get_in_region(self, x1, y1, x2, y2):
'''Return cells that are within the pixel bounds specified by the
bottom-left (x1, y1) and top-right (x2, y2) corners.
'''
x1 = max(0, x1 // self.cell_width)
y1 = max(0, y1 // self.cell_height)
x2 = min(len(self.cells[0]), x2 // self.cell_width + 1)
y2 = min(len(self.cells), y2 // self.cell_height + 1)
return [self.cells[y][x] for x in range(x1, x2) for y in range(y1, y2)]
def get(self, x, y):
''' Return Cell at pixel px=(x,y).
Return None if out of bounds.'''
return self.get_cell(x // self.cell_width, y // self.cell_height)
UP = (0, 1)
DOWN = (0, -1)
LEFT = (-1, 0)
RIGHT = (1, 0)
def get_neighbor(self, cell, direction):
'''Get the neighbor Cell in the given direction (dx, dy) which
is one of self.UP, self.DOWN, self.LEFT or self.RIGHT.
Returns None if out of bounds.
'''
dx, dy = direction
return self.get_cell(cell.x//self.cell_width + dx,
cell.y//self.cell_height + dy)
def delete(self):
for row in self.cells:
for cell in row:
cell.delete()
self.cells = []
@classmethod
def from_imagegrid(cls, im, cells, file=None, origin=None):
'''Initialise the map using the given image grid.
Both the image grid and the map cells have y=0 at the bottom of
the grid / map.
Return a Map instance.'''
texture_sequence = im.texture_sequence
l = []
cw, ch = texture_sequence.item_width, texture_sequence.item_height
inst = cls()
for y, row in enumerate(cells):
m = []
l.append(m)
for x, num in enumerate(row):
m.append(spryte.Sprite(texture_sequence[num], x*cw, y*ch,
map=inst, batch=inst))
inst.set_cells(cw, ch, l, origin)
return inst
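# A hedged usage sketch, never called: 'tiles.png' is hypothetical, and
# we assume the grid object exposes the .texture_sequence attribute that
# from_imagegrid() reads.
def _example_build_map():
grid = image.ImageGrid(image.load('tiles.png'), 2, 2)
cells = [[0, 1], [2, 3]] # row-major tile indices, y=0 at the bottom
return Map.from_imagegrid(grid, cells)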
|
d53dave/cgopt | refs/heads/master | examples/langermann/langermann_opt.py | 2 | import math
from csaopt.utils import clamp
from typing import MutableSequence, Sequence, Any, Tuple
from math import pi
# -- Globals
m = 5
c = (1, 2, 5, 2, 3)
A = ((3, 5), (5, 2), (2, 1), (1, 4), (7, 9))
# -- Globals
# Configuration
def empty_state() -> Tuple:
return (0.0, 0.0)
# Functions
def cool(initial_temp: float, old_temp: float, step: int) -> float:
return initial_temp * math.pow(0.9, step)
def acceptance_func(e_old: float, e_new: float, temp: float, rnd: float) -> bool:
# prevent math.exp from under- or overflowing by clamping the exponent to a safe range
x = clamp(-80, (e_old - e_new) / temp, 0.1)
return math.exp(x) > rnd
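# This is the Metropolis criterion: an improving move (e_new < e_old)
# makes x positive, so math.exp(x) > 1 >= rnd and it is always accepted,
# while a worsening move is accepted with probability exp(-(e_new - e_old) / temp).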
def initialize(state: MutableSequence, randoms: Sequence[float]) -> None:
for i in range(len(randoms)):
state[i] = randoms[i]
return
def evaluate(state: Sequence) -> float:
result = 0.0
for i in range(m): # sum from 0 to m-1
t2 = 0.0
for j in range(2): # sum from 0..d-1
s_j = state[j]
a_ij = A[i][j]
t2 += (s_j - a_ij)**2
t2 = -(1 / pi) * t2
t3 = 0.0
for j in range(2): # sum from 0..d-1
t3 += (state[j] - A[i][j])**2
t3 = pi * t3
result += c[i] * math.exp(t2) * math.cos(t3)
return -result
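# For reference, evaluate() returns the negated Langermann function
# f(x) = sum_{i=1}^{m} c_i * exp(-S_i / pi) * cos(pi * S_i)
# with S_i = sum_{j=1}^{2} (x_j - A_ij)^2; t2 and t3 above are the two
# appearances of that same inner sum S_i.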
def generate_next(state: Sequence, new_state: MutableSequence, randoms: Sequence[float], step: int) -> Any:
for i in range(len(state)):
new_state[i] = clamp(0, state[i] + 0.3 * randoms[i], 10)
return
|
spinellic/Mission-Planner | refs/heads/master | Lib/distutils/dir_util.py | 53 | """distutils.dir_util
Utility functions for manipulating directories and directory trees."""
__revision__ = "$Id$"
import os
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache used by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0777, verbose=1, dry_run=0):
"""Create a directory and any missing ancestor directories.
If the directory already exists (or if 'name' is the empty string, which
means the current directory, which of course exists), then do nothing.
Raise DistutilsFileError if unable to create some directory along the way
(eg. some sub-path exists, but is a file rather than a directory).
If 'verbose' is true, print a one-line summary of each mkdir to stdout.
Return the list of directories actually created.
"""
global _path_created
# Detect a common bug -- name is None
if not isinstance(name, basestring):
raise DistutilsInternalError, \
"mkpath: 'name' must be a string (got %r)" % (name,)
# XXX what's the better way to handle verbosity? print as we create
# each directory in the path (the current behaviour), or only announce
# the creation of the whole path? (quite easy to do the latter since
# we're not using a recursive algorithm)
name = os.path.normpath(name)
created_dirs = []
if os.path.isdir(name) or name == '':
return created_dirs
if _path_created.get(os.path.abspath(name)):
return created_dirs
(head, tail) = os.path.split(name)
tails = [tail] # stack of lone dirs to create
while head and tail and not os.path.isdir(head):
(head, tail) = os.path.split(head)
tails.insert(0, tail) # push next higher dir onto stack
# now 'head' contains the deepest directory that already exists
# (that is, the child of 'head' in 'name' is the highest directory
# that does *not* exist)
for d in tails:
#print "head = %s, d = %s: " % (head, d),
head = os.path.join(head, d)
abs_head = os.path.abspath(head)
if _path_created.get(abs_head):
continue
if verbose >= 1:
log.info("creating %s", head)
if not dry_run:
try:
os.mkdir(head, mode)
except OSError, exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
raise DistutilsFileError(
"could not create '%s': %s" % (head, exc.args[-1]))
created_dirs.append(head)
_path_created[abs_head] = 1
return created_dirs
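# Hedged usage sketch (the path is illustrative): mkpath('/tmp/a/b/c')
# creates whichever of '/tmp/a', '/tmp/a/b' and '/tmp/a/b/c' are missing
# and returns only the directories it actually created.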
def create_tree(base_dir, files, mode=0777, verbose=1, dry_run=0):
"""Create all the empty directories under 'base_dir' needed to put 'files'
there.
'base_dir' is just the name of a directory which doesn't necessarily
exist yet; 'files' is a list of filenames to be interpreted relative to
'base_dir'. 'base_dir' + the directory portion of every file in 'files'
will be created if it doesn't already exist. 'mode', 'verbose' and
'dry_run' flags are as for 'mkpath()'.
"""
# First get the list of directories to create
need_dir = {}
for file in files:
need_dir[os.path.join(base_dir, os.path.dirname(file))] = 1
need_dirs = need_dir.keys()
need_dirs.sort()
# Now create them
for dir in need_dirs:
mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
preserve_symlinks=0, update=0, verbose=1, dry_run=0):
"""Copy an entire directory tree 'src' to a new location 'dst'.
Both 'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
from distutils.file_util import copy_file
if not dry_run and not os.path.isdir(src):
raise DistutilsFileError, \
"cannot copy tree '%s': not a directory" % src
try:
names = os.listdir(src)
except os.error, (errno, errstr):
if dry_run:
names = []
else:
raise DistutilsFileError, \
"error listing files in '%s': %s" % (src, errstr)
if not dry_run:
mkpath(dst, verbose=verbose)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if preserve_symlinks and os.path.islink(src_name):
link_dest = os.readlink(src_name)
if verbose >= 1:
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os.path.isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
verbose=verbose, dry_run=dry_run))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, verbose=verbose,
dry_run=dry_run)
outputs.append(dst_name)
return outputs
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
"""Recursively remove an entire directory tree.
Any errors are ignored (apart from being reported to stdout if 'verbose'
is true).
"""
from distutils.util import grok_environment_error
global _path_created
if verbose >= 1:
log.info("removing '%s' (and everything under it)", directory)
if dry_run:
return
cmdtuples = []
_build_cmdtuple(directory, cmdtuples)
for cmd in cmdtuples:
try:
cmd[0](cmd[1])
# remove dir from cache if it's already there
abspath = os.path.abspath(cmd[1])
if abspath in _path_created:
del _path_created[abspath]
except (IOError, OSError), exc:
log.warn(grok_environment_error(
exc, "error removing %s: " % directory))
def ensure_relative(path):
"""Take the full path 'path', and make it a relative path.
This is useful to make 'path' the second argument to os.path.join().
"""
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
|
jamslevy/gsoc | refs/heads/master | thirdparty/google_appengine/lib/django/django/db/backends/mysql_old/introspection.py | 32 | from django.db.backends.mysql_old.base import quote_name
from MySQLdb import ProgrammingError, OperationalError
from MySQLdb.constants import FIELD_TYPE
import re
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
def get_table_list(cursor):
"Returns a list of table names in the current database."
cursor.execute("SHOW TABLES")
return [row[0] for row in cursor.fetchall()]
def get_table_description(cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
cursor.execute("SELECT * FROM %s LIMIT 1" % quote_name(table_name))
return cursor.description
def _name_to_index(cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(get_table_description(cursor, table_name))])
def get_relations(cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
my_field_dict = _name_to_index(cursor, table_name)
constraints = []
relations = {}
try:
# This should work for MySQL 5.0.
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
constraints.extend(cursor.fetchall())
except (ProgrammingError, OperationalError):
# Fall back to "SHOW CREATE TABLE", for previous MySQL versions.
# Go through all constraints and save the equal matches.
cursor.execute("SHOW CREATE TABLE %s" % quote_name(table_name))
for row in cursor.fetchall():
pos = 0
while True:
match = foreign_key_re.search(row[1], pos)
if match is None:
break
pos = match.end()
constraints.append(match.groups())
for my_fieldname, other_table, other_field in constraints:
other_field_index = _name_to_index(cursor, other_table)[other_field]
my_field_index = my_field_dict[my_fieldname]
relations[my_field_index] = (other_field_index, other_table)
return relations
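# Example of the return shape (tables are hypothetical): if column 2 of
# 'book' references column 0 ('id') of 'author', the result is
# {2: (0, 'author')}.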
def get_indexes(cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
cursor.execute("SHOW INDEX FROM %s" % quote_name(table_name))
indexes = {}
for row in cursor.fetchall():
indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
return indexes
DATA_TYPES_REVERSE = {
FIELD_TYPE.BLOB: 'TextField',
FIELD_TYPE.CHAR: 'CharField',
FIELD_TYPE.DECIMAL: 'FloatField',
FIELD_TYPE.DATE: 'DateField',
FIELD_TYPE.DATETIME: 'DateTimeField',
FIELD_TYPE.DOUBLE: 'FloatField',
FIELD_TYPE.FLOAT: 'FloatField',
FIELD_TYPE.INT24: 'IntegerField',
FIELD_TYPE.LONG: 'IntegerField',
FIELD_TYPE.LONGLONG: 'IntegerField',
FIELD_TYPE.SHORT: 'IntegerField',
FIELD_TYPE.STRING: 'TextField',
FIELD_TYPE.TIMESTAMP: 'DateTimeField',
FIELD_TYPE.TINY: 'IntegerField',
FIELD_TYPE.TINY_BLOB: 'TextField',
FIELD_TYPE.MEDIUM_BLOB: 'TextField',
FIELD_TYPE.LONG_BLOB: 'TextField',
FIELD_TYPE.VAR_STRING: 'CharField',
}
|
sunlianqiang/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/site-packages/setuptools/tests/test_develop.py | 286 | """develop tests
"""
import sys
import os, shutil, tempfile, unittest
import site
from distutils.errors import DistutilsError
from setuptools.command.develop import develop
from setuptools.command import easy_install as easy_install_pkg
from setuptools.compat import StringIO
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo',
packages=['foo'],
use_2to3=True,
)
"""
INIT_PY = """print "foo"
"""
class TestDevelopTest(unittest.TestCase):
def setUp(self):
if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
return
# Directory structure
self.dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self.dir, 'foo'))
# setup.py
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
# foo/__init__.py
init = os.path.join(self.dir, 'foo', '__init__.py')
f = open(init, 'w')
f.write(INIT_PY)
f.close()
os.chdir(self.dir)
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
if sys.version < "2.6" or hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix):
return
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_develop(self):
if sys.version < "2.6" or hasattr(sys, 'real_prefix'):
return
dist = Distribution(
dict(name='foo',
packages=['foo'],
use_2to3=True,
version='0.0',
))
dist.script_name = 'setup.py'
cmd = develop(dist)
cmd.user = 1
cmd.ensure_finalized()
cmd.install_dir = site.USER_SITE
cmd.user = 1
old_stdout = sys.stdout
#sys.stdout = StringIO()
try:
cmd.run()
finally:
sys.stdout = old_stdout
# let's see if we got our egg link at the right place
content = os.listdir(site.USER_SITE)
content.sort()
self.assertEqual(content, ['easy-install.pth', 'foo.egg-link'])
# Check that we are using the right code.
egg_link_file = open(os.path.join(site.USER_SITE, 'foo.egg-link'), 'rt')
try:
path = egg_link_file.read().split()[0].strip()
finally:
egg_link_file.close()
init_file = open(os.path.join(path, 'foo', '__init__.py'), 'rt')
try:
init = init_file.read().strip()
finally:
init_file.close()
if sys.version < "3":
self.assertEqual(init, 'print "foo"')
else:
self.assertEqual(init, 'print("foo")')
def notest_develop_with_setup_requires(self):
wanted = ("Could not find suitable distribution for "
"Requirement.parse('I-DONT-EXIST')")
old_dir = os.getcwd()
os.chdir(self.dir)
try:
try:
dist = Distribution({'setup_requires': ['I_DONT_EXIST']})
except DistutilsError:
e = sys.exc_info()[1]
error = str(e)
if error == wanted:
pass
finally:
os.chdir(old_dir)
|
abztrakt/django-badger | refs/heads/master | badger/tests/test_feeds.py | 3 | import logging
import feedparser
from django.conf import settings
from django.http import HttpRequest
from django.test.client import Client
from pyquery import PyQuery as pq
from nose.tools import assert_equal, with_setup, assert_false, eq_, ok_
from nose.plugins.attrib import attr
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
try:
from commons.urlresolvers import reverse
except ImportError, e:
from django.core.urlresolvers import reverse
from . import BadgerTestCase
from badger.models import (Badge, Award, Progress,
BadgeAwardNotAllowedException)
from badger.utils import get_badge, award_badge
class BadgerFeedsTest(BadgerTestCase):
def setUp(self):
self.testuser = self._get_user()
self.client = Client()
Award.objects.all().delete()
def tearDown(self):
Award.objects.all().delete()
Badge.objects.all().delete()
def test_award_feeds(self):
"""Can view award detail"""
user = self._get_user()
user2 = self._get_user(username='tester2')
b1, created = Badge.objects.get_or_create(creator=user, title="Code Badge #1")
award = b1.award_to(user2)
# The award should show up in each of these feeds.
feed_urls = (
reverse('badger.feeds.awards_recent',
args=('atom', )),
reverse('badger.feeds.awards_by_badge',
args=('atom', b1.slug, )),
reverse('badger.feeds.awards_by_user',
args=('atom', user2.username,)),
)
# Check each of the feeds
for feed_url in feed_urls:
r = self.client.get(feed_url, follow=True)
# The feed should be parsed without issues by feedparser
feed = feedparser.parse(r.content)
eq_(0, feed.bozo)
# Look through entries for the badge title
found_it = False
for entry in feed.entries:
if b1.title in entry.title and user2.username in entry.title:
found_it = True
ok_(found_it)
def _get_user(self, username="tester", email="tester@example.com",
password="trustno1"):
(user, created) = User.objects.get_or_create(username=username,
defaults=dict(email=email))
if created:
user.set_password(password)
user.save()
return user
|
wschoenell/chimera | refs/heads/master | src/chimera/core/eventwrapper.py | 5 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# chimera - observatory automation system
# Copyright (C) 2006-2007 P. Henrique Silva <henrique@astro.ufsc.br>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from chimera.core.proxy import ProxyMethod
from chimera.core.methodwrapper import MethodWrapperDispatcher
from chimera.core.constants import EVENTS_PROXY_NAME
import copy
__all__ = ['EventWrapperDispatcher']
class EventWrapperDispatcher (MethodWrapperDispatcher):
def __init__(self, wrapper, instance, cls):
MethodWrapperDispatcher.__init__(self, wrapper, instance, cls)
def call(self, *args, **kwargs):
if hasattr(self.instance, EVENTS_PROXY_NAME):
getattr(self.instance, EVENTS_PROXY_NAME).publish(
self.func.__name__, *args[1:], **kwargs)
return True
def __do(self, other, action):
handler = {"topic": self.func.__name__,
"handler": {"proxy": "",
"method": ""}}
# REMEMBER: Return a copy of this wrapper as we are using +=
# Can't add itself as a subscriber
if other == self.func:
return copy.copy(self)
# passing a proxy method?
if not isinstance(other, ProxyMethod):
return copy.copy(self)
handler["handler"]["proxy"] = other.proxy.URI
handler["handler"]["method"] = str(other.__name__)
if hasattr(self.instance, EVENTS_PROXY_NAME):
proxy = getattr(self.instance, EVENTS_PROXY_NAME)
f = getattr(proxy, action)
f(handler)
return copy.copy(self)
def __iadd__(self, other):
return self.__do(other, "subscribe")
def __isub__(self, other):
return self.__do(other, "unsubscribe")
|
SebNickel/garrick | refs/heads/master | pick_card.py | 1 | from random import random
import card_repository
def pick_card(conn, cursor):
card = None
while card is None:
random_float = random()
if random_float < 0.4:
score = 0
elif random_float < 0.7:
score = 1
elif random_float < 0.85:
score = 2
elif random_float < 0.95:
score = 3
elif random_float < 0.985:
score = 4
else:
score = 5
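# The thresholds above give scores 0..5 selection probabilities of
# roughly 40%, 30%, 15%, 10%, 3.5% and 1.5%, so weakly-known cards
# are drawn far more often.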
card = card_repository.select_one_by_score(cursor, score)
return card
|
jdurbin/sandbox | refs/heads/master | python/basics/scripts/Dfileutilstest.py | 1 | #!/usr/bin/env python
from DFileUtils import *
TellMeYouLoveMe()
|
marc-sensenich/ansible | refs/heads/devel | lib/ansible/template/__init__.py | 3 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import contextlib
import datetime
import os
import pwd
import re
import time
from functools import wraps
from io import StringIO
from numbers import Number
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.loaders import FileSystemLoader
from jinja2.runtime import Context, StrictUndefined
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable, AnsibleAssertionError
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.common._collections_compat import Sequence, Mapping
from ansible.plugins.loader import filter_loader, lookup_loader, test_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import UnsafeProxy, wrap_var
display = Display()
__all__ = ['Templar', 'generate_ansible_template_vars']
# A regex for checking to see if a variable we're trying to
# expand is just a single variable name.
# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = (bool, Number)
JINJA2_OVERRIDE = '#jinja2:'
USE_JINJA2_NATIVE = False
if C.DEFAULT_JINJA2_NATIVE:
try:
from jinja2.nativetypes import NativeEnvironment as Environment
from ansible.template.native_helpers import ansible_native_concat as j2_concat
USE_JINJA2_NATIVE = True
except ImportError:
from jinja2 import Environment
from jinja2.utils import concat as j2_concat
from jinja2 import __version__ as j2_version
display.warning(
'jinja2_native requires Jinja 2.10 and above. '
'Version detected: %s. Falling back to default.' % j2_version
)
else:
from jinja2 import Environment
from jinja2.utils import concat as j2_concat
def generate_ansible_template_vars(path):
b_path = to_bytes(path)
try:
template_uid = pwd.getpwuid(os.stat(b_path).st_uid).pw_name
except (KeyError, TypeError):
template_uid = os.stat(b_path).st_uid
temp_vars = {}
temp_vars['template_host'] = to_text(os.uname()[1])
temp_vars['template_path'] = path
temp_vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(b_path))
temp_vars['template_uid'] = to_text(template_uid)
temp_vars['template_fullpath'] = os.path.abspath(path)
temp_vars['template_run_date'] = datetime.datetime.now()
managed_default = C.DEFAULT_MANAGED_STR
managed_str = managed_default.format(
host=temp_vars['template_host'],
uid=temp_vars['template_uid'],
file=temp_vars['template_path'],
)
temp_vars['ansible_managed'] = to_text(time.strftime(to_native(managed_str), time.localtime(os.path.getmtime(b_path))))
return temp_vars
def _escape_backslashes(data, jinja_env):
"""Double backslashes within jinja2 expressions
A user may enter something like this in a playbook::
debug:
msg: "Test Case 1\\3; {{ test1_name | regex_replace('^(.*)_name$', '\\1')}}"
The string inside of the {{ gets interpreted multiple times. First by yaml.
Then by python. And finally by jinja2 as part of its variable. Because
it is processed by both python and jinja2, the backslash escaped
characters get unescaped twice. This means that we'd normally have to use
four backslashes to escape that. This is painful for playbook authors as
they have to remember different rules for inside vs outside of a jinja2
expression (The backslashes outside of the "{{ }}" only get processed by
yaml and python. So they only need to be escaped once). The following
code fixes this by automatically performing the extra quoting of
backslashes inside of a jinja2 expression.
"""
if '\\' in data and '{{' in data:
new_data = []
d2 = jinja_env.preprocess(data)
in_var = False
for token in jinja_env.lex(d2):
if token[1] == 'variable_begin':
in_var = True
new_data.append(token[2])
elif token[1] == 'variable_end':
in_var = False
new_data.append(token[2])
elif in_var and token[1] == 'string':
# Double backslashes only if we're inside of a jinja2 variable
new_data.append(token[2].replace('\\', '\\\\'))
else:
new_data.append(token[2])
data = ''.join(new_data)
return data
def _count_newlines_from_end(in_str):
'''
Counts the number of newlines at the end of a string. This is used during
the jinja2 templating to ensure the count matches the input, since some newlines
may be thrown away during the templating.
'''
try:
i = len(in_str)
j = i - 1
while in_str[j] == '\n':
j -= 1
return i - 1 - j
except IndexError:
# Uncommon cases: zero length string and string containing only newlines
return i
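# For example, _count_newlines_from_end(u'ab\n\n') == 2, while the empty
# string returns 0 via the IndexError fallback.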
def tests_as_filters_warning(name, func):
'''
Closure to enable displaying a deprecation warning when tests are used as a filter
This closure is only used when registering ansible provided tests as filters
This function should be removed in 2.9 along with registering ansible provided tests as filters
in Templar._get_filters
'''
@wraps(func)
def wrapper(*args, **kwargs):
display.deprecated(
'Using tests as filters is deprecated. Instead of using `result|%(name)s` use '
'`result is %(name)s`' % dict(name=name),
version='2.9'
)
return func(*args, **kwargs)
return wrapper
class AnsibleUndefined(StrictUndefined):
'''
A custom Undefined class, which returns further Undefined objects on access,
rather than throwing an exception.
'''
def __getattr__(self, name):
# Return original Undefined object to preserve the first failure context
return self
def __repr__(self):
return 'AnsibleUndefined'
class AnsibleContext(Context):
'''
A custom context, which intercepts resolve() calls and sets a flag
internally if any variable lookup returns an AnsibleUnsafe value. This
flag is checked post-templating, and (when set) will result in the
final templated result being wrapped via UnsafeProxy.
'''
def __init__(self, *args, **kwargs):
super(AnsibleContext, self).__init__(*args, **kwargs)
self.unsafe = False
def _is_unsafe(self, val):
'''
Our helper function, which will also recursively check dict and
list entries due to the fact that they may be repr'd and contain
a key or value which contains jinja2 syntax and would otherwise
lose the AnsibleUnsafe value.
'''
if isinstance(val, dict):
for key in val.keys():
if self._is_unsafe(val[key]):
return True
elif isinstance(val, list):
for item in val:
if self._is_unsafe(item):
return True
elif isinstance(val, string_types) and hasattr(val, '__UNSAFE__'):
return True
return False
def _update_unsafe(self, val):
if val is not None and not self.unsafe and self._is_unsafe(val):
self.unsafe = True
def resolve(self, key):
'''
The intercepted resolve(), which uses the helper above to set the
internal flag whenever an unsafe variable value is returned.
'''
val = super(AnsibleContext, self).resolve(key)
self._update_unsafe(val)
return val
def resolve_or_missing(self, key):
val = super(AnsibleContext, self).resolve_or_missing(key)
self._update_unsafe(val)
return val
class AnsibleEnvironment(Environment):
'''
Our custom environment, which simply allows us to override the class-level
values for the Template and Context classes used by jinja2 internally.
'''
context_class = AnsibleContext
template_class = AnsibleJ2Template
class Templar:
'''
The main class for templating, with the main entry-point of template().
'''
def __init__(self, loader, shared_loader_obj=None, variables=None):
variables = {} if variables is None else variables
self._loader = loader
self._filters = None
self._tests = None
self._available_variables = variables
self._cached_result = {}
if loader:
self._basedir = loader.get_basedir()
else:
self._basedir = './'
if shared_loader_obj:
self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
self._test_loader = getattr(shared_loader_obj, 'test_loader')
self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
else:
self._filter_loader = filter_loader
self._test_loader = test_loader
self._lookup_loader = lookup_loader
# flags to determine whether certain failures during templating
# should result in fatal errors being raised
self._fail_on_lookup_errors = True
self._fail_on_filter_errors = True
self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
self.environment = AnsibleEnvironment(
trim_blocks=True,
undefined=AnsibleUndefined,
extensions=self._get_extensions(),
finalize=self._finalize,
loader=FileSystemLoader(self._basedir),
)
# the current rendering context under which the templar class is working
self.cur_context = None
self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string))
self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (
self.environment.variable_start_string,
self.environment.block_start_string,
self.environment.block_end_string,
self.environment.variable_end_string
))
self._no_type_regex = re.compile(r'.*?\|\s*(?:%s)(?:\([^\|]*\))?\s*\)?\s*(?:%s)' %
('|'.join(C.STRING_TYPE_FILTERS), self.environment.variable_end_string))
def _get_filters(self, builtin_filters):
'''
Returns filter plugins, after loading and caching them if need be
'''
if self._filters is not None:
return self._filters.copy()
self._filters = dict()
# TODO: Remove registering tests as filters in 2.9
for name, func in self._get_tests().items():
if name in builtin_filters:
# If we have a custom test named the same as a builtin filter, don't register as a filter
continue
self._filters[name] = tests_as_filters_warning(name, func)
for fp in self._filter_loader.all():
self._filters.update(fp.filters())
return self._filters.copy()
def _get_tests(self):
'''
Returns tests plugins, after loading and caching them if need be
'''
if self._tests is not None:
return self._tests.copy()
self._tests = dict()
for fp in self._test_loader.all():
self._tests.update(fp.tests())
return self._tests.copy()
def _get_extensions(self):
'''
Return jinja2 extensions to load.
If some extensions are set via jinja_extensions in ansible.cfg, we try
to load them with the jinja environment.
'''
jinja_exts = []
if C.DEFAULT_JINJA2_EXTENSIONS:
# make sure the configuration directive doesn't contain spaces
# and split extensions in an array
jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
return jinja_exts
def set_available_variables(self, variables):
'''
Sets the list of template variables this Templar instance will use
to template things, so we don't have to pass them around between
internal methods. We also clear the template cache here, as the variables
are being changed.
'''
if not isinstance(variables, dict):
raise AnsibleAssertionError("the type of 'variables' should be a dict but was a %s" % (type(variables)))
self._available_variables = variables
self._cached_result = {}
def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
convert_data=True, static_vars=None, cache=True, disable_lookups=False):
'''
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
before being sent through the template engine.
'''
static_vars = [''] if static_vars is None else static_vars
# Don't template unsafe variables, just return them.
if hasattr(variable, '__UNSAFE__'):
return variable
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
try:
if convert_bare:
variable = self._convert_bare_variable(variable)
if isinstance(variable, string_types):
result = variable
if self._contains_vars(variable):
# Check to see if the string we are trying to render is just referencing a single
# var. In this case we don't want to accidentally change the type of the variable
# to a string by using the jinja template renderer. We just want to pass it.
only_one = self.SINGLE_VAR.match(variable)
if only_one:
var_name = only_one.group(1)
if var_name in self._available_variables:
resolved_val = self._available_variables[var_name]
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
elif resolved_val is None:
return C.DEFAULT_NULL_REPRESENTATION
# Using a cache in order to prevent template calls with already templated variables
sha1_hash = None
if cache:
variable_hash = sha1(text_type(variable).encode('utf-8'))
options_hash = sha1(
(
text_type(preserve_trailing_newlines) +
text_type(escape_backslashes) +
text_type(fail_on_undefined) +
text_type(overrides)
).encode('utf-8')
)
sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
if cache and sha1_hash in self._cached_result:
result = self._cached_result[sha1_hash]
else:
result = self.do_template(
variable,
preserve_trailing_newlines=preserve_trailing_newlines,
escape_backslashes=escape_backslashes,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
)
if not USE_JINJA2_NATIVE:
unsafe = hasattr(result, '__UNSAFE__')
if convert_data and not self._no_type_regex.match(variable):
# if this looks like a dictionary or list, convert it to such using the safe_eval method
if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
result.startswith("[") or result in ("True", "False"):
eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
if eval_results[1] is None:
result = eval_results[0]
if unsafe:
result = wrap_var(result)
else:
# FIXME: if the safe_eval raised an error, should we do something with it?
pass
# we only cache in the case where we have a single variable
# name, to make sure we're not putting things which may otherwise
# be dynamic in the cache (filters, lookups, etc.)
if cache:
self._cached_result[sha1_hash] = result
return result
elif isinstance(variable, (list, tuple)):
return [self.template(
v,
preserve_trailing_newlines=preserve_trailing_newlines,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
) for v in variable]
elif isinstance(variable, (dict, Mapping)):
d = {}
# we don't use iteritems() here to avoid problems if the underlying dict
# changes sizes due to the templating, which can happen with hostvars
for k in variable.keys():
if k not in static_vars:
d[k] = self.template(
variable[k],
preserve_trailing_newlines=preserve_trailing_newlines,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
)
else:
d[k] = variable[k]
return d
else:
return variable
except AnsibleFilterError:
if self._fail_on_filter_errors:
raise
else:
return variable
def is_template(self, data):
''' lets us know if data has a template'''
if isinstance(data, string_types):
try:
new = self.do_template(data, fail_on_undefined=True)
except (AnsibleUndefinedVariable, UndefinedError):
return True
except Exception:
return False
return (new != data)
elif isinstance(data, (list, tuple)):
for v in data:
if self.is_template(v):
return True
elif isinstance(data, dict):
for k in data:
if self.is_template(k) or self.is_template(data[k]):
return True
return False
def templatable(self, data):
'''
returns True if the data can be templated w/o errors
'''
templatable = True
try:
self.template(data)
except Exception:
templatable = False
return templatable
def _contains_vars(self, data):
'''
returns True if the data contains a variable pattern
'''
if isinstance(data, string_types):
for marker in (self.environment.block_start_string, self.environment.variable_start_string, self.environment.comment_start_string):
if marker in data:
return True
return False
def _convert_bare_variable(self, variable):
'''
Wraps a bare string, which may have an attribute portion (ie. foo.bar)
in jinja2 variable braces so that it is evaluated properly.
'''
if isinstance(variable, string_types):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
if (contains_filters or first_part in self._available_variables) and self.environment.variable_start_string not in variable:
return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string)
# the variable didn't meet the conditions to be converted,
# so just return it as-is
return variable
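# For example, 'foo.bar' becomes '{{foo.bar}}' when 'foo' is a known
# variable (or when a filter pipe is present); anything else is returned
# unchanged.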
def _finalize(self, thing):
'''
A custom finalize method for jinja2, which prevents None from being returned. This
avoids a string of ``"None"`` as ``None`` has no importance in YAML.
If using ANSIBLE_JINJA2_NATIVE we bypass this and return the actual value always
'''
if USE_JINJA2_NATIVE:
return thing
return thing if thing is not None else ''
def _fail_lookup(self, name, *args, **kwargs):
raise AnsibleError("The lookup `%s` was found, however lookups were disabled from templating" % name)
def _now_datetime(self, utc=False, fmt=None):
'''jinja2 global function to return current datetime, potentially formatted via strftime'''
if utc:
now = datetime.datetime.utcnow()
else:
now = datetime.datetime.now()
if fmt:
return now.strftime(fmt)
return now
def _query_lookup(self, name, *args, **kwargs):
''' wrapper for lookup, force wantlist true'''
kwargs['wantlist'] = True
return self._lookup(name, *args, **kwargs)
def _lookup(self, name, *args, **kwargs):
instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)
if instance is not None:
wantlist = kwargs.pop('wantlist', False)
allow_unsafe = kwargs.pop('allow_unsafe', C.DEFAULT_ALLOW_UNSAFE_LOOKUPS)
errors = kwargs.pop('errors', 'strict')
from ansible.utils.listify import listify_lookup_plugin_terms
loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
# safely catch run failures per #5059
try:
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
except (AnsibleUndefinedVariable, UndefinedError) as e:
raise AnsibleUndefinedVariable(e)
except Exception as e:
if self._fail_on_lookup_errors:
msg = u"An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % \
(name, type(e), to_text(e))
if errors == 'warn':
display.warning(msg)
elif errors == 'ignore':
display.display(msg, log_only=True)
else:
raise AnsibleError(to_native(msg))
ran = None
if ran and not allow_unsafe:
if wantlist:
ran = wrap_var(ran)
else:
try:
ran = UnsafeProxy(",".join(ran))
except TypeError:
# Lookup Plugins should always return lists. Throw an error if that's not
# the case:
if not isinstance(ran, Sequence):
raise AnsibleError("The lookup plugin '%s' did not return a list."
% name)
# The TypeError we can recover from is when the value *inside* of the list
# is not a string
if len(ran) == 1:
ran = wrap_var(ran[0])
else:
ran = wrap_var(ran)
if self.cur_context:
self.cur_context.unsafe = True
return ran
else:
raise AnsibleError("lookup plugin (%s) not found" % name)
def do_template(self, data, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, disable_lookups=False):
if USE_JINJA2_NATIVE and not isinstance(data, string_types):
return data
# For preserving the number of input newlines in the output (used
# later in this method)
data_newlines = _count_newlines_from_end(data)
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
try:
# allows template header overrides to change jinja2 options.
if overrides is None:
myenv = self.environment.overlay()
else:
myenv = self.environment.overlay(overrides)
# Get jinja env overrides from template
if hasattr(data, 'startswith') and data.startswith(JINJA2_OVERRIDE):
eol = data.find('\n')
line = data[len(JINJA2_OVERRIDE):eol]
data = data[eol + 1:]
for pair in line.split(','):
(key, val) = pair.split(':')
key = key.strip()
setattr(myenv, key, ast.literal_eval(val.strip()))
# Adds Ansible custom filters and tests
myenv.filters.update(self._get_filters(myenv.filters))
myenv.tests.update(self._get_tests())
if escape_backslashes:
# Allow users to specify backslashes in playbooks as "\\" instead of as "\\\\".
data = _escape_backslashes(data, myenv)
try:
t = myenv.from_string(data)
except TemplateSyntaxError as e:
raise AnsibleError("template error while templating string: %s. String: %s" % (to_native(e), to_native(data)))
except Exception as e:
if 'recursion' in to_native(e):
raise AnsibleError("recursive loop detected in template string: %s" % to_native(data))
else:
return data
if disable_lookups:
t.globals['query'] = t.globals['q'] = t.globals['lookup'] = self._fail_lookup
else:
t.globals['lookup'] = self._lookup
t.globals['query'] = t.globals['q'] = self._query_lookup
t.globals['now'] = self._now_datetime
t.globals['finalize'] = self._finalize
jvars = AnsibleJ2Vars(self, t.globals)
self.cur_context = new_context = t.new_context(jvars, shared=True)
rf = t.root_render_func(new_context)
try:
res = j2_concat(rf)
if getattr(new_context, 'unsafe', False):
res = wrap_var(res)
except TypeError as te:
if 'AnsibleUndefined' in to_native(te):
errmsg = "Unable to look up a name or access an attribute in template string (%s).\n" % to_native(data)
errmsg += "Make sure your variable name does not contain invalid characters like '-': %s" % to_native(te)
raise AnsibleUndefinedVariable(errmsg)
else:
display.debug("failing because of a type error, template data is: %s" % to_native(data))
raise AnsibleError("Unexpected templating type error occurred on (%s): %s" % (to_native(data), to_native(te)))
if USE_JINJA2_NATIVE and not isinstance(res, string_types):
return res
if preserve_trailing_newlines:
# The low level calls above do not preserve the newline
# characters at the end of the input data, so we calculate the
# difference in newlines and append them
# to the resulting output for parity
#
# jinja2 added a keep_trailing_newline option in 2.7 when
# creating an Environment. That would let us make this code
# better (remove a single newline if
# preserve_trailing_newlines is False). Once we can depend on
# that version being present, modify our code to set that when
# initializing self.environment and remove a single trailing
# newline here if preserve_newlines is False.
res_newlines = _count_newlines_from_end(res)
if data_newlines > res_newlines:
res += self.environment.newline_sequence * (data_newlines - res_newlines)
return res
except (UndefinedError, AnsibleUndefinedVariable) as e:
if fail_on_undefined:
raise AnsibleUndefinedVariable(e)
else:
display.debug("Ignoring undefined failure: %s" % to_text(e))
return data
# for backwards compatibility in case anyone is using old private method directly
_do_template = do_template
|
mozilla/popcorn_maker | refs/heads/master | vendor-local/lib/python/requests/packages/oreos/__init__.py | 53 | # -*- coding: utf-8 -*-
from .core import dict_from_string |
vabs22/zulip | refs/heads/master | tools/linter_lib/custom_check.py | 1 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import traceback
from .printer import print_err, colors
from typing import cast, Any, Callable, Dict, List, Optional, Tuple
def build_custom_checkers(by_lang):
# type: (Dict[str, List[str]]) -> Tuple[Callable[[], bool], Callable[[], bool]]
RuleList = List[Dict[str, Any]]
def custom_check_file(fn, identifier, rules, color, skip_rules=None, max_length=None):
# type: (str, str, RuleList, str, Optional[Any], Optional[int]) -> bool
failed = False
line_tups = []
for i, line in enumerate(open(fn)):
line_newline_stripped = line.strip('\n')
line_fully_stripped = line_newline_stripped.strip()
skip = False
for rule in skip_rules or []:
if re.match(rule, line):
skip = True
if line_fully_stripped.endswith(' # nolint'):
continue
if skip:
continue
tup = (i, line, line_newline_stripped, line_fully_stripped)
line_tups.append(tup)
rules_to_apply = []
fn_dirname = os.path.dirname(fn)
for rule in rules:
exclude_list = rule.get('exclude', set())
if fn in exclude_list or fn_dirname in exclude_list:
continue
if rule.get("include_only"):
found = False
for item in rule.get("include_only", set()):
if item in fn:
found = True
if not found:
continue
rules_to_apply.append(rule)
for rule in rules_to_apply:
exclude_lines = {
line for
(exclude_fn, line) in rule.get('exclude_line', set())
if exclude_fn == fn
}
pattern = rule['pattern']
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if line_fully_stripped in exclude_lines:
exclude_lines.remove(line_fully_stripped)
continue
try:
line_to_check = line_fully_stripped
if rule.get('strip') is not None:
if rule['strip'] == '\n':
line_to_check = line_newline_stripped
else:
raise Exception("Invalid strip rule")
if re.search(pattern, line_to_check):
print_err(identifier, color, '{} at {} line {}:'.format(
rule['description'], fn, i+1))
print_err(identifier, color, line)
failed = True
except Exception:
print("Exception with %s at %s line %s" % (rule['pattern'], fn, i+1))
traceback.print_exc()
if exclude_lines:
print('Please remove exclusions for file %s: %s' % (fn, exclude_lines))
lastLine = None
for (i, line, line_newline_stripped, line_fully_stripped) in line_tups:
if isinstance(line, bytes):
line_length = len(line.decode("utf-8"))
else:
line_length = len(line)
if (max_length is not None and line_length > max_length and
'# type' not in line and 'test' not in fn and 'example' not in fn and
not re.match("\[[ A-Za-z0-9_:,&()-]*\]: http.*", line) and
not re.match("`\{\{ external_api_uri_subdomain \}\}[^`]+`", line) and
"#ignorelongline" not in line and 'migrations' not in fn):
print("Line too long (%s) at %s line %s: %s" % (line_length, fn, i+1, line_newline_stripped))
failed = True
lastLine = line
if lastLine and ('\n' not in lastLine):
print("No newline at the end of file. Fix with `sed -i '$a\\' %s`" % (fn,))
failed = True
return failed
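# A minimal usage sketch for custom_check_file (hypothetical rule and file
# name, not one of the real rule lists below):
#
#     demo_rules = [{'pattern': 'TODO', 'description': 'Resolve TODOs'}]
#     failed = custom_check_file('foo.py', 'py', demo_rules, next(colors))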
whitespace_rules = [
# This linter should be first since bash_rules depends on it.
{'pattern': '\s+$',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '\t',
'strip': '\n',
'exclude': set(['zerver/lib/bugdown/codehilite.py',
'tools/travis/success-http-headers.txt']),
'description': 'Fix tab-based whitespace'},
] # type: RuleList
markdown_whitespace_rules = list([rule for rule in whitespace_rules if rule['pattern'] != '\s+$']) + [
# Two spaces trailing a line with other content is okay--it's a markdown line break.
# This rule finds one space trailing a non-space, three or more trailing spaces, and
# spaces on an empty line.
{'pattern': '((?<!\s)\s$)|(\s\s\s+$)|(^\s+$)',
'strip': '\n',
'description': 'Fix trailing whitespace'},
{'pattern': '^#+[A-Za-z0-9]',
'strip': '\n',
'description': 'Missing space after # in heading'},
] # type: RuleList
js_rules = cast(RuleList, [
{'pattern': '[^_]function\(',
'description': 'The keyword "function" should be followed by a space'},
{'pattern': '.*blueslip.warning\(.*',
'description': 'The module blueslip has no function warning, try using blueslip.warn'},
{'pattern': '[)]{$',
'description': 'Missing space between ) and {'},
{'pattern': '["\']json/',
'description': 'Relative URL for JSON route not supported by i18n'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '^[ ]*//[A-Za-z0-9]',
'description': 'Missing space after // in comment'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': 'else{$',
'description': 'Missing space between else and {'},
{'pattern': '^else {$',
'description': 'Write JS else statements on same line as }'},
{'pattern': '^else if',
'description': 'Write JS else statements on same line as }'},
{'pattern': 'console[.][a-z]',
'exclude': set(['static/js/blueslip.js',
'frontend_tests/zjsunit',
'frontend_tests/casper_lib/common.js',
'frontend_tests/node_tests',
'static/js/debug.js']),
'description': 'console.log and similar should not be used in webapp'},
{'pattern': 'i18n[.]t',
'include_only': set(['static/js/portico/']),
'description': 'i18n.t is not available in portico pages yet'},
{'pattern': '[.]text\(["\'][a-zA-Z]',
'description': 'Strings passed to $().text should be wrapped in i18n.t() for internationalization'},
{'pattern': 'compose_error\(["\']',
'description': 'Argument to compose_error should be a literal string enclosed '
'by i18n.t()'},
{'pattern': 'ui.report_success\(',
'description': 'Deprecated function, use ui_report.success.'},
{'pattern': 'report.success\(["\']',
'description': 'Argument to report_success should be a literal string enclosed '
'by i18n.t()'},
{'pattern': 'ui.report_error\(',
'description': 'Deprecated function, use ui_report.error.'},
{'pattern': 'report.error\(["\']',
'description': 'Argument to report_error should be a literal string enclosed '
'by i18n.t()'},
]) + whitespace_rules
python_rules = cast(RuleList, [
{'pattern': '^(?!#)@login_required',
'description': '@login_required is unsupported; use @zulip_login_required'},
{'pattern': '".*"%\([a-z_].*\)?$',
'description': 'Missing space around "%"'},
{'pattern': "'.*'%\([a-z_].*\)?$",
'exclude': set(['analytics/lib/counts.py',
'analytics/tests/test_counts.py',
]),
'exclude_line': set([
('zerver/views/users.py',
"return json_error(_(\"Email '%(email)s' not allowed for realm '%(realm)s'\") %"),
('zproject/settings.py',
"'format': '%(asctime)s %(levelname)-8s %(message)s'"),
('static/templates/settings/bot-settings.handlebars',
"'https://hostname.example.com/bots/followup'"),
]),
'description': 'Missing space around "%"'},
# This rule is constructed with + to avoid triggering on itself
{'pattern': " =" + '[^ =>~"]',
'description': 'Missing whitespace after "="'},
{'pattern': '":\w[^"]*$',
'description': 'Missing whitespace after ":"'},
{'pattern': "':\w[^']*$",
'description': 'Missing whitespace after ":"'},
{'pattern': "^\s+[#]\w",
'strip': '\n',
'description': 'Missing whitespace after "#"'},
{'pattern': "assertEquals[(]",
'description': 'Use assertEqual, not assertEquals (which is deprecated).'},
{'pattern': "== None",
'description': 'Use `is None` to check whether something is None'},
{'pattern': "type:[(]",
'description': 'Missing whitespace after ":" in type annotation'},
{'pattern': "type: ignore$",
'exclude': set(['tools/tests',
'zerver/lib/test_runner.py',
'zerver/tests']),
'description': '"type: ignore" should always end with "# type: ignore # explanation for why"'},
{'pattern': "# type [(]",
'description': 'Missing : after type in type annotation'},
{'pattern': "#type",
'description': 'Missing whitespace after "#" in type annotation'},
{'pattern': 'if[(]',
'description': 'Missing space between if and ('},
{'pattern': ", [)]",
'description': 'Unnecessary whitespace between "," and ")"'},
{'pattern': "% [(]",
'description': 'Unnecessary whitespace between "%" and "("'},
# This next check could have false positives, but it seems pretty
# rare; if we find any, they can be added to the exclude list for
# this rule.
{'pattern': ' % [a-zA-Z0-9_.]*\)?$',
'exclude_line': set([
('tools/tests/test_template_parser.py', '{% foo'),
]),
'description': 'Used % comprehension without a tuple'},
{'pattern': '.*%s.* % \([a-zA-Z0-9_.]*\)$',
'description': 'Used % comprehension without a tuple'},
{'pattern': 'django.utils.translation',
'include_only': set(['test/']),
'description': 'Test strings should not be tagged for translation'},
{'pattern': 'json_success\({}\)',
'description': 'Use json_success() to return nothing'},
# To avoid json_error(_variable) and json_error(_(variable))
{'pattern': '\Wjson_error\(_\(?\w+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to json_error should be a literal string enclosed by _()'},
{'pattern': '\Wjson_error\([\'"].+[),]$',
'exclude': set(['zerver/tests']),
'exclude_line': set([
# We don't want this string tagged for translation.
('zerver/views/compatibility.py', 'return json_error("Client is too old")'),
]),
'description': 'Argument to json_error should be a literal string enclosed by _()'},
# To avoid JsonableError(_variable) and JsonableError(_(variable))
{'pattern': '\WJsonableError\(_\(?\w.+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': '\WJsonableError\(["\'].+\)',
'exclude': set(['zerver/tests']),
'description': 'Argument to JsonableError should be a literal string enclosed by _()'},
{'pattern': '([a-zA-Z0-9_]+)=REQ\([\'"]\\1[\'"]',
'description': 'REQ\'s first argument already defaults to parameter name'},
{'pattern': 'self\.client\.(get|post|patch|put|delete)',
'exclude': set(['zilencer/tests.py']),
'description': \
'''Do not call self.client directly for put/patch/post/get.
See WRAPPER_COMMENT in test_helpers.py for details.
'''},
# Directly fetching Message objects in e.g. views code is often a security bug.
{'pattern': '[^r][M]essage.objects.get',
'exclude': set(["zerver/tests", "zerver/worker/queue_processors.py"]),
'description': 'Please use access_message() to fetch Message objects',
},
{'pattern': '[S]tream.objects.get',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': 'get_stream[(]',
'include_only': set(["zerver/views/", "zerver/lib/actions.py"]),
# messages.py needs to support accessing invite-only streams
# that you are no longer subscribed to, so need get_stream.
'exclude': set(['zerver/views/messages.py']),
'exclude_line': set([
# This is a check for whether a stream rename is invalid because it already exists
('zerver/lib/actions.py', 'get_stream(new_name, stream.realm)'),
# This one in check_message is kinda terrible, since it's
# how most instances are written, but better to exclude something than nothing
('zerver/lib/actions.py', 'stream = get_stream(stream_name, realm)'),
('zerver/lib/actions.py', 'get_stream(signups_stream, admin_realm)'),
]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '[S]tream.objects.filter',
'include_only': set(["zerver/views/"]),
'description': 'Please use access_stream_by_*() to fetch Stream objects',
},
{'pattern': '^from (zerver|analytics|confirmation)',
'include_only': set(["/migrations/"]),
'exclude': set(['zerver/migrations/0032_verify_all_medium_avatar_images.py',
'zerver/migrations/0041_create_attachments_for_old_messages.py',
'zerver/migrations/0060_move_avatars_to_be_uid_based.py']),
'description': "Don't import models or other code in migrations; see docs/schema-migrations.md",
},
{'pattern': 'datetime[.](now|utcnow)',
'include_only': set(["zerver/", "analytics/"]),
'description': "Don't use datetime in backend code.\n"
"See https://zulip.readthedocs.io/en/latest/code-style.html#naive-datetime-objects",
},
{'pattern': 'render_to_response\(',
'description': "Use render() instead of render_to_response().",
},
# This rule might give false positives in virtualenv setup files which should be excluded,
# and comments which should be rewritten to avoid use of "python2", "python3", etc.
{'pattern': 'python[23]',
'exclude': set(['tools/lib/provision.py',
'tools/setup/setup_venvs.py',
'scripts/lib/setup_venv.py']),
'description': 'Explicit python invocations should not include a version'},
{'pattern': '(^|\s)open\s*\(',
'description': 'open() should not be used in Zulip\'s bots. Use functions'
' provided by the bots framework to access the filesystem.',
'include_only': set(['api/bots/']),
'exclude': set(['api/bots/john/john.py'])},
]) + whitespace_rules
bash_rules = [
{'pattern': '#!.*sh [-xe]',
'description': 'Fix shebang line with proper call to /usr/bin/env for Bash path, change -x|-e switches'
' to set -x|set -e'},
] + whitespace_rules[0:1] # type: RuleList
css_rules = cast(RuleList, [
{'pattern': '^[^:]*:\S[^:]*;$',
'description': "Missing whitespace after : in CSS"},
{'pattern': '[a-z]{',
'description': "Missing whitespace before '{' in CSS."},
{'pattern': 'https://',
'description': "Zulip CSS should have no dependencies on external resources"},
{'pattern': '^[ ][ ][a-zA-Z0-9]',
'description': "Incorrect 2-space indentation in CSS",
'exclude': set(['static/third/thirdparty-fonts.css']),
'strip': '\n'},
{'pattern': '{\w',
'description': "Missing whitespace after '{' in CSS (should be newline)."},
{'pattern': ' thin[; ]',
'description': "thin CSS attribute is under-specified, please use 1px."},
{'pattern': ' medium[; ]',
'description': "medium CSS attribute is under-specified, please use pixels."},
{'pattern': ' thick[; ]',
'description': "thick CSS attribute is under-specified, please use pixels."},
]) + whitespace_rules # type: RuleList
prose_style_rules = [
{'pattern': '[^\/\#\-\"]([jJ]avascript)', # exclude usage in hrefs/divs
'description': "javascript should be spelled JavaScript"},
{'pattern': '[^\/\-\.\"\'\_\=\>]([gG]ithub)[^\.\-\_\"\<]', # exclude usage in hrefs/divs
'description': "github should be spelled GitHub"},
{'pattern': '[oO]rganisation', # exclude usage in hrefs/divs
'description': "Organization is spelled with a z"},
{'pattern': '!!! warning',
'description': "!!! warning is invalid; it's spelled '!!! warn'"},
] # type: RuleList
html_rules = whitespace_rules + prose_style_rules + [
{'pattern': 'placeholder="[^{]',
'description': "`placeholder` value should be translatable.",
'exclude_line': [('templates/zerver/register.html', 'placeholder="acme"'),
('templates/zerver/register.html', 'placeholder="Acme or Aκμή"'),
('static/templates/settings/realm-domains-modal.handlebars',
'<td><input type="text" class="new-realm-domain" placeholder="acme.com"></input></td>')],
'exclude': set(["static/templates/settings/emoji-settings-admin.handlebars",
"static/templates/settings/realm-filter-settings-admin.handlebars",
"static/templates/settings/bot-settings.handlebars"])},
{'pattern': "placeholder='[^{]",
'description': "`placeholder` value should be translatable."},
{'pattern': "aria-label='[^{]",
'description': "`aria-label` value should be translatable."},
{'pattern': 'aria-label="[^{]',
'description': "`aria-label` value should be translatable."},
{'pattern': 'script src="http',
'description': "Don't directly load dependencies from CDNs. See docs/front-end-build-process.md"},
{'pattern': "title='[^{]",
'description': "`title` value should be translatable."},
{'pattern': 'title="[^{\:]',
'exclude_line': set([
('templates/zerver/markdown_help.html',
'<td><img alt=":heart:" class="emoji" src="/static/generated/emoji/images/emoji/heart.png" title=":heart:" /></td>')
]),
'exclude': set(["templates/zerver/emails"]),
'description': "`title` value should be translatable."},
{'pattern': '\Walt=["\'][^{"\']',
'description': "alt argument should be enclosed by _() or it should be an empty string.",
'exclude': set(['static/templates/settings/display-settings.handlebars',
'templates/zerver/keyboard_shortcuts.html',
'templates/zerver/markdown_help.html']),
},
{'pattern': '\Walt=["\']{{ ?["\']',
'description': "alt argument should be enclosed by _().",
},
] # type: RuleList
handlebars_rules = html_rules + [
{'pattern': "[<]script",
'description': "Do not use inline <script> tags here; put JavaScript in static/js instead."},
{'pattern': "{{t '.*' }}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': '{{t ".*" }}[\.\?!]',
'description': "Period should be part of the translatable string."},
{'pattern': "{{/tr}}[\.\?!]",
'description': "Period should be part of the translatable string."},
]
jinja2_rules = html_rules + [
{'pattern': "{% endtrans %}[\.\?!]",
'description': "Period should be part of the translatable string."},
{'pattern': "{{ _(.+) }}[\.\?!]",
'description': "Period should be part of the translatable string."},
]
json_rules = [] # type: RuleList # fix newlines at ends of files
# It is okay that json_rules is empty, because the empty list
# ensures we'll still check JSON files for whitespace.
markdown_rules = markdown_whitespace_rules + prose_style_rules + [
{'pattern': '\[(?P<url>[^\]]+)\]\((?P=url)\)',
'description': 'Linkified markdown URLs should use cleaner <http://example.com> syntax.'}
]
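# For example, the rule above flags a self-linked URL such as
#     [https://zulip.org](https://zulip.org)
# and asks for <https://zulip.org> instead (illustrative, not taken from
# the codebase).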
help_markdown_rules = markdown_rules + [
{'pattern': '[a-z][.][A-Z]',
'description': "Likely missing space after end of sentence"},
{'pattern': '[rR]ealm',
'description': "Realms are referred to as Organizations in user-facing docs."},
]
txt_rules = whitespace_rules
def check_custom_checks_py():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['py']:
if 'custom_check.py' in fn:
continue
if custom_check_file(fn, 'py', python_rules, color, max_length=140):
failed = True
return failed
def check_custom_checks_nonpy():
# type: () -> bool
failed = False
color = next(colors)
for fn in by_lang['js']:
if custom_check_file(fn, 'js', js_rules, color):
failed = True
color = next(colors)
for fn in by_lang['sh']:
if custom_check_file(fn, 'sh', bash_rules, color):
failed = True
color = next(colors)
for fn in by_lang['css']:
if custom_check_file(fn, 'css', css_rules, color):
failed = True
color = next(colors)
for fn in by_lang['handlebars']:
if custom_check_file(fn, 'handlebars', handlebars_rules, color):
failed = True
color = next(colors)
for fn in by_lang['html']:
if custom_check_file(fn, 'html', jinja2_rules, color):
failed = True
color = next(colors)
for fn in by_lang['json']:
if custom_check_file(fn, 'json', json_rules, color):
failed = True
color = next(colors)
markdown_docs_length_exclude = {
"api/bots/converter/readme.md",
"docs/bots-guide.md",
"docs/dev-env-first-time-contributors.md",
"docs/webhook-walkthrough.md",
"docs/life-of-a-request.md",
"docs/logging.md",
"docs/migration-renumbering.md",
"docs/readme-symlink.md",
"README.md",
"zerver/webhooks/helloworld/doc.md",
"zerver/webhooks/trello/doc.md",
"templates/zerver/integrations/perforce.md",
}
for fn in by_lang['md']:
max_length = None
if fn not in markdown_docs_length_exclude:
max_length = 120
rules = markdown_rules
if fn.startswith("templates/zerver/help"):
rules = help_markdown_rules
if custom_check_file(fn, 'md', rules, color, max_length=max_length):
failed = True
color = next(colors)
for fn in by_lang['txt'] + by_lang['text']:
if custom_check_file(fn, 'txt', txt_rules, color):
failed = True
color = next(colors)
for fn in by_lang['yaml']:
if custom_check_file(fn, 'yaml', txt_rules, color):
failed = True
return failed
return (check_custom_checks_py, check_custom_checks_nonpy)
|
marineam/coreos-dev-util | refs/heads/master | constants.py | 3 | # Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
# Used for importing chromite which is expected to be 3 levels up from
# this file.
SOURCE_ROOT = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir)
|
simongoffin/my_odoo_tutorial | refs/heads/master | addons/account_analytic_plans/__init__.py | 445 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_plans
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ahmetdaglarbas/e-commerce | refs/heads/tez | sites/sandbox/apps/user/models.py | 43 | """
Sample user/profile models for testing. These aren't enabled by default in the
sandbox.
"""
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from oscar.core import compat
from oscar.apps.customer import abstract_models
class Profile(models.Model):
"""
Dummy profile model used for testing
"""
user = models.OneToOneField(compat.AUTH_USER_MODEL, related_name="profile")
MALE, FEMALE = 'M', 'F'
choices = (
(MALE, 'Male'),
(FEMALE, 'Female'))
gender = models.CharField(max_length=1, choices=choices,
verbose_name='Gender')
age = models.PositiveIntegerField(verbose_name='Age')
# A simple extension of the core User model for Django 1.5
try:
from django.contrib.auth.models import (
AbstractUser, BaseUserManager, AbstractBaseUser)
except ImportError:
pass
else:
class ExtendedUserModel(AbstractUser):
twitter_username = models.CharField(max_length=255, unique=True)
class CustomUserManager(BaseUserManager):
def create_user(self, email, password=None):
now = timezone.now()
email = BaseUserManager.normalize_email(email)
user = self.model(email=email, last_login=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
return self.create_user(email, password)
# A user model which doesn't extend AbstractUser
@python_2_unicode_compatible
class CustomUserModel(AbstractBaseUser):
name = models.CharField(max_length=255, blank=True)
email = models.EmailField(unique=True)
twitter_username = models.CharField(max_length=255, unique=True)
USERNAME_FIELD = 'email'
objects = CustomUserManager()
def __str__(self):
return self.email
def get_full_name(self):
return self.name
get_short_name = get_full_name
# A simple extension of the core Oscar User model
class ExtendedOscarUserModel(abstract_models.AbstractUser):
twitter_username = models.CharField(max_length=255, unique=True)
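# A minimal usage sketch (hypothetical values; requires AUTH_USER_MODEL to
# point at CustomUserModel and a configured database):
#
#     user = CustomUserModel.objects.create_user('a@example.com', 's3cret')
#     user.get_full_name()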
|
hongbin-liu/nxt-python | refs/heads/master | nxt/brick.py | 21 | # nxt.brick module -- Classes to represent LEGO Mindstorms NXT bricks
# Copyright (C) 2006 Douglas P Lau
# Copyright (C) 2009 Marcus Wanner, rhn
# Copyright (C) 2010 rhn, Marcus Wanner, zonedabone
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from time import sleep
from threading import Lock
from nxt.error import FileNotFound, ModuleNotFound
from nxt.telegram import OPCODES, Telegram
from nxt.sensor import get_sensor
from nxt.motcont import MotCont
def _make_poller(opcode, poll_func, parse_func):
def poll(self, *args, **kwargs):
ogram = poll_func(opcode, *args, **kwargs)
with self.lock:
self.sock.send(str(ogram))
if ogram.reply:
igram = Telegram(opcode=opcode, pkt=self.sock.recv())
if ogram.reply:
return parse_func(igram)
else:
return None
return poll
class _Meta(type):
'Metaclass which adds one method for each telegram opcode'
def __init__(cls, name, bases, dict):
super(_Meta, cls).__init__(name, bases, dict)
for opcode in OPCODES:
poll_func, parse_func = OPCODES[opcode][0:2]
m = _make_poller(opcode, poll_func, parse_func)
try:
m.__doc__ = OPCODES[opcode][2]
except:
pass
setattr(cls, poll_func.__name__, m)
class FileFinder(object):
'A generator to find files on a NXT brick.'
def __init__(self, brick, pattern):
self.brick = brick
self.pattern = pattern
self.handle = None
def _close(self):
if self.handle is not None:
self.brick.close(self.handle)
self.handle = None
def __del__(self):
self._close()
def __iter__(self):
results = []
self.handle, fname, size = self.brick.find_first(self.pattern)
results.append((fname, size))
while True:
try:
handle, fname, size = self.brick.find_next(self.handle)
results.append((fname, size))
except FileNotFound:
self._close()
break
for result in results:
yield result
def File(brick, name, mode='r', size=None):
"""Opens a file for reading/writing. Mode is 'r' or 'w'. If mode is 'w',
size must be provided.
"""
if mode == 'w':
if size is not None:
return FileWriter(brick, name, size)
else:
raise ValueError('Size not specified')
elif mode == 'r':
return FileReader(brick, name)
else:
raise ValueError('Mode ' + str(mode) + ' not supported')
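# A minimal usage sketch (assumes an already-connected Brick instance `b`):
#
#     w = File(b, 'hello.txt', mode='w', size=5)
#     w.write('hello')
#     w.close()
#     print(File(b, 'hello.txt').read())   # -> 'hello'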
class FileReader(object):
"""Context manager to read a file on a NXT brick. Do use the iterator or
the read() method, but not both at the same time!
The iterator returns strings of an arbitrary (short) length.
"""
def __init__(self, brick, fname):
self.brick = brick
self.handle, self.size = brick.open_read(fname)
def read(self, bytes=None):
if bytes is not None:
remaining = bytes
else:
remaining = self.size
bsize = self.brick.sock.bsize
data = []
while remaining > 0:
handle, bsize, buffer_ = self.brick.read(self.handle,
min(bsize, remaining))
remaining -= len(buffer_)
data.append(buffer_)
return ''.join(data)
def close(self):
if self.handle is not None:
self.brick.close(self.handle)
self.handle = None
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, etp, value, tb):
self.close()
def __iter__(self):
rem = self.size
bsize = self.brick.sock.bsize
while rem > 0:
handle, bsize, data = self.brick.read(self.handle,
min(bsize, rem))
yield data
rem -= len(data)
class FileWriter(object):
"Object to write to a file on a NXT brick"
def __init__(self, brick, fname, size):
self.brick = brick
self.handle = self.brick.open_write(fname, size)
self._position = 0
self.size = size
def __del__(self):
self.close()
def close(self):
if self.handle is not None:
self.brick.close(self.handle)
self.handle = None
def tell(self):
return self._position
def write(self, data):
remaining = len(data)
if remaining > self.size - self._position:
raise ValueError('Data will not fit into remaining space')
bsize = self.brick.sock.bsize
data_position = 0
while remaining > 0:
batch_size = min(bsize, remaining)
next_data_position = data_position + batch_size
buffer_ = data[data_position:next_data_position]
handle, size = self.brick.write(self.handle, buffer_)
self._position += batch_size
data_position = next_data_position
remaining -= batch_size
class ModuleFinder(object):
'Iterator to look up modules on a NXT brick'
def __init__(self, brick, pattern):
self.brick = brick
self.pattern = pattern
self.handle = None
def _close(self):
if self.handle:
self.brick.close(self.handle)
self.handle = None
def __del__(self):
self._close()
def __iter__(self):
self.handle, mname, mid, msize, miomap_size = \
self.brick.request_first_module(self.pattern)
yield (mname, mid, msize, miomap_size)
while True:
try:
handle, mname, mid, msize, miomap_size = \
self.brick.request_next_module(
self.handle)
yield (mname, mid, msize, miomap_size)
except ModuleNotFound:
self._close()
break
class Brick(object): #TODO: this begs to have explicit methods
'Main object for NXT Control'
__metaclass__ = _Meta
def __init__(self, sock):
self.sock = sock
self.lock = Lock()
self.mc = MotCont(self)
def play_tone_and_wait(self, frequency, duration):
self.play_tone(frequency, duration)
sleep(duration / 1000.0)
def __del__(self):
self.sock.close()
find_files = FileFinder
find_modules = ModuleFinder
open_file = File
get_sensor = get_sensor
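# A minimal usage sketch (assumes a paired NXT and nxt.locator, which ships
# with nxt-python but is not imported in this module):
#
#     import nxt.locator
#     b = nxt.locator.find_one_brick()
#     b.play_tone_and_wait(440, 500)   # A4 for half a second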
|
KnCMiner/bitcoin | refs/heads/knc | contrib/devtools/update-translations.py | 168 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
Transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bitcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
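# Illustrative behavior of the two helpers above (a sketch, not a doctest):
#
#     find_format_specifiers('%1 of %s')    # -> ['1', 's']
#     split_format_specifiers(['1', 's'])   # -> ({'1'}, ['s'])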
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation '%s'" % sanitize_string(translation))
return False
else:
if source_f != translation_f:
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override the cdata escape function to make our output match Qt's (optional,
# just for cleaner diffs when comparing; disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", '&apos;')
text = text.replace('"', '&quot;')
return text
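# Illustrative effect once the override is installed (a sketch): text such
# as 'say "hi" & <bye>' serializes as
# 'say &quot;hi&quot; &amp; &lt;bye&gt;', matching the escaping Qt's tools
# emit.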
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because only 'utf-8' is understood, not 'utf8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
boegel/easybuild-easyblocks | refs/heads/master | easybuild/easyblocks/i/ifort.py | 1 | ##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing the Intel Fortran compiler suite, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import os
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.intelbase import IntelBase
from easybuild.easyblocks.icc import EB_icc # @UnresolvedImport
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_ifort(EB_icc, IntelBase):
"""
Class that can be used to install ifort
- tested with 11.1.046
-- will fail for all older versions (due to newer silent installer)
"""
def sanity_check_step(self):
"""Custom sanity check paths for ifort."""
shlib_ext = get_shared_lib_ext()
binprefix = 'bin/intel64'
libprefix = 'lib/intel64'
if LooseVersion(self.version) >= LooseVersion('2011'):
if LooseVersion(self.version) <= LooseVersion('2011.3.174'):
binprefix = 'bin'
elif LooseVersion(self.version) >= LooseVersion('2013_sp1'):
binprefix = 'bin'
else:
libprefix = 'compiler/lib/intel64'
bins = ['ifort']
if LooseVersion(self.version) < LooseVersion('2013'):
# idb is not shipped with ifort anymore in 2013.x versions (it is with icc though)
bins.append('idb')
libs = ['lib%s' % lib for lib in ['ifcore.a', 'ifcore.%s' % shlib_ext, 'iomp5.a', 'iomp5.%s' % shlib_ext]]
custom_paths = {
'files': [os.path.join(binprefix, x) for x in bins] + [os.path.join(libprefix, lib) for lib in libs],
'dirs': [],
}
# make very sure that expected 'compilers_and_libraries_<VERSION>/linux' subdir is there for recent versions,
# since we rely on it being there in make_module_req_guess
if self.comp_libs_subdir:
custom_paths['dirs'].append(self.comp_libs_subdir)
custom_commands = ["which ifort"]
IntelBase.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands)
def make_module_req_guess(self):
"""
Additional paths to consider for prepend-paths statements in module file
"""
guesses = super(EB_ifort, self).make_module_req_guess()
if LooseVersion(self.version) >= LooseVersion('2016'):
# This enables the creation of fortran 2008 bindings in MPI
guesses['CPATH'].append('include')
return guesses
|
mfraezz/osf.io | refs/heads/develop | addons/box/settings/defaults.py | 14 | # OAuth app keys
BOX_KEY = None
BOX_SECRET = None
# https://docs.box.com/docs/oauth-20#section-6-using-the-access-and-refresh-tokens
EXPIRY_TIME = 60 * 60 * 24 * 60 # 60 days
REFRESH_TIME = 5 * 60 # 5 minutes
BOX_OAUTH_TOKEN_ENDPOINT = 'https://www.box.com/api/oauth2/token'
BOX_OAUTH_AUTH_ENDPOINT = 'https://www.box.com/api/oauth2/authorize'
BOX_OAUTH_REVOKE_ENDPOINT = 'https://api.box.com/oauth2/revoke'
# Max file size permitted by frontend in megabytes
MAX_UPLOAD_SIZE = 250
|
Telestream/telestream-cloud-python-sdk | refs/heads/master | telestream_cloud_qc_sdk/telestream_cloud_qc/models/p_se_parameter_type.py | 1 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class PSeParameterType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
PSESTANDARD = "PSEStandard"
PSESTRICT = "PSEStrict"
allowable_values = [PSESTANDARD, PSESTRICT] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""PSeParameterType - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PSeParameterType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PSeParameterType):
return True
return self.to_dict() != other.to_dict()
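# A minimal usage sketch (illustrative): the generated client passes one of
# the allowed values wherever a PSeParameterType is expected.
#
#     pse = PSeParameterType.PSESTANDARD   # -> 'PSEStandard'
#     assert pse in PSeParameterType.allowable_values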
|
creativcoder/servo | refs/heads/master | tests/wpt/css-tests/tools/pytest/_pytest/pastebin.py | 181 | """ submit failure or test session information to a pastebin service. """
import pytest
import sys
import tempfile
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group._addoption('--pastebin', metavar="mode",
action='store', dest="pastebin", default=None,
choices=['failed', 'all'],
help="send failed|all info to bpaste.net pastebin service.")
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
import py
if config.option.pastebin == "all":
tr = config.pluginmanager.getplugin('terminalreporter')
# if no terminal reporter plugin is present, nothing we can do here;
# this can happen when this function executes in a slave node
# when using pytest-xdist, for example
if tr is not None:
# pastebin file will be utf-8 encoded binary file
config._pastebinfile = tempfile.TemporaryFile('w+b')
oldwrite = tr._tw.write
def tee_write(s, **kwargs):
oldwrite(s, **kwargs)
if py.builtin._istext(s):
s = s.encode('utf-8')
config._pastebinfile.write(s)
tr._tw.write = tee_write
def pytest_unconfigure(config):
if hasattr(config, '_pastebinfile'):
# get terminal contents and delete file
config._pastebinfile.seek(0)
sessionlog = config._pastebinfile.read()
config._pastebinfile.close()
del config._pastebinfile
# undo our patching in the terminal reporter
tr = config.pluginmanager.getplugin('terminalreporter')
del tr._tw.__dict__['write']
# write summary
tr.write_sep("=", "Sending information to Paste Service")
pastebinurl = create_new_paste(sessionlog)
tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents):
"""
Creates a new paste using bpaste.net service.
:contents: paste contents as utf-8 encoded bytes
:returns: url to the pasted contents
"""
import re
if sys.version_info < (3, 0):
from urllib import urlopen, urlencode
else:
from urllib.request import urlopen
from urllib.parse import urlencode
params = {
'code': contents,
'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
'expiry': '1week',
}
url = 'https://bpaste.net'
response = urlopen(url, data=urlencode(params).encode('ascii')).read()
m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
if m:
return '%s/show/%s' % (url, m.group(1))
else:
return 'bad response: ' + response
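# A minimal usage sketch (requires network access to bpaste.net; contents
# must be utf-8 encoded bytes, per the docstring above):
#
#     url = create_new_paste(b'hello from pytest')
#     print(url)   # e.g. https://bpaste.net/show/...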
def pytest_terminal_summary(terminalreporter):
import _pytest.config
if terminalreporter.config.option.pastebin != "failed":
return
tr = terminalreporter
if 'failed' in tr.stats:
terminalreporter.write_sep("=", "Sending information to Paste Service")
for rep in terminalreporter.stats.get('failed'):
try:
msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
except AttributeError:
msg = tr._getfailureheadline(rep)
tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
rep.toterminal(tw)
s = tw.stringio.getvalue()
assert len(s)
pastebinurl = create_new_paste(s)
tr.write_line("%s --> %s" %(msg, pastebinurl))
|