repo_name
stringlengths 5
100
| ref
stringlengths 12
67
| path
stringlengths 4
244
| copies
stringlengths 1
8
| content
stringlengths 0
1.05M
⌀ |
|---|---|---|---|---|
darafferty/factor
|
refs/heads/master
|
docs/source/numfig.py
|
17
|
from docutils.nodes import figure, caption, Text, reference, raw, SkipNode, Element
from sphinx.roles import XRefRole
# Element classes
class page_ref(reference):
    # Marker node produced by the :page: role; rendered as \pageref in LaTeX
    # and skipped entirely by the text/html writers (see setup()).
    pass
class num_ref(reference):
    # Marker node produced by the :num: role; resolved to a figure number
    # (LaTeX \ref or an HTML link) during doctree-resolved.
    pass
# Visit/depart functions
def skip_page_ref(self, node):
    # Visitor for builders with no page-number concept: emit nothing and
    # skip the node's children.
    raise SkipNode
def latex_visit_page_ref(self, node):
    # Emit a LaTeX \pageref to "<docname>:<target>"; raising SkipNode stops
    # the writer from descending into the reference's children.
    self.body.append("\\pageref{%s:%s}" % (node['refdoc'], node['reftarget']))
    raise SkipNode
def latex_visit_num_ref(self, node):
    """Emit a numbered LaTeX reference for a ``num_ref`` node.

    The node's ``reftarget`` is either ``label#target`` (rendered as a
    \\hyperref whose text is ``label \\ref*{...}``) or a bare ``target``
    (rendered as a plain \\ref).  Raises SkipNode so the writer does not
    descend into the node's children.
    """
    # Split only on the first '#': a target containing another '#' would
    # otherwise make the two-value unpacking below raise ValueError.
    fields = node['reftarget'].split('#', 1)
    if len(fields) > 1:
        label, target = fields
        ref_link = '%s:%s' % (node['refdoc'], target)
        latex = "\\hyperref[%s]{%s \\ref*{%s}}" % (ref_link, label, ref_link)
        self.body.append(latex)
    else:
        # No label given: plain numeric reference.
        self.body.append('\\ref{%s:%s}' % (node['refdoc'], fields[0]))
    raise SkipNode
def doctree_read(app, doctree):
    """Record, for every figure id, the document that defines it.

    Connected to the 'doctree-read' event.  The mapping is stored on the
    build environment as ``figid_docname_map`` and is consumed later when
    cross-document HTML links to figures are generated.
    """
    env = app.builder.env
    mapping = getattr(env, 'figid_docname_map', {})
    for fig in doctree.traverse(figure):
        for fig_id in fig['ids']:
            mapping[fig_id] = env.docname
    env.figid_docname_map = mapping
def doctree_resolved(app, doctree, docname):
    """Number figure captions and resolve ``num_ref`` nodes to links.

    Connected to the 'doctree-resolved' event.  Figures are numbered in
    document order; for non-LaTeX builders (LaTeX numbers figures itself)
    each caption is prefixed with "<figure_caption_prefix> <n>:" and every
    ``num_ref`` node is replaced with an HTML link or plain text carrying
    the figure number.
    """
    # First pass: assign sequential numbers to figures and record them by
    # node id so the reference pass below can look them up.
    i = 1
    figids = {}
    for figure_info in doctree.traverse(figure):
        if app.builder.name != 'latex' and app.config.number_figures:
            for cap in figure_info.traverse(caption):
                cap[0] = Text("%s %d: %s" % (app.config.figure_caption_prefix,
                                             i, cap[0]))
        for id in figure_info['ids']:
            figids[id] = i
        i += 1
    # Second pass: replace num_ref nodes with links (HTML) or plain text.
    if app.builder.name != 'latex':
        for ref_info in doctree.traverse(num_ref):
            if '#' in ref_info['reftarget']:
                # Split only on the first '#' so a target containing another
                # '#' cannot break the two-value unpacking.
                label, target = ref_info['reftarget'].split('#', 1)
                labelfmt = label + " %d"
            else:
                labelfmt = '%d'
                target = ref_info['reftarget']
            if target not in figids:
                # Unknown target: leave the node untouched rather than crash.
                continue
            if app.builder.name == 'html':
                target_doc = app.builder.env.figid_docname_map[target]
                link = "%s#%s" % (app.builder.get_relative_uri(docname,
                                                               target_doc),
                                  target)
                html = '<a class="pageref" href="%s">%s</a>' % (
                    link, labelfmt % (figids[target]))
                ref_info.replace_self(raw(html, html, format='html'))
            else:
                ref_info.replace_self(Text(labelfmt % (figids[target])))
def clean_env(app):
    # Reset per-build state on 'builder-inited' so figure ids recorded during
    # a previous build do not leak into this one.
    # NOTE(review): ``env.i`` is never read anywhere in this module;
    # presumably a leftover counter -- confirm before removing.
    app.builder.env.i = 1
    app.builder.env.figid_docname_map = {}
def setup(app):
    """Sphinx extension entry point: register config, nodes, roles, events."""
    # number_figures toggles caption numbering; figure_caption_prefix is the
    # text placed before the number (both rebuild the environment on change).
    app.add_config_value('number_figures', True, True)
    app.add_config_value('figure_caption_prefix', "Figure", True)
    # :page: role -> page_ref node. Only meaningful in LaTeX output; the
    # text and html writers skip it entirely.
    app.add_node(page_ref,
                 text=(skip_page_ref, None),
                 html=(skip_page_ref, None),
                 latex=(latex_visit_page_ref, None))
    app.add_role('page', XRefRole(nodeclass=page_ref))
    # :num: role -> num_ref node. Only a LaTeX visitor is registered here;
    # for other builders the node is replaced in doctree_resolved().
    app.add_node(num_ref,
                 latex=(latex_visit_num_ref, None))
    app.add_role('num', XRefRole(nodeclass=num_ref))
    app.connect("builder-inited", clean_env)
    app.connect('doctree-read', doctree_read)
    app.connect('doctree-resolved', doctree_resolved)
|
huran2014/huran.github.io
|
refs/heads/master
|
wot_gateway/usr/lib/python2.7/encodings/mac_croatian.py
|
593
|
""" Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless one-shot codec backed by the charmap tables defined below.

    def encode(self, input, errors='strict'):
        # Map each character through encoding_table (unicode -> byte).
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        # Map each byte through decoding_table (byte -> unicode).
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so no state is kept between calls
    # and the `final` flag can be ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding needs no buffering between calls, so the
    # `final` flag can be ignored.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream interface; all work is inherited from Codec.encode.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Stream interface; all work is inherited from Codec.decode.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used by the encodings package to register
    this codec under the name 'mac-croatian'."""
    return codecs.CodecInfo(
        name='mac-croatian',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
# Inverse mapping (unicode -> byte), derived from decoding_table once at
# import time by the codecs helper.
encoding_table = codecs.charmap_build(decoding_table)
|
hehongliang/tensorflow
|
refs/heads/master
|
tensorflow/contrib/slim/python/slim/nets/resnet_utils.py
|
69
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
  """A named tuple describing a ResNet block.

  Its parts are:
    scope: The scope of the `Block`.
    unit_fn: The ResNet unit function which takes as input a `Tensor` and
      returns another `Tensor` with the output of the ResNet unit.
    args: A list of length equal to the number of units in the `Block`. The
      list contains one (depth, depth_bottleneck, stride) tuple for each unit
      in the block to serve as argument to unit_fn.
  """
def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The integer subsampling factor.
    scope: Optional variable_scope.

  Returns:
    The input tensor unchanged when `factor == 1`; otherwise the tensor
    max-pooled with a [1, 1] kernel and stride `factor`.
  """
  if factor == 1:
    # Nothing to do -- avoid inserting a no-op pooling layer.
    return inputs
  return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME' padding.

  When stride > 1, explicit zero-padding followed by a 'VALID' convolution
  is used so the result matches subsample(conv2d(..., stride=1), stride)
  regardless of whether the input's height/width is even.  See
  ResnetUtilsTest.testConv2DSameEven() for the motivating case.

  Args:
    inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
    num_outputs: An integer, the number of output filters.
    kernel_size: An int with the kernel_size of the filters.
    stride: An integer, the output stride.
    rate: An integer, rate for atrous convolution.
    scope: Scope.

  Returns:
    output: A 4-D tensor of size [batch, height_out, width_out, channels]
      with the convolution output.
  """
  if stride == 1:
    return layers_lib.conv2d(inputs, num_outputs, kernel_size, stride=1,
                             rate=rate, padding='SAME', scope=scope)
  # Atrous convolution enlarges the effective kernel; pad so the output is
  # aligned the same way 'SAME' padding would align it.
  effective_k = kernel_size + (kernel_size - 1) * (rate - 1)
  total_pad = effective_k - 1
  pad_before = total_pad // 2
  pad_after = total_pad - pad_before
  padded = array_ops.pad(
      inputs, [[0, 0], [pad_before, pad_after], [pad_before, pad_after], [0, 0]])
  return layers_lib.conv2d(padded, num_outputs, kernel_size, stride=stride,
                           rate=rate, padding='VALID', scope=scope)
@add_arg_scope
def stack_blocks_dense(net,
                       blocks,
                       output_stride=None,
                       outputs_collections=None):
  """Stacks ResNet `Blocks` and controls output feature density.

  First, this function creates scopes for the ResNet in the form of
  'block_name/unit_1', 'block_name/unit_2', etc.

  Second, this function allows the user to explicitly control the ResNet
  output_stride, which is the ratio of the input to output spatial resolution.
  This is useful for dense prediction tasks such as semantic segmentation or
  object detection.

  Most ResNets consist of 4 ResNet blocks and subsample the activations by a
  factor of 2 when transitioning between consecutive ResNet blocks. This
  results to a nominal ResNet output_stride equal to 8. If we set the
  output_stride to half the nominal network stride (e.g., output_stride=4),
  then we compute responses twice.

  Control of the output feature density is implemented by atrous convolution.

  Args:
    net: A `Tensor` of size [batch, height, width, channels].
    blocks: A list of length equal to the number of ResNet `Blocks`. Each
      element is a ResNet `Block` object describing the units in the `Block`.
    output_stride: If `None`, then the output will be computed at the nominal
      network stride. If output_stride is not `None`, it specifies the
      requested ratio of input to output spatial resolution, which needs to be
      equal to the product of unit strides from the start up to some level of
      the ResNet. For example, if the ResNet employs units with strides
      1, 2, 1, 3, 4, 1, then valid values for the output_stride are
      1, 2, 6, 24 or None (which is equivalent to output_stride=24).
    outputs_collections: Collection to add the ResNet block outputs.

  Returns:
    net: Output tensor with stride equal to the specified output_stride.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  # The current_stride variable keeps track of the effective stride of the
  # activations. This allows us to invoke atrous convolution whenever applying
  # the next residual unit would result in the activations having stride
  # larger than the target output_stride.
  current_stride = 1

  # The atrous convolution rate parameter.
  rate = 1

  for block in blocks:
    with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
      for i, unit in enumerate(block.args):
        # Unit scopes are 1-based: 'unit_1', 'unit_2', ...
        if output_stride is not None and current_stride > output_stride:
          raise ValueError('The target output_stride cannot be reached.')
        with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
          # If we have reached the target output_stride, then we need to
          # employ atrous convolution with stride=1 and multiply the atrous
          # rate by the current unit's stride for use in subsequent layers.
          if output_stride is not None and current_stride == output_stride:
            # `unit` is a dict of unit_fn kwargs; override its stride with 1.
            net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
            rate *= unit.get('stride', 1)
          else:
            net = block.unit_fn(net, rate=1, **unit)
            current_stride *= unit.get('stride', 1)
      # Expose each block's output under the collection, keyed by scope name.
      net = utils.collect_named_outputs(outputs_collections, sc.name, net)

  if output_stride is not None and current_stride != output_stride:
    raise ValueError('The target output_stride cannot be reached.')

  return net
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
  appropriate for use in conjunction with the reference ResNet models
  released at https://github.com/KaimingHe/deep-residual-networks. When
  training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer
      activation statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale
      the activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  bn_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
  }

  conv_defaults = arg_scope(
      [layers_lib.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=bn_params)
  with conv_defaults:
    with arg_scope([layers.batch_norm], **bn_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
|
sudosurootdev/external_chromium_org
|
refs/heads/L5
|
components/policy/tools/syntax_check_policy_template_json.py
|
34
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Checks a policy_templates.json file for conformity to its syntax specification.
'''
import json
import optparse
import os
import re
import sys
# Regexes used to flag stray whitespace in message strings.
LEADING_WHITESPACE = re.compile('^([ \t]*)')
TRAILING_WHITESPACE = re.compile('.*?([ \t]+)$')
# Matches all non-empty strings that contain no whitespaces.
NO_WHITESPACE = re.compile('[^\s]+$')

# Convert a 'type' to the schema types it may be converted to.
# The 'dict' type represents structured JSON data, and can be converted
# to an 'object' or an 'array'.
TYPE_TO_SCHEMA = {
  'int': [ 'integer' ],
  'list': [ 'array' ],
  'dict': [ 'object', 'array' ],
  'main': [ 'boolean' ],
  'string': [ 'string' ],
  'int-enum': [ 'integer' ],
  'string-enum': [ 'string' ],
  'string-enum-list': [ 'array' ],
  'external': [ 'object' ],
}

# List of boolean policies that have been introduced with negative polarity in
# the past and should not trigger the negative polarity check.
LEGACY_INVERTED_POLARITY_WHITELIST = [
  'DeveloperToolsDisabled',
  'DeviceAutoUpdateDisabled',
  'Disable3DAPIs',
  'DisableAuthNegotiateCnameLookup',
  'DisablePluginFinder',
  'DisablePrintPreview',
  'DisableSafeBrowsingProceedAnyway',
  'DisableScreenshots',
  'DisableSpdy',
  'DisableSSLRecordSplitting',
  'DriveDisabled',
  'DriveDisabledOverCellular',
  'ExternalStorageDisabled',
  'SavingBrowserHistoryDisabled',
  'SyncDisabled',
]
class PolicyTemplateChecker(object):
  def __init__(self):
    # Running totals reported at the end of a check run.
    self.error_count = 0
    self.warning_count = 0
    self.num_policies = 0
    self.num_groups = 0
    self.num_policies_in_groups = 0
    # Parsed command-line options; set by the caller before checking.
    self.options = None
    # Feature names collected while checking; used for cross-validation.
    self.features = []
  def _Error(self, message, parent_element=None, identifier=None,
             offending_snippet=None):
    # Record one error and print it. When both |parent_element| and
    # |identifier| are known, the message is prefixed with the location
    # ("In policy Foo: ..."); |offending_snippet| is pretty-printed as JSON.
    self.error_count += 1
    error = ''
    if identifier is not None and parent_element is not None:
      error += 'In %s %s: ' % (parent_element, identifier)
    print error + 'Error: ' + message
    if offending_snippet is not None:
      print '  Offending:', json.dumps(offending_snippet, indent=2)
def _CheckContains(self, container, key, value_type,
optional=False,
parent_element='policy',
container_name=None,
identifier=None,
offending='__CONTAINER__',
regexp_check=None):
'''
Checks |container| for presence of |key| with value of type |value_type|.
If |value_type| is string and |regexp_check| is specified, then an error is
reported when the value does not match the regular expression object.
|value_type| can also be a list, if more than one type is supported.
The other parameters are needed to generate, if applicable, an appropriate
human-readable error message of the following form:
In |parent_element| |identifier|:
(if the key is not present):
Error: |container_name| must have a |value_type| named |key|.
Offending snippet: |offending| (if specified; defaults to |container|)
(if the value does not have the required type):
Error: Value of |key| must be a |value_type|.
Offending snippet: |container[key]|
Returns: |container[key]| if the key is present, None otherwise.
'''
if identifier is None:
try:
identifier = container.get('name')
except:
self._Error('Cannot access container name of "%s".' % container_name)
return None
if container_name is None:
container_name = parent_element
if offending == '__CONTAINER__':
offending = container
if key not in container:
if optional:
return
else:
self._Error('%s must have a %s "%s".' %
(container_name.title(), value_type.__name__, key),
container_name, identifier, offending)
return None
value = container[key]
value_types = value_type if isinstance(value_type, list) else [ value_type ]
if not any(isinstance(value, type) for type in value_types):
self._Error('Value of "%s" must be one of [ %s ].' %
(key, ', '.join([type.__name__ for type in value_types])),
container_name, identifier, value)
if str in value_types and regexp_check and not regexp_check.match(value):
self._Error('Value of "%s" must match "%s".' %
(key, regexp_check.pattern),
container_name, identifier, value)
return value
def _AddPolicyID(self, id, policy_ids, policy):
'''
Adds |id| to |policy_ids|. Generates an error message if the
|id| exists already; |policy| is needed for this message.
'''
if id in policy_ids:
self._Error('Duplicate id', 'policy', policy.get('name'),
id)
else:
policy_ids.add(id)
def _CheckPolicyIDs(self, policy_ids):
'''
Checks a set of policy_ids to make sure it contains a continuous range
of entries (i.e. no holes).
Holes would not be a technical problem, but we want to ensure that nobody
accidentally omits IDs.
'''
for i in range(len(policy_ids)):
if (i + 1) not in policy_ids:
self._Error('No policy with id: %s' % (i + 1))
  def _CheckPolicySchema(self, policy, policy_type):
    '''Checks that the 'schema' field matches the 'type' field.'''
    self._CheckContains(policy, 'schema', dict)
    if isinstance(policy.get('schema'), dict):
      self._CheckContains(policy['schema'], 'type', str)
      schema_type = policy['schema'].get('type')
      if schema_type not in TYPE_TO_SCHEMA[policy_type]:
        self._Error('Schema type must match the existing type for policy %s' %
                    policy.get('name'))

      # Checks that boolean policies are not negated (which makes them harder
      # to reason about).
      # NOTE(review): policy.get('name').lower() raises AttributeError when
      # 'name' is absent -- presumably 'name' is validated earlier; confirm.
      if (schema_type == 'boolean' and
          'disable' in policy.get('name').lower() and
          policy.get('name') not in LEGACY_INVERTED_POLARITY_WHITELIST):
        self._Error(('Boolean policy %s uses negative polarity, please make ' +
                     'new boolean policies follow the XYZEnabled pattern. ' +
                     'See also http://crbug.com/85687') % policy.get('name'))
  def _CheckPolicy(self, policy, is_in_group, policy_ids):
    '''
    Validates a single policy (or policy group) dictionary.

    |is_in_group| is True when |policy| is nested inside a policy group.
    |policy_ids| is the set of protobuf IDs seen so far; it is used to
    detect duplicate IDs. Problems are reported through self._Error and
    self.warning_count.
    '''
    if not isinstance(policy, dict):
      self._Error('Each policy must be a dictionary.', 'policy', None, policy)
      return
    # There should not be any unknown keys in |policy|.
    for key in policy:
      if key not in ('name', 'type', 'caption', 'desc', 'device_only',
                     'supported_on', 'label', 'policies', 'items',
                     'example_value', 'features', 'deprecated', 'future',
                     'id', 'schema', 'max_size'):
        self.warning_count += 1
        print ('In policy %s: Warning: Unknown key: %s' %
               (policy.get('name'), key))
    # Each policy must have a name.
    self._CheckContains(policy, 'name', str, regexp_check=NO_WHITESPACE)
    # Each policy must have a type.
    policy_types = ('group', 'main', 'string', 'int', 'list', 'int-enum',
                    'string-enum', 'string-enum-list', 'dict', 'external')
    policy_type = self._CheckContains(policy, 'type', str)
    if policy_type not in policy_types:
      self._Error('Policy type must be one of: ' + ', '.join(policy_types),
                  'policy', policy.get('name'), policy_type)
      return  # Can't continue for unsupported type.
    # Each policy must have a caption message.
    self._CheckContains(policy, 'caption', str)
    # Each policy must have a description message.
    self._CheckContains(policy, 'desc', str)
    # If 'label' is present, it must be a string.
    self._CheckContains(policy, 'label', str, True)
    # If 'deprecated' is present, it must be a bool.
    self._CheckContains(policy, 'deprecated', bool, True)
    # If 'future' is present, it must be a bool.
    self._CheckContains(policy, 'future', bool, True)
    if policy_type == 'group':
      # Groups must not be nested.
      if is_in_group:
        # NOTE(review): the whole |policy| dict is passed as the identifier
        # here; other call sites pass policy.get('name') — confirm intended.
        self._Error('Policy groups must not be nested.', 'policy', policy)
      # Each policy group must have a list of policies.
      policies = self._CheckContains(policy, 'policies', list)
      # Check sub-policies.
      if policies is not None:
        for nested_policy in policies:
          self._CheckPolicy(nested_policy, True, policy_ids)
      # Groups must not have an |id|.
      if 'id' in policy:
        self._Error('Policies of type "group" must not have an "id" field.',
                    'policy', policy)
      # Statistics.
      self.num_groups += 1
    else:  # policy_type != group
      # Each policy must have a protobuf ID.
      id = self._CheckContains(policy, 'id', int)
      self._AddPolicyID(id, policy_ids, policy)
      # 'schema' is the new 'type'.
      # TODO(joaodasilva): remove the 'type' checks once 'schema' is used
      # everywhere.
      self._CheckPolicySchema(policy, policy_type)
      # Each policy must have a supported_on list.
      supported_on = self._CheckContains(policy, 'supported_on', list)
      if supported_on is not None:
        for s in supported_on:
          if not isinstance(s, str):
            self._Error('Entries in "supported_on" must be strings.', 'policy',
                        policy, supported_on)
      # Each policy must have a 'features' dict.
      features = self._CheckContains(policy, 'features', dict)
      # All the features must have a documenting message.
      if features:
        for feature in features:
          if not feature in self.features:
            self._Error('Unknown feature "%s". Known features must have a '
                        'documentation string in the messages dictionary.' %
                        feature, 'policy', policy.get('name', policy))
        # All user policies must have a per_profile feature flag.
        # NOTE(review): if 'supported_on' is missing, |supported_on| is None
        # and filter() below raises TypeError (Python 2) — confirm the input
        # always carries 'supported_on', or guard for None here.
        if (not policy.get('device_only', False) and
            not policy.get('deprecated', False) and
            not filter(re.compile('^chrome_frame:.*').match, supported_on)):
          self._CheckContains(features, 'per_profile', bool,
                              container_name='features',
                              identifier=policy.get('name'))
        # All policies must declare whether they allow changes at runtime.
        self._CheckContains(features, 'dynamic_refresh', bool,
                            container_name='features',
                            identifier=policy.get('name'))
      # Each policy must have an 'example_value' of appropriate type.
      if policy_type == 'main':
        value_type = item_type = bool
      elif policy_type in ('string', 'string-enum'):
        value_type = item_type = str
      elif policy_type in ('int', 'int-enum'):
        value_type = item_type = int
      elif policy_type in ('list', 'string-enum-list'):
        value_type = list
        item_type = str
      elif policy_type == 'external':
        value_type = item_type = dict
      elif policy_type == 'dict':
        value_type = item_type = [ dict, list ]
      else:
        raise NotImplementedError('Unimplemented policy type: %s' % policy_type)
      self._CheckContains(policy, 'example_value', value_type)
      # Statistics.
      self.num_policies += 1
      if is_in_group:
        self.num_policies_in_groups += 1
    if policy_type in ('int-enum', 'string-enum', 'string-enum-list'):
      # Enums must contain a list of items.
      items = self._CheckContains(policy, 'items', list)
      if items is not None:
        if len(items) < 1:
          self._Error('"items" must not be empty.', 'policy', policy, items)
        for item in items:
          # Each item must have a name.
          # Note: |policy.get('name')| is used instead of |policy['name']|
          # because it returns None rather than failing when no key called
          # 'name' exists.
          self._CheckContains(item, 'name', str, container_name='item',
                              identifier=policy.get('name'),
                              regexp_check=NO_WHITESPACE)
          # Each item must have a value of the correct type.
          self._CheckContains(item, 'value', item_type, container_name='item',
                              identifier=policy.get('name'))
          # Each item must have a caption.
          self._CheckContains(item, 'caption', str, container_name='item',
                              identifier=policy.get('name'))
    if policy_type == 'external':
      # Each policy referencing external data must specify a maximum data size.
      self._CheckContains(policy, 'max_size', int)
  def _CheckMessage(self, key, value):
    '''
    Validates one entry of the 'messages' dictionary: |key| must be a
    string and |value| a dict holding 'desc' and 'text' (and nothing else).
    '''
    # |key| must be a string, |value| a dict.
    if not isinstance(key, str):
      self._Error('Each message key must be a string.', 'message', key, key)
      return
    if not isinstance(value, dict):
      self._Error('Each message must be a dictionary.', 'message', key, value)
      return
    # Each message must have a desc.
    self._CheckContains(value, 'desc', str, parent_element='message',
                        identifier=key)
    # Each message must have a text.
    self._CheckContains(value, 'text', str, parent_element='message',
                        identifier=key)
    # There should not be any unknown keys in |value|.
    for vkey in value:
      if vkey not in ('desc', 'text'):
        self.warning_count += 1
        print 'In message %s: Warning: Unknown key: %s' % (key, vkey)
def _LeadingWhitespace(self, line):
match = LEADING_WHITESPACE.match(line)
if match:
return match.group(1)
return ''
def _TrailingWhitespace(self, line):
match = TRAILING_WHITESPACE.match(line)
if match:
return match.group(1)
return ''
  def _LineError(self, message, line_number):
    '''Report a formatting error at |line_number| and bump error_count.'''
    self.error_count += 1
    print 'In line %d: Error: %s' % (line_number, message)
  def _LineWarning(self, message, line_number):
    '''Report an auto-fixed formatting issue at |line_number| as a warning.'''
    self.warning_count += 1
    print ('In line %d: Warning: Automatically fixing formatting: %s'
           % (line_number, message))
  def _CheckFormat(self, filename):
    '''
    Checks the formatting of |filename|: trailing whitespace, tab
    characters, and 2-space indentation driven by bracket nesting
    ('[' / '{' indent, ']' / '}' dedent). When --fix is given, repaired
    lines are collected and written back (optionally after making a
    .bak backup); otherwise problems are reported as errors.
    '''
    if self.options.fix:
      fixed_lines = []
    with open(filename) as f:
      indent = 0
      line_number = 0
      for line in f:
        line_number += 1
        line = line.rstrip('\n')
        # Check for trailing whitespace.
        trailing_whitespace = self._TrailingWhitespace(line)
        if len(trailing_whitespace) > 0:
          if self.options.fix:
            line = line.rstrip()
            self._LineWarning('Trailing whitespace.', line_number)
          else:
            self._LineError('Trailing whitespace.', line_number)
        if self.options.fix:
          if len(line) == 0:
            fixed_lines += ['\n']
            continue
        else:
          if line == trailing_whitespace:
            # This also catches the case of an empty line.
            continue
        # Check for correct amount of leading whitespace.
        leading_whitespace = self._LeadingWhitespace(line)
        if leading_whitespace.count('\t') > 0:
          if self.options.fix:
            leading_whitespace = leading_whitespace.replace('\t', ' ')
            line = leading_whitespace + line.lstrip()
            self._LineWarning('Tab character found.', line_number)
          else:
            self._LineError('Tab character found.', line_number)
        # Empty lines were skipped above, so indexing the line here is safe.
        if line[len(leading_whitespace)] in (']', '}'):
          indent -= 2
        if line[0] != '#':  # Ignore 0-indented comments.
          if len(leading_whitespace) != indent:
            if self.options.fix:
              line = ' ' * indent + line.lstrip()
              self._LineWarning('Indentation should be ' + str(indent) +
                                ' spaces.', line_number)
            else:
              self._LineError('Bad indentation. Should be ' + str(indent) +
                              ' spaces.', line_number)
        if line[-1] in ('[', '{'):
          indent += 2
        if self.options.fix:
          fixed_lines.append(line + '\n')
    # If --fix is specified: backup the file (deleting any existing backup),
    # then write the fixed version with the old filename.
    if self.options.fix:
      if self.options.backup:
        backupfilename = filename + '.bak'
        if os.path.exists(backupfilename):
          os.remove(backupfilename)
        os.rename(filename, backupfilename)
      with open(filename, 'w') as f:
        f.writelines(fixed_lines)
  def Main(self, filename, options):
    '''
    Loads |filename|, evaluates it as a Python literal, and runs all
    structure and formatting checks. Returns 0 on success, 1 when errors
    were found or the file could not be parsed.
    '''
    try:
      with open(filename) as f:
        # NOTE(review): eval() executes arbitrary code from the file; this
        # tool is presumably only run on trusted, checked-in templates —
        # confirm it is never pointed at untrusted input.
        data = eval(f.read())
    except:
      import traceback
      traceback.print_exc(file=sys.stdout)
      self._Error('Invalid Python/JSON syntax.')
      return 1
    # ('is None' would be more idiomatic; kept as-is.)
    if data == None:
      self._Error('Invalid Python/JSON syntax.')
      return 1
    self.options = options
    # First part: check JSON structure.
    # Check (non-policy-specific) message definitions.
    messages = self._CheckContains(data, 'messages', dict,
                                   parent_element=None,
                                   container_name='The root element',
                                   offending=None)
    if messages is not None:
      for message in messages:
        self._CheckMessage(message, messages[message])
        # 'doc_feature_<name>' messages register <name> as a known feature.
        if message.startswith('doc_feature_'):
          self.features.append(message[12:])
    # Check policy definitions.
    policy_definitions = self._CheckContains(data, 'policy_definitions', list,
                                             parent_element=None,
                                             container_name='The root element',
                                             offending=None)
    if policy_definitions is not None:
      policy_ids = set()
      for policy in policy_definitions:
        self._CheckPolicy(policy, False, policy_ids)
      self._CheckPolicyIDs(policy_ids)
    # Second part: check formatting.
    self._CheckFormat(filename)
    # Third part: summary and exit.
    print ('Finished checking %s. %d errors, %d warnings.' %
           (filename, self.error_count, self.warning_count))
    if self.options.stats:
      if self.num_groups > 0:
        print ('%d policies, %d of those in %d groups (containing on '
               'average %.1f policies).' %
               (self.num_policies, self.num_policies_in_groups, self.num_groups,
                (1.0 * self.num_policies_in_groups / self.num_groups)))
      else:
        print self.num_policies, 'policies, 0 policy groups.'
    if self.error_count > 0:
      return 1
    return 0
def Run(self, argv, filename=None):
parser = optparse.OptionParser(
usage='usage: %prog [options] filename',
description='Syntax check a policy_templates.json file.')
parser.add_option('--fix', action='store_true',
help='Automatically fix formatting.')
parser.add_option('--backup', action='store_true',
help='Create backup of original file (before fixing).')
parser.add_option('--stats', action='store_true',
help='Generate statistics.')
(options, args) = parser.parse_args(argv)
if filename is None:
if len(args) != 2:
parser.print_help()
sys.exit(1)
filename = args[1]
return self.Main(filename, options)
# Entry point: run the checker on the command-line arguments and propagate
# its result as the process exit status.
if __name__ == '__main__':
  sys.exit(PolicyTemplateChecker().Run(sys.argv))
|
allenp/odoo
|
refs/heads/9.0
|
addons/account/models/account_bank_statement.py
|
1
|
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.osv import expression
from openerp.tools import float_is_zero
from openerp.tools import float_compare, float_round
from openerp.tools.misc import formatLang
from openerp.exceptions import UserError, ValidationError
import time
import math
class AccountCashboxLine(models.Model):
    """ One counted denomination (coin or bill) inside a cashbox wizard. """
    _name = 'account.cashbox.line'
    _description = 'CashBox Line'
    _rec_name = 'coin_value'
    _order = 'coin_value'

    @api.one
    @api.depends('coin_value', 'number')
    def _sub_total(self):
        """ Calculates Sub total"""
        # subtotal = unit value of the denomination times the count.
        self.subtotal = self.coin_value * self.number

    # Unit value of a single coin or bill (e.g. 0.05, 20.0).
    coin_value = fields.Float(string='Coin/Bill Value', required=True, digits=0)
    # How many units of this denomination were counted.
    number = fields.Integer(string='Number of Coins/Bills', help='Opening Unit Numbers')
    # Computed as coin_value * number; see _sub_total().
    subtotal = fields.Float(compute='_sub_total', string='Subtotal', digits=0, readonly=True)
    # Back-reference to the cashbox wizard this line belongs to.
    cashbox_id = fields.Many2one('account.bank.statement.cashbox')
class AccountBankStmtCashWizard(models.Model):
    """
    Popup wizard used to enter the cash details (coins/bills counted) of a
    bank statement's opening or closing balance.
    """
    _name = 'account.bank.statement.cashbox'
    _description = 'Account Bank Statement Cashbox Details'

    cashbox_lines_ids = fields.One2many('account.cashbox.line', 'cashbox_id', string='Cashbox Lines')

    @api.multi
    def validate(self):
        """Write the counted total onto the statement and close the popup."""
        # The target statement comes from the wizard context.
        statement_id = self.env.context.get('bank_statement_id', False) or self.env.context.get('active_id', False)
        statement = self.env['account.bank.statement'].browse(statement_id)
        counted_total = sum((line.subtotal for line in self.cashbox_lines_ids), 0.0)
        if self.env.context.get('balance', False) == 'start':
            # Counting performed when the cash session is opened.
            statement.write({'balance_start': counted_total, 'cashbox_start_id': self.id})
        else:
            # Counting performed when the cash session is closed.
            statement.write({'balance_end_real': counted_total, 'cashbox_end_id': self.id})
        return {'type': 'ir.actions.act_window_close'}
class AccountBankStmtCloseCheck(models.TransientModel):
    """
    Confirmation wizard checking that a bank statement's closing balance
    is correct before validating it.
    """
    _name = 'account.bank.statement.closebalance'
    _description = 'Account Bank Statement closing balance'

    @api.multi
    def validate(self):
        """Confirm the statement from the context, then close the popup."""
        statement_id = self.env.context.get('active_id', False)
        if statement_id:
            statement = self.env['account.bank.statement'].browse(statement_id)
            statement.button_confirm_bank()
        return {'type': 'ir.actions.act_window_close'}
class AccountBankStatement(models.Model):
    """A bank (or cash) statement: a set of transaction lines over a period,
    with an opening balance, a real ending balance and a computed one."""

    @api.one
    @api.depends('line_ids', 'balance_start', 'line_ids.amount', 'balance_end_real')
    def _end_balance(self):
        """Compute the lines subtotal, the theoretical ending balance and the
        difference with the ending balance entered by the user."""
        self.total_entry_encoding = sum([line.amount for line in self.line_ids])
        self.balance_end = self.balance_start + self.total_entry_encoding
        self.difference = self.balance_end_real - self.balance_end

    @api.one
    @api.depends('journal_id')
    def _compute_currency(self):
        """Statement currency: the journal's currency, else the company's."""
        self.currency_id = self.journal_id.currency_id or self.env.user.company_id.currency_id

    @api.one
    @api.depends('line_ids.journal_entry_ids')
    def _check_lines_reconciled(self):
        """True when every line is reconciled or has a preset counterpart account."""
        self.all_lines_reconciled = all([line.journal_entry_ids.ids or line.account_id.id for line in self.line_ids])

    @api.model
    def _default_journal(self):
        """Pick the first journal of the context's journal_type for the
        current company, or False when no type is given / none matches."""
        journal_type = self.env.context.get('journal_type', False)
        company_id = self.env['res.company']._company_default_get('account.bank.statement').id
        if journal_type:
            journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
            if journals:
                return journals[0]
        return False

    @api.multi
    def _set_opening_balance(self, journal_id):
        """Set each statement's opening balance to the closing balance of the
        journal's last confirmed statement (0 when there is none)."""
        last_bnk_stmt = self.search([('journal_id', '=', journal_id), ('state', '=', 'confirm')], order="date_done desc", limit=1)
        for bank_stmt in self:
            if last_bnk_stmt:
                bank_stmt.balance_start = last_bnk_stmt.balance_end
            else:
                bank_stmt.balance_start = 0

    @api.model
    def _default_opening_balance(self):
        """Default opening balance: closing balance of the journal's last
        confirmed statement, 0 otherwise."""
        journal_id = self._context.get('default_journal_id', False) or self._context.get('journal_id', False)
        if journal_id:
            last_bnk_stmt = self.search([('journal_id', '=', journal_id), ('state', '=', 'confirm')], order="date_done desc", limit=1)
            if last_bnk_stmt:
                return last_bnk_stmt.balance_end
        return 0

    _name = "account.bank.statement"
    _description = "Bank Statement"
    _order = "date desc, id desc"
    _inherit = ['mail.thread']

    name = fields.Char(string='Reference', states={'open': [('readonly', False)]}, copy=False, readonly=True)
    date = fields.Date(required=True, states={'confirm': [('readonly', True)]}, select=True, copy=False, default=fields.Date.context_today)
    date_done = fields.Datetime(string="Closed On")
    balance_start = fields.Monetary(string='Starting Balance', states={'confirm': [('readonly', True)]}, default=_default_opening_balance)
    balance_end_real = fields.Monetary('Ending Balance', states={'confirm': [('readonly', True)]})
    state = fields.Selection([('open', 'New'), ('confirm', 'Validated')], string='Status', required=True, readonly=True, copy=False, default='open')
    currency_id = fields.Many2one('res.currency', compute='_compute_currency', oldname='currency')
    journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'confirm': [('readonly', True)]}, default=_default_journal)
    journal_type = fields.Selection(related='journal_id.type', help="Technical field used for usability purposes")
    company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
        default=lambda self: self.env['res.company']._company_default_get('account.bank.statement'))
    total_entry_encoding = fields.Monetary('Transactions Subtotal', compute='_end_balance', store=True, help="Total of transaction lines.")
    balance_end = fields.Monetary('Computed Balance', compute='_end_balance', store=True, help='Balance as calculated based on Opening Balance and transaction lines')
    difference = fields.Monetary(compute='_end_balance', store=True, help="Difference between the computed ending balance and the specified ending balance.")
    line_ids = fields.One2many('account.bank.statement.line', 'statement_id', string='Statement lines', states={'confirm': [('readonly', True)]}, copy=True)
    move_line_ids = fields.One2many('account.move.line', 'statement_id', string='Entry lines', states={'confirm': [('readonly', True)]})
    all_lines_reconciled = fields.Boolean(compute='_check_lines_reconciled')
    user_id = fields.Many2one('res.users', string='Responsible', required=False, default=lambda self: self.env.user)
    cashbox_start_id = fields.Many2one('account.bank.statement.cashbox')
    cashbox_end_id = fields.Many2one('account.bank.statement.cashbox')

    @api.onchange('journal_id')
    def onchange_journal_id(self):
        """Refresh the opening balance when the journal changes."""
        self._set_opening_balance(self.journal_id.id)

    @api.multi
    def _balance_check(self):
        """Verify the ending balance. For cash journals, post the difference
        as a profit/loss line; for other journals, refuse to validate."""
        for stmt in self:
            if not stmt.currency_id.is_zero(stmt.difference):
                if stmt.journal_type == 'cash':
                    if stmt.difference < 0.0:
                        account = stmt.journal_id.loss_account_id
                        name = _('Loss')
                    else:
                        # statement.difference > 0.0
                        account = stmt.journal_id.profit_account_id
                        name = _('Profit')
                    if not account:
                        raise UserError(_('There is no account defined on the journal %s for %s involved in a cash difference.') % (stmt.journal_id.name, name))
                    values = {
                        'statement_id': stmt.id,
                        'account_id': account.id,
                        'amount': stmt.difference,
                        'name': _("Cash difference observed during the counting (%s)") % name,
                    }
                    self.env['account.bank.statement.line'].create(values)
                else:
                    balance_end_real = formatLang(self.env, stmt.balance_end_real, currency_obj=stmt.currency_id)
                    balance_end = formatLang(self.env, stmt.balance_end, currency_obj=stmt.currency_id)
                    raise UserError(_('The ending balance is incorrect !\nThe expected balance (%s) is different from the computed one. (%s)')
                        % (balance_end_real, balance_end))
        return True

    @api.model
    def create(self, vals):
        """Assign the journal sequence's next number when no name is given."""
        if not vals.get('name'):
            journal_id = vals.get('journal_id', self._context.get('default_journal_id', False))
            journal = self.env['account.journal'].browse(journal_id)
            vals['name'] = journal.sequence_id.with_context(ir_sequence_date=vals.get('date')).next_by_id()
        return super(AccountBankStatement, self).create(vals)

    @api.multi
    def unlink(self):
        """Only non-validated statements may be deleted."""
        for statement in self:
            if statement.state != 'open':
                raise UserError(_('In order to delete a bank statement, you must first cancel it to delete related journal items.'))
            # Explicitly unlink bank statement lines so it will check that the related journal entries have been deleted first
            statement.line_ids.unlink()
        return super(AccountBankStatement, self).unlink()

    @api.multi
    def open_cashbox_id(self):
        """Open the cash-control popup for the cashbox in the context."""
        context = dict(self.env.context or {})
        if context.get('cashbox_id'):
            context['active_id'] = self.id
            return {
                'name': _('Cash Control'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'account.bank.statement.cashbox',
                'view_id': self.env.ref('account.view_account_bnk_stmt_cashbox').id,
                'type': 'ir.actions.act_window',
                'res_id': self.env.context.get('cashbox_id'),
                'context': context,
                'target': 'new'
            }

    @api.multi
    def button_cancel(self):
        """Reset validated statements to 'open' (only when no line is reconciled)."""
        for statement in self:
            if any(line.journal_entry_ids.ids for line in statement.line_ids):
                raise UserError(_('A statement cannot be canceled when its lines are reconciled.'))
        self.state = 'open'

    @api.multi
    def check_confirm_bank(self):
        """For cash statements with a non-zero difference, open the closing
        balance check wizard instead of confirming directly."""
        if self.journal_type == 'cash' and not self.currency_id.is_zero(self.difference):
            action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_view_account_bnk_stmt_check')
            if action_rec:
                action = action_rec.read([])[0]
                return action
        return self.button_confirm_bank()

    @api.multi
    def button_confirm_bank(self):
        """Validate the statement: every line must be processed; post the
        resulting journal entries and mark the statement confirmed."""
        self._balance_check()
        statements = self.filtered(lambda r: r.state == 'open')
        for statement in statements:
            moves = self.env['account.move']
            for st_line in statement.line_ids:
                if st_line.account_id and not st_line.journal_entry_ids.ids:
                    st_line.fast_counterpart_creation()
                elif not st_line.journal_entry_ids.ids:
                    raise UserError(_('All the account entries lines must be processed in order to close the statement.'))
                moves = (moves | st_line.journal_entry_ids)
            if moves:
                moves.post()
            statement.message_post(body=_('Statement %s confirmed, journal items were created.') % (statement.name,))
        statements.link_bank_to_partner()
        statements.write({'state': 'confirm', 'date_done': time.strftime("%Y-%m-%d %H:%M:%S")})

    @api.multi
    def button_journal_entries(self):
        """Open the journal items generated by this statement."""
        context = dict(self._context or {})
        context['journal_id'] = self.journal_id.id
        return {
            'name': _('Journal Items'),
            'view_type': 'form',
            'view_mode': 'tree',
            'res_model': 'account.move.line',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'domain': [('statement_id', 'in', self.ids)],
            'context': context,
        }

    @api.multi
    def button_open(self):
        """ Changes statement state to Running."""
        for statement in self:
            if not statement.name:
                # BUGFIX: this used to be the set literal
                # {'ir_sequence_date', statement.date} (comma instead of
                # colon), which made with_context() replace the context with
                # a set and broke the sequence's date lookup.
                context = {'ir_sequence_date': statement.date}
                if statement.journal_id.sequence_id:
                    st_number = statement.journal_id.sequence_id.with_context(context).next_by_id()
                else:
                    SequenceObj = self.env['ir.sequence']
                    st_number = SequenceObj.with_context(context).next_by_code('account.bank.statement')
                statement.name = st_number
            statement.state = 'open'

    @api.multi
    def reconciliation_widget_preprocess(self):
        """ Get statement lines of the specified statements or all unreconciled statement lines and try to automatically reconcile them / find them a partner.
            Return ids of statement lines left to reconcile and other data for the reconciliation widget.
        """
        statements = self
        bsl_obj = self.env['account.bank.statement.line']
        # NB : The field account_id can be used at the statement line creation/import to avoid the reconciliation process on it later on,
        # this is why we filter out statements lines where account_id is set
        st_lines_filter = [('journal_entry_ids', '=', False), ('account_id', '=', False)]
        if statements:
            st_lines_filter += [('statement_id', 'in', statements.ids)]
        # Try to automatically reconcile statement lines
        automatic_reconciliation_entries = []
        st_lines_left = self.env['account.bank.statement.line']
        for st_line in bsl_obj.search(st_lines_filter):
            res = st_line.auto_reconcile()
            if not res:
                st_lines_left = (st_lines_left | st_line)
            else:
                automatic_reconciliation_entries.append(res.ids)
        # Try to set statement line's partner
        for st_line in st_lines_left:
            if st_line.name and not st_line.partner_id:
                additional_domain = [('ref', '=', st_line.name)]
                match_recs = st_line.get_move_lines_for_reconciliation(limit=1, additional_domain=additional_domain, overlook_partner=True)
                if match_recs and match_recs[0].partner_id:
                    st_line.write({'partner_id': match_recs[0].partner_id.id})
        # Collect various informations for the reconciliation widget
        notifications = []
        num_auto_reconciled = len(automatic_reconciliation_entries)
        if num_auto_reconciled > 0:
            auto_reconciled_message = num_auto_reconciled > 1 \
                and _("%d transactions were automatically reconciled.") % num_auto_reconciled \
                or _("1 transaction was automatically reconciled.")
            notifications += [{
                'type': 'info',
                'message': auto_reconciled_message,
                'details': {
                    'name': _("Automatically reconciled items"),
                    'model': 'account.move',
                    'ids': automatic_reconciliation_entries
                }
            }]
        lines = []
        for el in statements:
            lines.extend(el.line_ids.ids)
        lines = list(set(lines))
        return {
            'st_lines_ids': st_lines_left.ids,
            'notifications': notifications,
            'statement_name': len(statements) == 1 and statements[0].name or False,
            'num_already_reconciled_lines': statements and bsl_obj.search_count([('journal_entry_ids', '!=', False), ('id', 'in', lines)]) or 0,
        }

    @api.multi
    def link_bank_to_partner(self):
        """Attach each line's bank account to the line's partner when they differ."""
        for statement in self:
            for st_line in statement.line_ids:
                if st_line.bank_account_id and st_line.partner_id and st_line.bank_account_id.partner_id != st_line.partner_id:
                    st_line.bank_account_id.partner_id = st_line.partner_id
class AccountBankStatementLine(models.Model):
    _name = "account.bank.statement.line"
    _description = "Bank Statement Line"
    _order = "statement_id desc, sequence"
    _inherit = ['ir.needaction_mixin']

    # Free-form label of the transaction as shown on the statement.
    name = fields.Char(string='Memo', required=True)
    # Transaction date; defaults to the context date, else today.
    date = fields.Date(required=True, default=lambda self: self._context.get('date', fields.Date.context_today(self)))
    # Signed amount of the transaction, expressed in the statement currency.
    amount = fields.Monetary(digits=0, currency_field='journal_currency_id')
    # Currency of the enclosing statement (technical helper for 'amount').
    journal_currency_id = fields.Many2one('res.currency', related='statement_id.currency_id',
        help='Utility field to express amount currency', readonly=True)
    partner_id = fields.Many2one('res.partner', string='Partner')
    bank_account_id = fields.Many2one('res.partner.bank', string='Bank Account')
    # When set, reconciliation is skipped and a counterpart is created here.
    account_id = fields.Many2one('account.account', string='Counterpart Account', domain=[('deprecated', '=', False)],
        help="This technical field can be used at the statement line creation/import time in order to avoid the reconciliation"
             " process on it later on. The statement line will simply create a counterpart on this account")
    statement_id = fields.Many2one('account.bank.statement', string='Statement', index=True, required=True, ondelete='cascade')
    journal_id = fields.Many2one('account.journal', related='statement_id.journal_id', string='Journal', store=True, readonly=True)
    # Third-party name from electronic imports when no partner record exists.
    partner_name = fields.Char(help="This field is used to record the third party name when importing bank statement in electronic format,"
                                    " when the partner doesn't exist yet in the database (or cannot be found).")
    ref = fields.Char(string='Reference')
    note = fields.Text(string='Notes')
    sequence = fields.Integer(index=True, help="Gives the sequence order when displaying a list of bank statement lines.", default=1)
    company_id = fields.Many2one('res.company', related='statement_id.company_id', string='Company', store=True, readonly=True)
    # Journal entries produced when the line is reconciled/processed.
    journal_entry_ids = fields.One2many('account.move', 'statement_line_id', 'Journal Entries', copy=False, readonly=True)
    amount_currency = fields.Monetary(help="The amount expressed in an optional other currency if it is a multi-currency entry.")
    currency_id = fields.Many2one('res.currency', string='Currency', help="The optional other currency if it is a multi-currency entry.")
@api.one
@api.constrains('amount')
def _check_amount(self):
# This constraint could possibly underline flaws in bank statement import (eg. inability to
# support hacks such as using dummy transactions to give additional informations)
if self.amount == 0:
raise ValidationError(_('A transaction can\'t have a 0 amount.'))
@api.one
@api.constrains('amount', 'amount_currency')
def _check_amount_currency(self):
if self.amount_currency != 0 and self.amount == 0:
raise ValidationError(_('If "Amount Currency" is specified, then "Amount" must be as well.'))
@api.multi
def unlink(self):
for line in self:
if line.journal_entry_ids.ids:
raise UserError(_('In order to delete a bank statement line, you must first cancel it to delete related journal items.'))
return super(AccountBankStatementLine, self).unlink()
@api.model
def _needaction_domain_get(self):
return [('journal_entry_ids', '=', False), ('account_id', '=', False)]
    @api.multi
    def button_cancel_reconciliation(self):
        """Undo the reconciliation of the lines in |self|: unreconcile, detach,
        cancel and delete their journal entries."""
        # TOCHECK : might not behave as expected in case of reconciliations (match statement line with already
        # registered payment) or partial reconciliations : it will completely remove the existing payment.
        move_recs = self.env['account.move']
        for st_line in self:
            move_recs = (move_recs | st_line.journal_entry_ids)
        if move_recs:
            # Order matters: unreconcile first, then detach from the statement
            # lines, then cancel so the moves become deletable.
            for move in move_recs:
                move.line_ids.remove_move_reconcile()
            move_recs.write({'statement_line_id': False})
            move_recs.button_cancel()
            move_recs.unlink()
####################################################
# Reconciliation interface methods
####################################################
    @api.multi
    def get_data_for_reconciliation_widget(self, excluded_ids=None):
        """ Returns the data required to display a reconciliation widget, for each statement line in self """
        excluded_ids = excluded_ids or []
        ret = []
        for st_line in self:
            # Candidate move lines to match this statement line against.
            aml_recs = st_line.get_reconciliation_proposition(excluded_ids=excluded_ids)
            # Display currency: line currency, else journal, else company.
            target_currency = st_line.currency_id or st_line.journal_id.currency_id or st_line.journal_id.company_id.currency_id
            rp = aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=st_line.date)
            # Don't propose the same move line to two different statement lines.
            excluded_ids += [move_line['id'] for move_line in rp]
            ret.append({
                'st_line': st_line.get_statement_line_for_reconciliation_widget(),
                'reconciliation_proposition': rp
            })
        return ret
    def get_statement_line_for_reconciliation_widget(self):
        """ Returns the data required by the bank statement reconciliation widget to display a statement line """
        statement_currency = self.journal_id.currency_id or self.journal_id.company_id.currency_id
        if self.amount_currency and self.currency_id:
            # NOTE(review): 'amount' and 'amount_currency' are deliberately
            # swapped here so the widget shows the line in its own (foreign)
            # currency with the statement-currency amount alongside — confirm.
            amount = self.amount_currency
            amount_currency = self.amount
            # abs() via the py2 and/or idiom.
            amount_currency_str = amount_currency > 0 and amount_currency or -amount_currency
            amount_currency_str = formatLang(self.env, amount_currency_str, currency_obj=statement_currency)
        else:
            amount = self.amount
            amount_currency_str = ""
        amount_str = formatLang(self.env, abs(amount), currency_obj=self.currency_id or statement_currency)
        data = {
            'id': self.id,
            'ref': self.ref,
            'note': self.note or "",
            'name': self.name,
            'date': self.date,
            'amount': amount,
            'amount_str': amount_str,  # Amount in the statement line currency
            'currency_id': self.currency_id.id or statement_currency.id,
            'partner_id': self.partner_id.id,
            'journal_id': self.journal_id.id,
            'statement_id': self.statement_id.id,
            'account_code': self.journal_id.default_debit_account_id.code,
            'account_name': self.journal_id.default_debit_account_id.name,
            'partner_name': self.partner_id.name,
            'communication_partner_name': self.partner_name,
            'amount_currency_str': amount_currency_str,  # Amount in the statement currency
            'has_no_partner': not self.partner_id.id,
        }
        if self.partner_id:
            # Open-balance counterpart: receivable for inflows, payable for outflows.
            if amount > 0:
                data['open_balance_account_id'] = self.partner_id.property_account_receivable_id.id
            else:
                data['open_balance_account_id'] = self.partner_id.property_account_payable_id.id
        return data
    @api.multi
    def get_move_lines_for_reconciliation_widget(self, excluded_ids=None, str=False, offset=0, limit=None):
        """ Returns move lines for the bank statement reconciliation widget, formatted as a list of dicts

        NOTE(review): the 'str' parameter (a search string) shadows the
        builtin; it cannot be renamed without breaking keyword callers.
        """
        aml_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str, offset=offset, limit=limit)
        # Display currency: line currency, else journal, else company.
        target_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
        return aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=self.date)
####################################################
# Reconciliation methods
####################################################
    def get_move_lines_for_reconciliation(self, excluded_ids=None, str=False, offset=0, limit=None, additional_domain=None, overlook_partner=False):
        """ Return account.move.line records which can be used for bank statement reconciliation.

            :param excluded_ids: move line ids that must not appear in the result
            :param str: filter string (shadows the builtin; name kept for API compatibility), forwarded to domain_move_lines_for_reconciliation
            :param offset: passed through to search()
            :param limit: passed through to search()
            :param additional_domain: extra search domain, AND-ed with the computed one
            :param overlook_partner: when True, also search payable/receivable accounts even if the line has no partner
        """
        # Domain to fetch registered payments (use case where you encode the payment before you get the bank statement)
        reconciliation_aml_accounts = [self.journal_id.default_credit_account_id.id, self.journal_id.default_debit_account_id.id]
        domain_reconciliation = ['&', ('statement_id', '=', False), ('account_id', 'in', reconciliation_aml_accounts)]
        # Domain to fetch unreconciled payables/receivables (use case where you close invoices/refunds by reconciling your bank statements)
        domain_matching = [('reconciled', '=', False)]
        if self.partner_id.id or overlook_partner:
            domain_matching = expression.AND([domain_matching, [('account_id.internal_type', 'in', ['payable', 'receivable'])]])
        else:
            # TODO : find out what use case this permits (match a check payment, registered on a journal whose account type is other instead of liquidity)
            domain_matching = expression.AND([domain_matching, [('account_id.reconcile', '=', True)]])
        # Let's add what applies to both
        domain = expression.OR([domain_reconciliation, domain_matching])
        if self.partner_id.id and not overlook_partner:
            domain = expression.AND([domain, [('partner_id', '=', self.partner_id.id)]])
        # Domain factorized for all reconciliation use cases
        # The statement line is put in context so domain_move_lines_for_reconciliation can use it.
        ctx = dict(self._context or {})
        ctx['bank_statement_line'] = self
        generic_domain = self.env['account.move.line'].with_context(ctx).domain_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str)
        domain = expression.AND([domain, generic_domain])
        # Domain from caller
        if additional_domain is None:
            additional_domain = []
        else:
            additional_domain = expression.normalize_domain(additional_domain)
        domain = expression.AND([domain, additional_domain])
        return self.env['account.move.line'].search(domain, offset=offset, limit=limit, order="date_maturity asc, id asc")
def _get_domain_maker_move_line_amount(self):
""" Returns a function that can create the appropriate domain to search on move.line amount based on statement.line currency/amount """
company_currency = self.journal_id.company_id.currency_id
st_line_currency = self.currency_id or self.journal_id.currency_id
currency = (st_line_currency and st_line_currency != company_currency) and st_line_currency.id or False
field = currency and 'amount_residual_currency' or 'amount_residual'
precision = st_line_currency and st_line_currency.decimal_places or company_currency.decimal_places
def ret(comparator, amount, p=precision, f=field, c=currency):
if comparator == '<':
if amount < 0:
domain = [(f, '<', 0), (f, '>', amount)]
else:
domain = [(f, '>', 0), (f, '<', amount)]
elif comparator == '=':
domain = [(f, '=', float_round(amount, precision_digits=p))]
else:
raise UserError(_("Programmation error : domain_maker_move_line_amount requires comparator '=' or '<'"))
domain += [('currency_id', '=', c)]
return domain
return ret
def get_reconciliation_proposition(self, excluded_ids=None):
""" Returns move lines that constitute the best guess to reconcile a statement line
Note: it only looks for move lines in the same currency as the statement line.
"""
# Look for structured communication match
if self.name:
overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
domain = [('ref', '=', self.name)]
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) == 1:
return match_recs
elif len(match_recs) == 0:
move = self.env['account.move'].search([('name', '=', self.name)], limit=1)
if move:
domain = [('move_id', '=', move.id)]
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
if match_recs and len(match_recs) == 1:
return match_recs
# How to compare statement line amount and move lines amount
amount_domain_maker = self._get_domain_maker_move_line_amount()
amount = self.amount_currency or self.amount
# Look for a single move line with the same amount
match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=1, additional_domain=amount_domain_maker('=', amount))
if match_recs:
return match_recs
if not self.partner_id:
return self.env['account.move.line']
# Select move lines until their total amount is greater than the statement line amount
domain = [('reconciled', '=', False)]
domain += [('account_id.user_type_id.type', '=', amount > 0 and 'receivable' or 'payable')] # Make sure we can't mix receivable and payable
domain += amount_domain_maker('<', amount) # Will also enforce > 0
mv_lines = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=5, additional_domain=domain)
st_line_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
ret = self.env['account.move.line']
total = 0
for line in mv_lines:
total += line.currency_id and line.amount_residual_currency or line.amount_residual
if float_compare(total, abs(amount), precision_digits=st_line_currency.rounding) != -1:
break
ret = (ret | line)
return ret
    def _get_move_lines_for_auto_reconcile(self):
        """ Returns the move lines that the method auto_reconcile can use to try to reconcile the statement line """
        # Extension hook: the base implementation does nothing (returns None);
        # modules may override it to supply candidate move lines.
        pass
    @api.multi
    def auto_reconcile(self):
        """ Try to automatically reconcile the statement.line ; return the counterpart journal entry/ies if the automatic reconciliation succeeded, False otherwise.
            TODO : this method could be greatly improved and made extensible
        """
        self.ensure_one()
        match_recs = self.env['account.move.line']
        # How to compare statement line amount and move lines amount
        amount_domain_maker = self._get_domain_maker_move_line_amount()
        equal_amount_domain = amount_domain_maker('=', self.amount_currency or self.amount)
        # Look for structured communication match
        if self.name:
            overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
            domain = equal_amount_domain + [('ref', '=', self.name)]
            match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=domain, overlook_partner=overlook_partner)
            # More than one candidate is ambiguous: bail out rather than guess.
            if match_recs and len(match_recs) != 1:
                return False
        # Look for a single move line with the same partner, the same amount
        if not match_recs:
            if self.partner_id:
                match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=equal_amount_domain)
                if match_recs and len(match_recs) != 1:
                    return False
        if not match_recs:
            return False
        # Now reconcile
        counterpart_aml_dicts = []
        payment_aml_rec = self.env['account.move.line']
        for aml in match_recs:
            # Liquidity lines are existing payments: link them as-is.
            # Other lines (invoices/refunds) get a counterpart dict with the
            # residual amount split into debit/credit.
            if aml.account_id.internal_type == 'liquidity':
                payment_aml_rec = (payment_aml_rec | aml)
            else:
                amount = aml.currency_id and aml.amount_residual_currency or aml.amount_residual
                counterpart_aml_dicts.append({
                    'name': aml.name if aml.name != '/' else aml.move_id.name,
                    'debit': amount < 0 and -amount or 0,
                    'credit': amount > 0 and amount or 0,
                    'move_line': aml
                })
        try:
            # Savepoint so a failed attempt leaves no partial writes behind.
            with self._cr.savepoint():
                counterpart = self.process_reconciliation(counterpart_aml_dicts=counterpart_aml_dicts, payment_aml_rec=payment_aml_rec)
                return counterpart
        except UserError:
            # A configuration / business logic error that makes it impossible to auto-reconcile should not be raised
            # since automatic reconciliation is just an amenity and the user will get the same exception when manually
            # reconciling. Other types of exception are (hopefully) programmation errors and should cause a stacktrace.
            self.invalidate_cache()
            self.env['account.move'].invalidate_cache()
            self.env['account.move.line'].invalidate_cache()
            return False
def _prepare_reconciliation_move(self, move_name):
""" Prepare the dict of values to create the move from a statement line. This method may be overridden to adapt domain logic
through model inheritance (make sure to call super() to establish a clean extension chain).
:param char st_line_number: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'statement_line_id': self.id,
'journal_id': self.statement_id.journal_id.id,
'date': self.date,
'name': move_name,
'ref': self.ref,
}
    def _prepare_reconciliation_move_line(self, move, amount):
        """ Prepare the dict of values to create the move line from a statement line.

            :param recordset move: the account.move to link the move line
            :param float amount: the amount of transaction that wasn't already reconciled
        """
        company_currency = self.journal_id.company_id.currency_id
        statement_currency = self.journal_id.currency_id or company_currency
        st_line_currency = self.currency_id or statement_currency
        amount_currency = False
        if statement_currency != company_currency or st_line_currency != company_currency:
            # First get the ratio total mount / amount not already reconciled
            if statement_currency == company_currency:
                total_amount = self.amount
            elif st_line_currency == company_currency:
                total_amount = self.amount_currency
            else:
                total_amount = statement_currency.with_context({'date': self.date}).compute(self.amount, company_currency)
            # NOTE(review): divides by `amount`; a zero remaining amount would
            # raise ZeroDivisionError — presumably callers never pass 0, confirm.
            ratio = total_amount / amount
            # Then use it to adjust the statement.line field that correspond to the move.line amount_currency
            if statement_currency != company_currency:
                amount_currency = self.amount * ratio
            elif st_line_currency != company_currency:
                amount_currency = self.amount_currency * ratio
        return {
            'name': self.name,
            'date': self.date,
            'ref': self.ref,
            'move_id': move.id,
            'partner_id': self.partner_id and self.partner_id.id or False,
            # Positive remaining amount goes to the credit account, negative to the debit one.
            'account_id': amount >= 0 \
                and self.statement_id.journal_id.default_credit_account_id.id \
                or self.statement_id.journal_id.default_debit_account_id.id,
            'credit': amount < 0 and -amount or 0.0,
            'debit': amount > 0 and amount or 0.0,
            'statement_id': self.statement_id.id,
            'journal_id': self.statement_id.journal_id.id,
            'currency_id': statement_currency != company_currency and statement_currency.id or (st_line_currency != company_currency and st_line_currency.id or False),
            'amount_currency': amount_currency,
        }
    @api.v7
    def process_reconciliations(self, cr, uid, ids, data, context=None):
        """ Handles data sent from the bank statement reconciliation widget (and can otherwise serve as an old-API bridge)

            :param list of dicts data: must contains the keys 'counterpart_aml_dicts', 'payment_aml_ids' and 'new_aml_dicts',
                whose value is the same as described in process_reconciliation except that ids are used instead of recordsets.
        """
        aml_obj = self.pool['account.move.line']
        # Each element of `data` is matched positionally with the id at the same index in `ids`.
        for id, datum in zip(ids, data):
            st_line = self.browse(cr, uid, id, context)
            payment_aml_rec = aml_obj.browse(cr, uid, datum.get('payment_aml_ids', []), context)
            for aml_dict in datum.get('counterpart_aml_dicts', []):
                # Convert the widget's plain id into the recordset expected by process_reconciliation().
                aml_dict['move_line'] = aml_obj.browse(cr, uid, aml_dict['counterpart_aml_id'], context)
                del aml_dict['counterpart_aml_id']
            st_line.process_reconciliation(datum.get('counterpart_aml_dicts', []), payment_aml_rec, datum.get('new_aml_dicts', []))
def fast_counterpart_creation(self):
for st_line in self:
# Technical functionality to automatically reconcile by creating a new move line
vals = {
'name': st_line.name,
'debit': st_line.amount < 0 and -st_line.amount or 0.0,
'credit': st_line.amount > 0 and st_line.amount or 0.0,
'account_id': st_line.account_id.id,
}
st_line.process_reconciliation(new_aml_dicts=[vals])
def process_reconciliation(self, counterpart_aml_dicts=None, payment_aml_rec=None, new_aml_dicts=None):
""" Match statement lines with existing payments (eg. checks) and/or payables/receivables (eg. invoices and refunds) and/or new move lines (eg. write-offs).
If any new journal item needs to be created (via new_aml_dicts or counterpart_aml_dicts), a new journal entry will be created and will contain those
items, as well as a journal item for the bank statement line.
Finally, mark the statement line as reconciled by putting the matched moves ids in the column journal_entry_ids.
:param (list of dicts) counterpart_aml_dicts: move lines to create to reconcile with existing payables/receivables.
The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'move_line'
# The move line to reconcile (partially if specified debit/credit is lower than move line's credit/debit)
:param (list of recordsets) payment_aml_rec: recordset move lines representing existing payments (which are already fully reconciled)
:param (list of dicts) new_aml_dicts: move lines to create. The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'account_id'
- (optional) 'tax_ids'
- (optional) Other account.move.line fields like analytic_account_id or analytics_id
:returns: The journal entries with which the transaction was matched. If there was at least an entry in counterpart_aml_dicts or new_aml_dicts, this list contains
the move created by the reconciliation, containing entries for the statement.line (1), the counterpart move lines (0..*) and the new move lines (0..*).
"""
counterpart_aml_dicts = counterpart_aml_dicts or []
payment_aml_rec = payment_aml_rec or self.env['account.move.line']
new_aml_dicts = new_aml_dicts or []
aml_obj = self.env['account.move.line']
company_currency = self.journal_id.company_id.currency_id
statement_currency = self.journal_id.currency_id or company_currency
st_line_currency = self.currency_id or statement_currency
counterpart_moves = self.env['account.move']
# Check and prepare received data
if self.journal_entry_ids.ids:
raise UserError(_('The bank statement line was already reconciled.'))
if any(rec.statement_id for rec in payment_aml_rec):
raise UserError(_('A selected move line was already reconciled.'))
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].reconciled:
raise UserError(_('A selected move line was already reconciled.'))
if isinstance(aml_dict['move_line'], (int, long)):
aml_dict['move_line'] = aml_obj.browse(aml_dict['move_line'])
for aml_dict in (counterpart_aml_dicts + new_aml_dicts):
if aml_dict.get('tax_ids') and aml_dict['tax_ids'] and isinstance(aml_dict['tax_ids'][0], (int, long)):
# Transform the value in the format required for One2many and Many2many fields
aml_dict['tax_ids'] = map(lambda id: (4, id, None), aml_dict['tax_ids'])
# Fully reconciled moves are just linked to the bank statement
for aml_rec in payment_aml_rec:
aml_rec.write({'statement_id': self.statement_id.id})
aml_rec.move_id.write({'statement_line_id': self.id})
counterpart_moves = (counterpart_moves | aml_rec.move_id)
# Create move line(s). Either matching an existing journal entry (eg. invoice), in which
# case we reconcile the existing and the new move lines together, or being a write-off.
if counterpart_aml_dicts or new_aml_dicts:
st_line_currency = self.currency_id or statement_currency
st_line_currency_rate = self.currency_id and (self.amount_currency / self.amount) or False
# Create the move
move_name = (self.statement_id.name or self.name) + "/" + str(self.sequence)
move_vals = self._prepare_reconciliation_move(move_name)
move = self.env['account.move'].create(move_vals)
move.post()
counterpart_moves = (counterpart_moves | move)
# Complete dicts to create both counterpart move lines and write-offs
to_create = (counterpart_aml_dicts + new_aml_dicts)
ctx = dict(self._context, date=self.date)
for aml_dict in to_create:
aml_dict['move_id'] = move.id
aml_dict['partner_id'] = self.partner_id.id
aml_dict['statement_id'] = self.statement_id.id
if st_line_currency.id != company_currency.id:
aml_dict['amount_currency'] = aml_dict['debit'] - aml_dict['credit']
aml_dict['currency_id'] = st_line_currency.id
if self.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:
# Statement is in company currency but the transaction is in foreign currency
aml_dict['debit'] = company_currency.round(aml_dict['debit'] / st_line_currency_rate)
aml_dict['credit'] = company_currency.round(aml_dict['credit'] / st_line_currency_rate)
elif self.currency_id and st_line_currency_rate:
# Statement is in foreign currency and the transaction is in another one
aml_dict['debit'] = statement_currency.with_context(ctx).compute(aml_dict['debit'] / st_line_currency_rate, company_currency)
aml_dict['credit'] = statement_currency.with_context(ctx).compute(aml_dict['credit'] / st_line_currency_rate, company_currency)
else:
# Statement is in foreign currency and no extra currency is given for the transaction
aml_dict['debit'] = st_line_currency.with_context(ctx).compute(aml_dict['debit'], company_currency)
aml_dict['credit'] = st_line_currency.with_context(ctx).compute(aml_dict['credit'], company_currency)
elif statement_currency.id != company_currency.id:
# Statement is in foreign currency but the transaction is in company currency
prorata_factor = (aml_dict['debit'] - aml_dict['credit']) / self.amount_currency
aml_dict['amount_currency'] = prorata_factor * self.amount
aml_dict['currency_id'] = statement_currency.id
# Create the move line for the statement line using the total credit/debit of the counterpart
# This leaves out the amount already reconciled and avoids rounding errors from currency conversion
st_line_amount = sum(aml_dict['credit'] - aml_dict['debit'] for aml_dict in to_create)
aml_obj.with_context(check_move_validity=False).create(self._prepare_reconciliation_move_line(move, st_line_amount))
# Create write-offs
for aml_dict in new_aml_dicts:
aml_obj.with_context(check_move_validity=False).create(aml_dict)
# Create counterpart move lines and reconcile them
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].partner_id.id:
aml_dict['partner_id'] = aml_dict['move_line'].partner_id.id
aml_dict['account_id'] = aml_dict['move_line'].account_id.id
counterpart_move_line = aml_dict.pop('move_line')
if counterpart_move_line.currency_id and counterpart_move_line.currency_id != company_currency and not aml_dict.get('currency_id'):
aml_dict['currency_id'] = counterpart_move_line.currency_id.id
aml_dict['amount_currency'] = company_currency.with_context(ctx).compute(aml_dict['debit'] - aml_dict['credit'], counterpart_move_line.currency_id)
new_aml = aml_obj.with_context(check_move_validity=False).create(aml_dict)
(new_aml | counterpart_move_line).reconcile()
counterpart_moves.assert_balanced()
return counterpart_moves
|
kbrock/FrameworkBenchmarks
|
refs/heads/master
|
frameworks/Python/historical/webware/app/Context/queries.py
|
77
|
import json
from random import randint
from functools import partial
from WebKit.HTTPContent import HTTPContent
from DbSession import Database
from World import World
import UrlHelper
class queries(HTTPContent):
    """Benchmark endpoint: fetch N random World rows and return them as JSON."""

    def defaultAction(self):
        self.response().clearHeaders()
        self.response()._headers["Content-Type"] = "application/json"
        # Number of rows requested via the ?queries= parameter, parsed by the shared helper.
        count = UrlHelper.getQueryNum(self.request().field("queries"))
        pick_id = partial(randint, 1, 10000)
        fetch = Database.DbSession.query(World).get
        payload = json.dumps([fetch(pick_id()).serialize() for _ in xrange(count)])
        self.response()._headers["Content-Length"] = len(payload)
        self.write(payload)
|
jammerful/buildbot
|
refs/heads/master
|
master/buildbot/test/unit/test_changes_changes.py
|
10
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import pprint
import re
import textwrap
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.changes import changes
from buildbot.test.fake import fakedb
from buildbot.test.fake import fakemaster
class Change(unittest.TestCase):
    """Unit tests for buildbot.changes.changes.Change.

    change23 is the reference in-memory fixture; change23_rows holds the
    equivalent fake-database rows so test_fromChdict can round-trip through
    the db layer. change24/change25 differ only enough (timestamp, revlink,
    comments) to exercise ordering and comparison.
    """
    # Fake-db rows equivalent to self.change23, consumed by test_fromChdict.
    change23_rows = [
        fakedb.Change(changeid=23, author="dustin", comments="fix whitespace",
                      branch="warnerdb", revision="deadbeef",
                      when_timestamp=266738404, revlink='http://warner/0e92a098b',
                      category='devel', repository='git://warner', codebase='mainapp',
                      project='Buildbot'),
        fakedb.ChangeFile(changeid=23, filename='master/README.txt'),
        fakedb.ChangeFile(changeid=23, filename='worker/README.txt'),
        fakedb.ChangeProperty(changeid=23, property_name='notest',
                              property_value='["no","Change"]'),
        fakedb.ChangeUser(changeid=23, uid=27),
    ]

    def setUp(self):
        # Numbers are set manually because they normally come from the database.
        self.master = fakemaster.make_master(testcase=self, wantDb=True)
        self.change23 = changes.Change(**dict(  # using **dict(..) forces kwargs
            category='devel',
            repository=u'git://warner',
            codebase=u'mainapp',
            who=u'dustin',
            when=266738404,
            comments=u'fix whitespace',
            project=u'Buildbot',
            branch=u'warnerdb',
            revlink=u'http://warner/0e92a098b',
            properties={'notest': "no"},
            files=[u'master/README.txt', u'worker/README.txt'],
            revision=u'deadbeef'))
        self.change23.number = 23
        self.change24 = changes.Change(**dict(
            category='devel',
            repository=u'git://warner',
            codebase=u'mainapp',
            who=u'dustin',
            when=266738405,
            comments=u'fix whitespace again',
            project=u'Buildbot',
            branch=u'warnerdb',
            revlink=u'http://warner/0e92a098c',
            properties={'notest': "no"},
            files=[u'master/README.txt', u'worker/README.txt'],
            revision=u'deadbeef'))
        self.change24.number = 24
        self.change25 = changes.Change(**dict(
            category='devel',
            repository=u'git://warner',
            codebase=u'mainapp',
            who=u'dustin',
            when=266738406,
            comments=u'fix whitespace again',
            project=u'Buildbot',
            branch=u'warnerdb',
            revlink=u'http://warner/0e92a098d',
            properties={'notest': "no"},
            files=[u'master/README.txt', u'worker/README.txt'],
            revision=u'deadbeef'))
        self.change25.number = 25

    @defer.inlineCallbacks
    def test_fromChdict(self):
        # get a real honest-to-goodness chdict from the fake db
        yield self.master.db.insertTestData(self.change23_rows)
        chdict = yield self.master.db.changes.getChange(23)
        exp = self.change23
        got = yield changes.Change.fromChdict(self.master, chdict)
        # compare (field by field, so a failure can print both objects)
        ok = True
        ok = ok and got.number == exp.number
        ok = ok and got.who == exp.who
        ok = ok and sorted(got.files) == sorted(exp.files)
        ok = ok and got.comments == exp.comments
        ok = ok and got.revision == exp.revision
        ok = ok and got.when == exp.when
        ok = ok and got.branch == exp.branch
        ok = ok and got.category == exp.category
        ok = ok and got.revlink == exp.revlink
        ok = ok and got.properties == exp.properties
        ok = ok and got.repository == exp.repository
        ok = ok and got.codebase == exp.codebase
        ok = ok and got.project == exp.project
        if not ok:
            def printable(c):
                return pprint.pformat(c.__dict__)
            self.fail("changes do not match; expected\n%s\ngot\n%s" %
                      (printable(exp), printable(got)))

    def test_str(self):
        # str() should produce something of the form Change(...)
        string = str(self.change23)
        self.assertTrue(re.match(r"Change\(.*\)", string), string)

    def test_asText(self):
        # The 'At:' line is timezone-dependent, hence the .* in the pattern.
        text = self.change23.asText()
        self.assertTrue(re.match(textwrap.dedent(u'''\
            Files:
             master/README.txt
             worker/README.txt
            On: git://warner
            For: Buildbot
            At: .*
            Changed By: dustin
            Comments: fix whitespaceProperties:.
              notest: no
            '''), text), text)

    def test_asDict(self):
        dict = self.change23.asDict()
        self.assertIn('1978', dict['at'])  # timezone-sensitive
        del dict['at']
        self.assertEqual(dict, {
            'branch': u'warnerdb',
            'category': u'devel',
            'codebase': u'mainapp',
            'comments': u'fix whitespace',
            'files': [{'name': u'master/README.txt'},
                      {'name': u'worker/README.txt'}],
            'number': 23,
            'project': u'Buildbot',
            'properties': [('notest', 'no', 'Change')],
            'repository': u'git://warner',
            'rev': u'deadbeef',
            'revision': u'deadbeef',
            'revlink': u'http://warner/0e92a098b',
            'when': 266738404,
            'who': u'dustin'})

    def test_getShortAuthor(self):
        self.assertEqual(self.change23.getShortAuthor(), 'dustin')

    def test_getTime(self):
        # careful, or timezones will hurt here
        self.assertIn('Jun 1978', self.change23.getTime())

    def test_getTimes(self):
        self.assertEqual(self.change23.getTimes(), (266738404, None))

    def test_getText(self):
        self.change23.who = 'nasty < nasty'  # test the html escaping (ugh!)
        self.assertEqual(self.change23.getText(), ['nasty &lt; nasty'])

    def test_getLogs(self):
        self.assertEqual(self.change23.getLogs(), {})

    def test_compare(self):
        # Changes order by their creation sequence: 23 < 24 < 25.
        self.assertEqual(self.change23, self.change23)
        self.assertNotEqual(self.change24, self.change23)
        self.assertGreater(self.change24, self.change23)
        self.assertGreaterEqual(self.change24, self.change23)
        self.assertGreaterEqual(self.change24, self.change24)
        self.assertLessEqual(self.change24, self.change24)
        self.assertLessEqual(self.change23, self.change24)
        self.assertLess(self.change23, self.change25)
|
affo/nova
|
refs/heads/master
|
nova/openstack/common/cliutils.py
|
57
|
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# W0603: Using the global statement
# W0621: Redefining name %s from outer scope
# pylint: disable=W0603,W0621
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
import six
from six import moves
from nova.openstack.common._i18n import _
class MissingArgs(Exception):
    """Supplied arguments are not sufficient for calling a function."""

    def __init__(self, missing):
        # Keep the raw list available to callers; the message joins it.
        self.missing = missing
        message = _("Missing arguments: %s") % ", ".join(missing)
        super(MissingArgs, self).__init__(message)
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing arguments: a
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing arguments: b, d

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: if any required parameter is left unfilled

    Note: the doctest output was fixed to match the actual MissingArgs
    message ("Missing arguments: ..."), and the ':param arg:' typo was
    corrected to ':param args:'.
    """
    argspec = inspect.getargspec(fn)

    num_defaults = len(argspec.defaults or [])
    # Parameters without a default value are the required ones.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        return getattr(method, '__self__', None) is not None

    if isbound(fn):
        # Bound methods receive 'self' implicitly; don't count it.
        required_args.pop(0)

    missing = [arg for arg in required_args if arg not in kwargs]
    # Positional arguments fill the required parameters from the left.
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
def arg(*args, **kwargs):
    """Decorator attaching one CLI argument spec to a shell `do_*` function.

    Example:

    >>> @arg("name", help="Name of the new entity")
    ... def entity_create(args):
    ...     pass
    """
    def apply_arg(func):
        add_arg(func, *args, **kwargs)
        return func
    return apply_arg
def env(*args, **kwargs):
    """Return the value of the first environment variable that is set and
    non-empty; otherwise the `default` keyword argument (or '').
    """
    for name in args:
        value = os.environ.get(name)
        if value:
            return value
    return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
    """Bind CLI arguments to a shell.py `do_foo` function.

    Specs are prepended so stacked decorators keep their declaration order
    when the list is read top-to-bottom.
    """
    arguments = getattr(func, 'arguments', None)
    if arguments is None:
        arguments = []
        func.arguments = arguments
    spec = (args, kwargs)
    # NOTE(sirp): skip duplicates that can occur when the module is shared
    # across tests.
    if spec not in arguments:
        arguments.insert(0, spec)
def unauthenticated(func):
    """Mark the decorated function as callable without authentication.

    Usage:

    >>> @unauthenticated
    ... def mymethod(f):
    ...     pass
    """
    setattr(func, 'unauthenticated', True)
    return func
def isunauthenticated(func):
    """Return the `unauthenticated` flag set by the @unauthenticated
    decorator, or False when the function was never marked.
    """
    flag = getattr(func, 'unauthenticated', False)
    return flag
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None, field_labels=None):
    """Print a list or objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting
    :param sortby_index: index of the field for sorting table rows, or None
        to keep the incoming order
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    :param field_labels: Labels to use in the heading of the table, default to
        fields.
    :raises ValueError: if field_labels and fields have different lengths
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    field_labels = field_labels or fields
    if len(field_labels) != len(fields):
        # BUG FIX: interpolate the values into the message. Previously the
        # mapping was passed as a second positional argument to ValueError,
        # so the %(...)s placeholders were never substituted.
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s")
                         % {'labels': field_labels, 'fields': fields})

    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {'sortby': field_labels[sortby_index]}
    pt = prettytable.PrettyTable(field_labels)
    pt.align = 'l'

    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            else:
                # Mixed-case attributes keep their case; other column names
                # are lowered before the attribute lookup.
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                row.append(data)
        pt.add_row(row)

    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0):
    """Print a `dict` as a table of two columns.

    :param dct: `dict` to print
    :param dict_property: name of the first column
    :param wrap: wrapping for the second column
    """
    pt = prettytable.PrettyTable([dict_property, 'Value'])
    pt.align = 'l'
    for k, v in six.iteritems(dct):
        # convert dict to str to check length
        if isinstance(v, dict):
            v = six.text_type(v)
        if wrap > 0:
            v = textwrap.fill(six.text_type(v), wrap)
        # if value has a newline, add in multiple rows
        # e.g. fault with stacktrace
        # NOTE(review): this deliberately tests for a literal backslash-n
        # sequence (r'\n'), not an actual newline character — presumably
        # faults store their traceback with escaped newlines; confirm
        # before changing.
        if v and isinstance(v, six.string_types) and r'\n' in v:
            lines = v.strip().split(r'\n')
            col1 = k
            for line in lines:
                pt.add_row([col1, line])
                col1 = ''
        else:
            pt.add_row([k, v])
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string()).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
    """Prompt for a password on the controlling TTY.

    Honors the OS_VERIFY_PASSWORD environment variable: when truthy, the
    password must be typed twice and both entries must match. Returns None
    when stdin is not a TTY, on EOF (Ctrl-D), or when no non-empty,
    confirmed password was entered within `max_password_prompts` attempts.
    """
    must_verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
    if not (hasattr(sys.stdin, "isatty") and sys.stdin.isatty()):
        return None
    password = None
    try:
        for _attempt in moves.range(max_password_prompts):
            first = getpass.getpass("OS Password: ")
            second = getpass.getpass("Please verify: ") if must_verify else first
            if first and first == second:
                password = first
                break
    except EOFError:
        # Ctrl-D: give up silently.
        pass
    return password
def service_type(stype):
    """Decorator storing *stype* as the `service_type` attribute of the
    decorated function.

    Usage:

    .. code-block:: python

       @service_type('volume')
       def mymethod(f):
           ...
    """
    def tag(f):
        f.service_type = stype
        return f
    return tag
def get_service_type(f):
    """Return the `service_type` attribute set by @service_type, or None."""
    return getattr(f, 'service_type', None)
def pretty_choice_list(l):
    """Render an iterable as a comma-separated list of quoted choices."""
    quoted = ("'%s'" % item for item in l)
    return ', '.join(quoted)
def exit(msg=''):
    """Terminate the process with status 1, printing *msg* to stderr first
    when it is non-empty.
    """
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(1)
|
olemis/brython
|
refs/heads/master
|
www/src/Lib/xml/etree/cElementTree.py
|
876
|
# Deprecated alias for xml.etree.ElementTree
from xml.etree.ElementTree import *
|
hainm/scikit-learn
|
refs/heads/master
|
sklearn/utils/tests/test_sparsefuncs.py
|
157
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
    """Check mean_variance_axis(axis=0) against dense numpy for CSR/CSC input."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    # LIL input is not supported
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
    # Repeat the same checks in single precision.
    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # BUG FIX: was "X_csc = X_csr.astype(np.float32)", which silently
    # re-tested the CSR matrix instead of exercising the CSC path.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
    """Axis values outside the supported range must raise ValueError."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    for i, j in ((0, 0), (2, 1), (4, 3)):
        X[i, j] = 0
    X_csr = sp.csr_matrix(X)
    for bad_axis in (-3, 2, -1):
        assert_raises(ValueError, mean_variance_axis, X_csr, axis=bad_axis)
def test_mean_variance_axis1():
    """Check mean_variance_axis(axis=1) against dense numpy for CSR/CSC input."""
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    # LIL input is not supported
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
    # Repeat the same checks in single precision.
    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # BUG FIX: was "X_csc = X_csr.astype(np.float32)", which silently
    # re-tested the CSR matrix instead of exercising the CSC path.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
    """assign_rows_csr must copy the selected CSR rows into the dense output."""
    src = sp.csr_matrix([[0, 3, 0],
                         [2, 4, 0],
                         [0, 0, 0],
                         [9, 8, 7],
                         [4, 0, 5]], dtype=np.float64)
    src_rows = np.array([0, 2, 3], dtype=np.intp)
    dst_rows = np.array([1, 3, 4], dtype=np.intp)
    dst = np.ones((6, src.shape[1]), dtype=np.float64)
    # Rows not targeted must be left untouched (still all ones).
    expected = np.ones_like(dst)
    expected[dst_rows] = src[src_rows, :].toarray()
    assign_rows_csr(src, src_rows, dst_rows, dst)
    assert_array_equal(dst, expected)
def test_inplace_column_scale():
    """inplace_column_scale on CSR/CSC must match dense column scaling."""
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    scale = rng.rand(200)
    # Exercise both precisions with identical inputs.
    for dtype in (np.float64, np.float32):
        X_typed = X.astype(dtype)
        scale_typed = scale.astype(dtype)
        Xr = X_typed.tocsr()
        Xc = X_typed.tocsc()
        XA = X_typed.toarray()
        XA *= scale_typed
        inplace_column_scale(Xc, scale_typed)
        inplace_column_scale(Xr, scale_typed)
        assert_array_almost_equal(Xr.toarray(), Xc.toarray())
        assert_array_almost_equal(XA, Xc.toarray())
        assert_array_almost_equal(XA, Xr.toarray())
        # LIL matrices are not supported
        assert_raises(TypeError, inplace_column_scale, X_typed.tolil(),
                      scale_typed)
def test_inplace_row_scale():
    """inplace_row_scale on CSR/CSC must match dense row scaling."""
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    scale = rng.rand(100)
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # BUG FIX: this assertion previously called inplace_column_scale, so the
    # row-scaling function's rejection of LIL input was never exercised.
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
    # Repeat the same checks in single precision.
    X = X.astype(np.float32)
    scale = scale.astype(np.float32)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # BUG FIX: same copy-paste error as above.
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
def test_inplace_swap_row():
    """inplace_swap_row must mirror dense row swaps on CSR and CSC matrices."""
    for dtype in (np.float64, np.float32):
        X = np.array([[0, 3, 0],
                      [2, 4, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # Use the BLAS swap routine to compute the dense reference in place.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        for row_a, row_b in ((0, -1), (2, 3)):
            X[row_a], X[row_b] = swap(X[row_a], X[row_b])
            inplace_swap_row(X_csr, row_a, row_b)
            inplace_swap_row(X_csc, row_a, row_b)
            assert_array_equal(X_csr.toarray(), X_csc.toarray())
            assert_array_equal(X, X_csc.toarray())
            assert_array_equal(X, X_csr.toarray())
        # BUG FIX: the row indices were previously omitted, so the TypeError
        # came from the wrong call arity rather than from the LIL type check.
        assert_raises(TypeError, inplace_swap_row, X_csr.tolil(), 0, -1)
def test_inplace_swap_column():
    """inplace_swap_column must mirror dense column swaps on CSR and CSC."""
    for dtype in (np.float64, np.float32):
        X = np.array([[0, 3, 0],
                      [2, 4, 0],
                      [0, 0, 0],
                      [9, 8, 7],
                      [4, 0, 5]], dtype=dtype)
        X_csr = sp.csr_matrix(X)
        X_csc = sp.csc_matrix(X)
        # Use the BLAS swap routine to compute the dense reference in place.
        swap = linalg.get_blas_funcs(('swap',), (X,))[0]
        for col_a, col_b in ((0, -1), (0, 1)):
            X[:, col_a], X[:, col_b] = swap(X[:, col_a], X[:, col_b])
            inplace_swap_column(X_csr, col_a, col_b)
            inplace_swap_column(X_csc, col_a, col_b)
            assert_array_equal(X_csr.toarray(), X_csc.toarray())
            assert_array_equal(X, X_csc.toarray())
            assert_array_equal(X, X_csr.toarray())
        # BUG FIX: the column indices were previously omitted, so the
        # TypeError came from the wrong call arity rather than from the LIL
        # type check.
        assert_raises(TypeError, inplace_swap_column, X_csr.tolil(), 0, -1)
def test_min_max_axis0():
    """min_max_axis(axis=0) must agree with dense min/max for CSR and CSC."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    for dtype in (np.float64, np.float32):
        X_typed = X.astype(dtype)
        for to_sparse in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(to_sparse(X_typed), axis=0)
            assert_array_equal(mins, X_typed.min(axis=0))
            assert_array_equal(maxs, X_typed.max(axis=0))
def test_min_max_axis1():
    """min_max_axis(axis=1) must agree with dense min/max for CSR and CSC."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    for dtype in (np.float64, np.float32):
        X_typed = X.astype(dtype)
        for to_sparse in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(to_sparse(X_typed), axis=1)
            assert_array_equal(mins, X_typed.min(axis=1))
            assert_array_equal(maxs, X_typed.max(axis=1))
def test_min_max_axis_errors():
    """min_max_axis must reject LIL input and out-of-range axis values."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    # Unsupported sparse format
    assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
    # Axis outside the valid range
    assert_raises(ValueError, min_max_axis, X_csr, axis=2)
    assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
    """count_nonzero must match dense counts, weighted and unweighted."""
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    X_nonzero = X != 0
    sample_weight = [.5, .2, .3, .1, .1]
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
    for axis in [0, 1, -1, -2, None]:
        unweighted = count_nonzero(X_csr, axis=axis)
        assert_array_almost_equal(unweighted, X_nonzero.sum(axis=axis))
        weighted = count_nonzero(X_csr, axis=axis,
                                 sample_weight=sample_weight)
        assert_array_almost_equal(weighted, X_nonzero_weighted.sum(axis=axis))
    # CSC input and out-of-range axes are rejected
    assert_raises(TypeError, count_nonzero, X_csc)
    assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
    """Check csc_median_axis_0 (per-column median of a CSC matrix) vs numpy."""
    # Test csc_row_median actually calculates the median.
    # Test that it gives the same output when X is dense.
    rng = np.random.RandomState(0)
    X = rng.rand(100, 50)
    dense_median = np.median(X, axis=0)
    csc = sp.csc_matrix(X)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test that it gives the same output when X is sparse
    X = rng.rand(51, 100)
    X[X < 0.7] = 0.0
    # flip the sign of a few rows so negative values are covered
    ind = rng.randint(0, 50, 10)
    X[ind] = -X[ind]
    csc = sp.csc_matrix(X)
    dense_median = np.median(X, axis=0)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test for toy data.
    X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
    X = [[0, -2], [-1, -5], [1, -3]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
    # Test that it raises an Error for non-csc matrices.
    assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
|
bthirion/scikit-learn
|
refs/heads/master
|
examples/applications/plot_topics_extraction_with_nmf_lda.py
|
12
|
"""
========================================================
Topic extraction with Non-negative Matrix Factorization\
and Latent Dirichlet Allocation
========================================================
This is an example of applying :class:`sklearn.decomposition.NMF` and
:class:`sklearn.decomposition.LatentDirichletAllocation` on a corpus
of documents and extract additive models of the topic structure of the
corpus. The output is a list of topics, each represented as a list of
terms (weights are not shown).
Non-negative Matrix Factorization is applied with two different objective
functions: the Frobenius norm, and the generalized Kullback-Leibler divergence.
The latter is equivalent to Probabilistic Latent Semantic Indexing.
The default parameters (n_samples / n_features / n_components) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
# Problem-size knobs, kept small so the example finishes in tens of seconds.
n_samples = 2000
n_features = 1000
n_components = 10  # number of topics to extract
n_top_words = 20   # terms displayed per topic
def print_top_words(model, feature_names, n_top_words):
    """Print, for each topic in *model*, its *n_top_words* top-weighted terms."""
    for topic_idx, topic in enumerate(model.components_):
        # argsort ascending, then take the last n_top_words in reverse order
        top_indices = topic.argsort()[:-n_top_words - 1:-1]
        message = "Topic #%d: " % topic_idx
        message += " ".join([feature_names[i] for i in top_indices])
        print(message)
    print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
                             remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
print("done in %0.3fs." % (time() - t0))
# Use tf-idf features for NMF.
print("Extracting tf-idf features for NMF...")
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
                                   max_features=n_features,
                                   stop_words='english')
t0 = time()
tfidf = tfidf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
                                max_features=n_features,
                                stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print()
# Fit the NMF model with the (default) Frobenius norm objective.
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
      "n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
          alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
# Fit a second NMF model with the generalized Kullback-Leibler objective
# (equivalent to Probabilistic Latent Semantic Indexing, per the module doc).
print("Fitting the NMF model (generalized Kullback-Leibler divergence) with "
      "tf-idf features, n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_components, random_state=1,
          beta_loss='kullback-leibler', solver='mu', max_iter=1000, alpha=.1,
          l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (generalized Kullback-Leibler divergence):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
# LDA uses raw term counts (tf), not tf-idf.
print("Fitting LDA models with tf features, "
      "n_samples=%d and n_features=%d..."
      % (n_samples, n_features))
lda = LatentDirichletAllocation(n_components=n_components, max_iter=5,
                                learning_method='online',
                                learning_offset=50.,
                                random_state=0)
t0 = time()
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
|
ueshin/apache-spark
|
refs/heads/master
|
python/pyspark/sql/pandas/serializers.py
|
23
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Serializers for PyArrow and pandas conversions. See `pyspark.serializers` for more details.
"""
from pyspark.serializers import Serializer, read_int, write_int, UTF8Deserializer
class SpecialLengths(object):
    """Negative length markers used as sentinels in the worker/JVM stream
    protocol (written/read as plain ints, e.g. via write_int)."""
    END_OF_DATA_SECTION = -1
    PYTHON_EXCEPTION_THROWN = -2
    TIMING_DATA = -3
    END_OF_STREAM = -4
    NULL = -5
    START_ARROW_STREAM = -6
class ArrowCollectSerializer(Serializer):
    """
    Deserialize a stream of batches followed by batch order information. Used in
    PandasConversionMixin._collect_as_arrow() after invoking Dataset.collectAsArrowToPython()
    in the JVM.
    """

    def __init__(self):
        # Delegates the actual Arrow (de)serialization to the stream serializer.
        self.serializer = ArrowStreamSerializer()

    def dump_stream(self, iterator, stream):
        # Plain pass-through: batch ordering only matters on the read side.
        return self.serializer.dump_stream(iterator, stream)

    def load_stream(self, stream):
        """
        Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
        a list of indices that can be used to put the RecordBatches in the correct order.
        """
        # load the batches
        for batch in self.serializer.load_stream(stream):
            yield batch
        # load the batch order indices or propagate any error that occurred in the JVM
        num = read_int(stream)
        if num == -1:
            # -1 (== SpecialLengths.END_OF_DATA_SECTION) signals that the JVM
            # sent an error message instead of the batch-order list.
            error_msg = UTF8Deserializer().loads(stream)
            raise RuntimeError("An error occurred while calling "
                               "ArrowCollectSerializer.load_stream: {}".format(error_msg))
        batch_order = []
        for i in range(num):
            index = read_int(stream)
            batch_order.append(index)
        # Final item: the permutation the caller uses to reorder the batches.
        yield batch_order

    def __repr__(self):
        return "ArrowCollectSerializer(%s)" % self.serializer
class ArrowStreamSerializer(Serializer):
    """
    Serializes Arrow record batches as a stream.
    """

    def dump_stream(self, iterator, stream):
        import pyarrow as pa
        writer = None
        try:
            for batch in iterator:
                if writer is None:
                    # Lazily create the writer from the first batch so its
                    # schema can be written as the stream header.
                    writer = pa.RecordBatchStreamWriter(stream, batch.schema)
                writer.write_batch(batch)
        finally:
            # Close even on error so the end-of-stream marker is emitted.
            if writer is not None:
                writer.close()

    def load_stream(self, stream):
        import pyarrow as pa
        reader = pa.ipc.open_stream(stream)
        for batch in reader:
            yield batch

    def __repr__(self):
        return "ArrowStreamSerializer"
class ArrowStreamPandasSerializer(ArrowStreamSerializer):
    """
    Serializes Pandas.Series as Arrow data with Arrow streaming format.

    Parameters
    ----------
    timezone : str
        A timezone to respect when handling timestamp values
    safecheck : bool
        If True, conversion from Arrow to Pandas checks for overflow/truncation
    assign_cols_by_name : bool
        If True, then Pandas DataFrames will get columns by name
    """

    def __init__(self, timezone, safecheck, assign_cols_by_name):
        super(ArrowStreamPandasSerializer, self).__init__()
        self._timezone = timezone
        self._safecheck = safecheck
        self._assign_cols_by_name = assign_cols_by_name

    def arrow_to_pandas(self, arrow_column):
        """Convert one Arrow column to a pandas.Series, fixing up timestamp
        timezones and map items along the way."""
        from pyspark.sql.pandas.types import _check_series_localize_timestamps, \
            _convert_map_items_to_dict
        import pyarrow
        # If the given column is a date type column, creates a series of datetime.date directly
        # instead of creating datetime64[ns] as intermediate data to avoid overflow caused by
        # datetime64[ns] type handling.
        s = arrow_column.to_pandas(date_as_object=True)
        if pyarrow.types.is_timestamp(arrow_column.type):
            return _check_series_localize_timestamps(s, self._timezone)
        elif pyarrow.types.is_map(arrow_column.type):
            return _convert_map_items_to_dict(s)
        else:
            return s

    def _create_batch(self, series):
        """
        Create an Arrow record batch from the given pandas.Series or list of Series,
        with optional type.

        Parameters
        ----------
        series : pandas.Series or list
            A single series, list of series, or list of (series, arrow_type)

        Returns
        -------
        pyarrow.RecordBatch
            Arrow RecordBatch
        """
        import pandas as pd
        import pyarrow as pa
        from pyspark.sql.pandas.types import _check_series_convert_timestamps_internal, \
            _convert_dict_to_map_items
        from pandas.api.types import is_categorical_dtype
        # Make input conform to [(series1, type1), (series2, type2), ...]
        if not isinstance(series, (list, tuple)) or \
                (len(series) == 2 and isinstance(series[1], pa.DataType)):
            series = [series]
        series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)

        def create_array(s, t):
            # Null mask computed up front so Arrow preserves missing values.
            mask = s.isnull()
            # Ensure timestamp series are in expected form for Spark internal representation
            if t is not None and pa.types.is_timestamp(t):
                s = _check_series_convert_timestamps_internal(s, self._timezone)
            elif t is not None and pa.types.is_map(t):
                s = _convert_dict_to_map_items(s)
            elif is_categorical_dtype(s.dtype):
                # Note: This can be removed once minimum pyarrow version is >= 0.16.1
                s = s.astype(s.dtypes.categories.dtype)
            try:
                array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck)
            except ValueError as e:
                if self._safecheck:
                    error_msg = "Exception thrown when converting pandas.Series (%s) to " + \
                                "Arrow Array (%s). It can be caused by overflows or other " + \
                                "unsafe conversions warned by Arrow. Arrow safe type check " + \
                                "can be disabled by using SQL config " + \
                                "`spark.sql.execution.pandas.convertToArrowArraySafely`."
                    raise ValueError(error_msg % (s.dtype, t)) from e
                else:
                    raise e
            return array

        arrs = []
        for s, t in series:
            if t is not None and pa.types.is_struct(t):
                if not isinstance(s, pd.DataFrame):
                    raise ValueError("A field of type StructType expects a pandas.DataFrame, "
                                     "but got: %s" % str(type(s)))
                # Input partition and result pandas.DataFrame empty, make empty Arrays with struct
                if len(s) == 0 and len(s.columns) == 0:
                    arrs_names = [(pa.array([], type=field.type), field.name) for field in t]
                # Assign result columns by schema name if user labeled with strings
                elif self._assign_cols_by_name and any(isinstance(name, str)
                                                       for name in s.columns):
                    arrs_names = [(create_array(s[field.name], field.type), field.name)
                                  for field in t]
                # Assign result columns by position
                else:
                    arrs_names = [(create_array(s[s.columns[i]], field.type), field.name)
                                  for i, field in enumerate(t)]
                struct_arrs, struct_names = zip(*arrs_names)
                arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names))
            else:
                arrs.append(create_array(s, t))
        # Positional column names: downstream consumers address by index.
        return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in range(len(arrs))])

    def dump_stream(self, iterator, stream):
        """
        Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
        a list of series accompanied by an optional pyarrow type to coerce the data to.
        """
        batches = (self._create_batch(series) for series in iterator)
        super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)

    def load_stream(self, stream):
        """
        Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
        """
        batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
        import pyarrow as pa
        for batch in batches:
            yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]

    def __repr__(self):
        return "ArrowStreamPandasSerializer"
class ArrowStreamPandasUDFSerializer(ArrowStreamPandasSerializer):
    """
    Serializer used by Python worker to evaluate Pandas UDFs
    """

    def __init__(self, timezone, safecheck, assign_cols_by_name, df_for_struct=False):
        super(ArrowStreamPandasUDFSerializer, self) \
            .__init__(timezone, safecheck, assign_cols_by_name)
        # When True, struct columns are expanded into a pandas.DataFrame
        # instead of a single Series of dicts.
        self._df_for_struct = df_for_struct

    def arrow_to_pandas(self, arrow_column):
        import pyarrow.types as types
        if self._df_for_struct and types.is_struct(arrow_column.type):
            import pandas as pd
            # Flatten each struct field to its own Series, named after the
            # field, then stitch them into a DataFrame column-wise.
            series = [super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(column)
                      .rename(field.name)
                      for column, field in zip(arrow_column.flatten(), arrow_column.type)]
            s = pd.concat(series, axis=1)
        else:
            s = super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(arrow_column)
        return s

    def dump_stream(self, iterator, stream):
        """
        Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
        This should be sent after creating the first record batch so in case of an error, it can
        be sent back to the JVM before the Arrow stream starts.
        """
        def init_stream_yield_batches():
            should_write_start_length = True
            for series in iterator:
                batch = self._create_batch(series)
                if should_write_start_length:
                    # Emit the marker only once, after the first batch was
                    # built successfully.
                    write_int(SpecialLengths.START_ARROW_STREAM, stream)
                    should_write_start_length = False
                yield batch
        return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)

    def __repr__(self):
        return "ArrowStreamPandasUDFSerializer"
class CogroupUDFSerializer(ArrowStreamPandasUDFSerializer):

    def load_stream(self, stream):
        """
        Deserialize Cogrouped ArrowRecordBatches to a tuple of Arrow tables and yield as two
        lists of pandas.Series.
        """
        import pyarrow as pa
        dataframes_in_group = None
        # Each group is preceded by an int count; 0 terminates the stream.
        while dataframes_in_group is None or dataframes_in_group > 0:
            dataframes_in_group = read_int(stream)
            if dataframes_in_group == 2:
                # Two back-to-back Arrow streams: one per cogrouped side.
                batch1 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
                batch2 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
                yield (
                    [self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch1).itercolumns()],
                    [self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch2).itercolumns()]
                )
            elif dataframes_in_group != 0:
                raise ValueError(
                    'Invalid number of pandas.DataFrames in group {0}'.format(dataframes_in_group))
|
OCA/sale-workflow
|
refs/heads/12.0
|
sale_order_product_recommendation/models/sale_order.py
|
1
|
# Copyright 2020 Tecnativa - Sergio Teruel
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class SaleOrderLine(models.Model):
    # Extends sale.order.line solely to index product_id; no new fields or
    # behavior are added.
    _inherit = "sale.order.line"
    # Add database index to get last sale price query performance
    product_id = fields.Many2one(index=True)
|
Fusion-Rom/android_external_skia
|
refs/heads/lp5.1
|
platform_tools/android/bin/gyp_to_android.py
|
66
|
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script for generating the Android framework's version of Skia from gyp
files.
"""
import os
import shutil
import sys
import tempfile
# Find the top of trunk
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SKIA_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir,
os.pardir))
# Find the directory with our helper files, and add it to the path.
ANDROID_TOOLS = os.path.join(SKIA_DIR, 'platform_tools', 'android')
sys.path.append(ANDROID_TOOLS)
import gyp_gen.android_framework_gyp as android_framework_gyp
import gyp_gen.gypd_parser as gypd_parser
import gyp_gen.generate_user_config as generate_user_config
import gyp_gen.makefile_writer as makefile_writer
import gyp_gen.tool_makefile_writer as tool_makefile_writer
import gyp_gen.vars_dict_lib as vars_dict_lib
# Folder containing all gyp files and generated gypd files.
GYP_FOLDER = 'gyp'
def generate_var_dict(target_dir, target_file, skia_arch_type, have_neon):
    """Create a VarsDict for a particular arch type.

    Each paramater is passed directly to android_framework_gyp.main().

    Args:
      target_dir: Directory containing gyp files.
      target_file: Target gyp file.
      skia_arch_type: Target architecture.
      have_neon: Whether the target should build for neon.

    Returns:
      A VarsDict containing the variable definitions determined by gyp.
    """
    # Run gyp to produce a .gypd file for this arch, parse it into the dict,
    # then remove the generated files so successive arch runs start clean.
    result_file = android_framework_gyp.main(target_dir, target_file,
                                             skia_arch_type, have_neon)
    var_dict = vars_dict_lib.VarsDict()
    gypd_parser.parse_gypd(var_dict, result_file, '.')
    android_framework_gyp.clean_gypd_files(target_dir)
    # Python 2 print statement: progress dot, no newline.
    print '.',
    return var_dict
def main(target_dir=None, require_sk_user_config=False):
    """Create Android.mk for the Android framework's external/skia.

    Builds Android.mk using Skia's gyp files.

    Args:
      target_dir: Directory in which to place 'Android.mk'. If None, the file
        will be placed in skia's root directory.
      require_sk_user_config: If True, raise an AssertionError if
        SkUserConfig.h does not exist.
    """
    # Create a temporary folder to hold gyp and gypd files. Create it in SKIA_DIR
    # so that it is a sibling of gyp/, so the relationships between gyp files and
    # other files (e.g. platform_tools/android/gyp/dependencies.gypi, referenced
    # by android_deps.gyp as a relative path) is unchanged.
    # Use mkdtemp to find an unused folder name, but then delete it so copytree
    # can be called with a non-existent directory.
    tmp_folder = tempfile.mkdtemp(dir=SKIA_DIR)
    os.rmdir(tmp_folder)
    shutil.copytree(os.path.join(SKIA_DIR, GYP_FOLDER), tmp_folder)
    try:
        main_gyp_file = 'android_framework_lib.gyp'
        # Python 2 print statement: trailing comma suppresses the newline.
        print 'Creating Android.mk',
        # Generate a separate VarsDict for each architecture type. For each
        # archtype:
        # 1. call android_framework_gyp.main() to generate gypd files
        # 2. call parse_gypd to read those gypd files into the VarsDict
        # 3. delete the gypd files
        #
        # Once we have the VarsDict for each architecture type, we combine them all
        # into a single Android.mk file, which can build targets of any
        # architecture type.
        # The default uses a non-existant archtype, to find all the general
        # variable definitions.
        default_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'other',
                                             False)
        arm_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm', False)
        arm_neon_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm',
                                              True)
        x86_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'x86', False)
        mips_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips', False)
        mips64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'mips64',
                                            False)
        arm64_var_dict = generate_var_dict(tmp_folder, main_gyp_file, 'arm64',
                                           False)
        # Compute the intersection of all targets. All the files in the intersection
        # should be part of the makefile always. Each dict will now contain trimmed
        # lists containing only variable definitions specific to that configuration.
        var_dict_list = [default_var_dict, arm_var_dict, arm_neon_var_dict,
                         x86_var_dict, mips_var_dict, mips64_var_dict,
                         arm64_var_dict]
        common = vars_dict_lib.intersect(var_dict_list)
        common.LOCAL_MODULE.add('libskia')
        # Create SkUserConfig
        user_config = os.path.join(SKIA_DIR, 'include', 'config', 'SkUserConfig.h')
        if target_dir:
            dst_dir = target_dir
        else:
            dst_dir = os.path.join(SKIA_DIR, 'include', 'core')
        generate_user_config.generate_user_config(
            original_sk_user_config=user_config,
            require_sk_user_config=require_sk_user_config, target_dir=dst_dir,
            ordered_set=common.DEFINES)
        # Emit a standalone makefile for each tool target.
        tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                           target_file='tests.gyp',
                                           skia_trunk=target_dir,
                                           dest_dir='tests',
                                           skia_lib_var_dict=common,
                                           local_module_name='skia_test',
                                           local_module_tags=['tests'])
        tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                           target_file='bench.gyp',
                                           skia_trunk=target_dir,
                                           dest_dir='bench',
                                           skia_lib_var_dict=common,
                                           local_module_name='skia_bench',
                                           local_module_tags=['tests'],
                                           place_in_local_tmp=True)
        tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                           target_file='gm.gyp',
                                           skia_trunk=target_dir,
                                           dest_dir='gm',
                                           skia_lib_var_dict=common,
                                           local_module_name='skia_gm',
                                           local_module_tags=['tests'])
        tool_makefile_writer.generate_tool(gyp_dir=tmp_folder,
                                           target_file='dm.gyp',
                                           skia_trunk=target_dir,
                                           dest_dir='dm',
                                           skia_lib_var_dict=common,
                                           local_module_name='skia_dm',
                                           local_module_tags=['tests'])
        # Now that the defines have been written to SkUserConfig and they've been
        # used to skip adding them to the tools makefiles, they are not needed in
        # Android.mk. Reset DEFINES.
        common.DEFINES.reset()
        # Further trim arm_neon_var_dict with arm_var_dict. After this call,
        # arm_var_dict (which will now be the intersection) includes all definitions
        # used by both arm and arm + neon, and arm_neon_var_dict will only contain
        # those specific to arm + neon.
        arm_var_dict = vars_dict_lib.intersect([arm_var_dict, arm_neon_var_dict])
        # Now create a list of VarsDictData holding everything but common.
        deviations_from_common = []
        deviations_from_common.append(makefile_writer.VarsDictData(
            arm_var_dict, 'arm'))
        deviations_from_common.append(makefile_writer.VarsDictData(
            arm_neon_var_dict, 'arm', 'ARCH_ARM_HAVE_NEON'))
        deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
                                                                   'x86'))
        # Currently, x86_64 is identical to x86
        deviations_from_common.append(makefile_writer.VarsDictData(x86_var_dict,
                                                                   'x86_64'))
        deviations_from_common.append(makefile_writer.VarsDictData(mips_var_dict,
                                                                   'mips'))
        deviations_from_common.append(makefile_writer.VarsDictData(mips64_var_dict,
                                                                   'mips64'))
        deviations_from_common.append(makefile_writer.VarsDictData(arm64_var_dict,
                                                                   'arm64'))
        makefile_writer.write_android_mk(target_dir=target_dir,
                                         common=common,
                                         deviations_from_common=deviations_from_common)
    finally:
        # Always remove the temporary gyp workspace, even on failure.
        shutil.rmtree(tmp_folder)
# Allow running this generator directly from the command line.
if __name__ == '__main__':
    main()
|
Yinzhe-Qi/YuiOss
|
refs/heads/master
|
yui_oss/console.py
|
1
|
# -*- coding: utf-8 -*-
import argparse
from colorama import init, Fore
from argparse import ArgumentParser
from .manager import OssFileManager, VERSION
from .exception import *
import yaml
import sys
import os
class Yui:
    """Command-line console for YuiOss.

    Persists the active profile / bucket / root directory in a small YAML
    attribute file (``.yui``) living next to the script, so state survives
    between invocations, and dispatches the sub-commands
    ``cd/pf/bkt/ls/ul/dl/cp/mv/rm`` to an :class:`OssFileManager`.
    """

    # Directory of the running script; the attribute file is stored there.
    path = sys.path[0]
    if os.path.isfile(path):
        path = os.path.dirname(path)
    ATTR_FILE = path + "/.yui"

    def __init__(self, config_file_path):
        """Load persisted attributes and user config, then build the parser.

        :param config_file_path: path to a YAML config file holding the
            ``profiles`` mapping (auth keys, endpoint, default bucket) and
            the ``proxies`` settings.
        """
        try:
            with open(self.ATTR_FILE, 'r') as f:
                # safe_load: the attribute file is plain data; yaml.load
                # would allow arbitrary object construction.
                self.attrs = yaml.safe_load(f)
            self.profile = self.attrs["profile"] if self.attrs and "profile" in self.attrs else None
            self.bucket = self.attrs["bucket"] if self.attrs and "bucket" in self.attrs else None
            self.root = self.attrs["root"] if self.attrs and "root" in self.attrs else ""
        except FileNotFoundError:
            # First run: create the attribute file with empty defaults.
            with open(self.ATTR_FILE, "w") as f:
                self.attrs = {
                    "profile": None,
                    "bucket": None,
                    "root": ""
                }
                yaml.dump(self.attrs, f)
            self.profile = None
            self.bucket = None
            self.root = ""
        with open(config_file_path, 'r') as f:
            # Config is user-authored plain data as well -> safe_load.
            self.config = yaml.safe_load(f)
        profiles = self.config["profiles"]
        if len(profiles.keys()):
            # Fall back to the first configured profile when the persisted
            # one is missing or no longer present in the config.
            if not self.profile or self.profile not in profiles.keys():
                self.profile = list(profiles.keys())[0]
            if not self.bucket:
                self.bucket = profiles[self.profile]["default_bucket"]
            self.fm = OssFileManager(profiles[self.profile]["auth_key"],
                                     profiles[self.profile]["auth_key_secret"],
                                     profiles[self.profile]["endpoint"],
                                     self.bucket,
                                     proxies=self.config["proxies"])
            self.attrs["profile"] = self.profile
            self.attrs["bucket"] = self.bucket
            self.update_attr()
        self.args = None
        self.methods = ("cd", "pf", "bkt", "ls", "ul", "dl", "cp", "mv", "rm")
        self.parser = ArgumentParser(description="YuiOss console application ver " + VERSION)
        self.parser.set_defaults(verbose=True)
        self.parser.add_argument("-v", "--verbose", dest="verbose", action="store_true")
        self.parser.add_argument("-q", "--quiet", dest="verbose", action="store_false")
        self.parser.add_argument("-a", "--all", action="store_true")
        self.parser.add_argument("-r", "--recursive", action="store_true")
        self.parser.add_argument("-l", "--list", action="store_true")
        self.parser.add_argument("-d", "--delete", action="store_true")
        self.parser.add_argument("-c", "--create", action="store_true")
        self.parser.add_argument("method", choices=self.methods, nargs=1)
        self.parser.add_argument("args", nargs=argparse.ZERO_OR_MORE)
        # colorama: enable ANSI color output on Windows consoles too.
        init()

    def run(self):
        """Parse command-line arguments and dispatch the chosen sub-command."""
        self.args = self.parser.parse_args()
        method = self.args.method[0]
        if method in self.methods:
            getattr(self, method)()

    def update_attr(self):
        """Persist the current console attributes back to the attribute file."""
        with open(self.ATTR_FILE, 'w+') as f:
            yaml.dump(self.attrs, f)

    def on_success(self, method, src, dest, result):
        """Success callback: report the finished operation in green (verbose mode only)."""
        if self.args.verbose:
            print(Fore.GREEN + str(method) + " success: " +
                  src + ("" if not dest else " --> " + dest))

    def on_error(self, method, src, dest, result):
        """Failure callback: always report the failed operation in red."""
        # BUG FIX: this previously printed "... success: " for failures,
        # making errors indistinguishable from successes in the output.
        print(Fore.RED + str(method) + " error: " +
              src + ("" if not dest else " --> " + dest))

    def on_progress(self, consumed_bytes, total_bytes):
        """Progress callback: render an in-place percentage indicator."""
        if total_bytes:
            rate = int(100 * (float(consumed_bytes) / float(total_bytes)))
            # '\r' rewrites the same console line; newline only at 100%.
            end = '\n' if rate == 100 else ''
            print('\rprogress: {0}%'.format(rate), end=end)

    def cd(self):
        """Change the OSS current directory.

        The target comes from ``self.args.args[0]``; it is treated as an
        absolute path when it starts with the separator, otherwise it is
        resolved relative to the current root.  ``.`` and ``..`` segments
        are normalized.  With no argument the root is reset to the bucket
        top level.
        """
        self.basic_info_print()
        if not len(self.args.args):
            self.root = ""
        else:
            path = self.args.args[0]
            if not self.fm.is_dir(path):
                print(Fore.RED + "cd path should be a directory")
                return
            path = self.fm.norm_path(path)
            tmp_root = path[1:] if path.startswith(self.fm.SEP) else self.root + path
            root_segs = tmp_root.split(self.fm.SEP)
            new_root_segs = []
            for seg in root_segs:
                if seg == ".":
                    continue
                elif seg == "..":
                    # ".." pops one level; silently ignored at bucket top.
                    if new_root_segs:
                        new_root_segs.pop()
                else:
                    new_root_segs.append(seg)
            self.root = self.fm.SEP.join(new_root_segs)
        self.attrs["root"] = self.root
        self.update_attr()
        print(Fore.GREEN + "current directory changed to: /" + self.root)

    def pf(self):
        """Profile sub-command.

        * no flag, no argument : show the current profile name
        * no flag, one name    : switch to the given profile
        * -l, --list           : list all profile names
        """
        self.basic_info_print()
        try:
            profiles = self.config["profiles"]
            # list profiles
            if self.args.list:
                print(Fore.GREEN + "listing {0} profiles:\n".format(len(profiles)) +
                      '\t'.join(profiles.keys()) if len(profiles)
                      else (Fore.YELLOW + "no profile found in config.yaml"))
            # show current profile
            elif len(self.args.args) < 1:
                print(Fore.GREEN + "current profile is : " + self.profile)
            # change profile
            else:
                if self.args.args[0] in profiles.keys():
                    self.profile = self.args.args[0]
                    self.bucket = profiles[self.profile]["default_bucket"]
                    self.fm = OssFileManager(profiles[self.profile]["auth_key"],
                                             profiles[self.profile]["auth_key_secret"],
                                             profiles[self.profile]["endpoint"],
                                             self.bucket,
                                             proxies=self.config["proxies"])
                    self.attrs["profile"] = self.profile
                    self.attrs["bucket"] = self.bucket
                    self.update_attr()
                    print(Fore.GREEN + "current profile changed to : " + self.profile)
                else:
                    print(Fore.RED + "given profile name not found in config.yaml")
        except YuiException as e:
            # BUG FIX: Fore.RED + e raised TypeError (str + exception);
            # stringify the exception before concatenating.
            print(Fore.RED + str(e))

    def bkt(self):
        # FIXME: multiple bugs found
        """Bucket sub-command.

        * no flag, no argument : show the current bucket name
        * no flag, one name    : switch to the given bucket
        * -l, --list           : list all bucket names
        * -d, --delete         : delete the named bucket(s)
        * -c, --create         : create the named bucket(s)
        """
        self.basic_info_print()
        try:
            # list buckets
            if self.args.list:
                buckets = self.fm.list_bucket()
                print(Fore.GREEN + "listing {0} buckets:\n".format(len(buckets)) +
                      '\t'.join(buckets) if len(buckets)
                      else (Fore.YELLOW + "there is no bucket"))
            # create bucket(s)
            elif self.args.create:
                for bkt in self.args.args:
                    self.fm.create_bucket(bkt)
            # delete bucket(s)
            elif self.args.delete:
                for bkt in self.args.args:
                    self.fm.delete_bucket(bkt)
            # show current bucket
            elif len(self.args.args) < 1:
                print(Fore.GREEN + "current bucket is : " + self.fm.bucket_name)
            # change bucket
            else:
                self.fm.change_bucket(self.args.args[0])
                self.bucket = self.fm.bucket_name
                self.attrs["bucket"] = self.bucket
                self.update_attr()
                print(Fore.GREEN + "current bucket changed to : " + self.fm.bucket_name)
        except YuiException as e:
            # BUG FIX: Fore.RED + e raised TypeError (str + exception).
            print(Fore.RED + str(e))

    def ls(self):
        """List sub-directories and files of the current directory."""
        self.basic_info_print()
        # Strip the root prefix so entries show as names relative to root.
        files = [obj.key.replace(self.root, '') for obj in self.fm.list_dir(self.root, self.args.all) if obj.key != self.root]
        print((Fore.GREEN + "listing {0} files in /{1}:\n".format(len(files), self.root) +
               '\t'.join(files)) if len(files)
              else (Fore.YELLOW + "current directory: /{0} is empty.".format(self.root)))

    def ul(self):
        """Upload a local file/directory to OSS: ``ul src [dest]``."""
        self.basic_info_print()
        if len(self.args.args) < 1:
            print(Fore.RED + "'ul' needs at least one input argument: src[, dest]")
            return
        src = self.args.args[0]
        if len(self.args.args) < 2:
            # No destination given: upload into the current root.
            dest = self.root
        else:
            dest = self.args.args[1][1:] if self.args.args[1].startswith(self.fm.SEP) else self.root + self.args.args[1]
        try:
            self.fm.upload(src, dest,
                           recursive=self.args.recursive, progress_callback=self.on_progress,
                           on_success=self.on_success, on_error=self.on_error)
        except YuiException as e:
            print(Fore.RED + "'ul' encountered an error: \n" +
                  str(e))

    def dl(self):
        """Download an OSS file/directory to a local path: ``dl src [dest]``."""
        self.basic_info_print()
        if len(self.args.args) < 1:
            print(Fore.RED + "'dl' needs at least 1 input argument: src[, dest]")
            return
        src = self.args.args[0][1:] if self.args.args[0].startswith(self.fm.SEP) else self.root + self.args.args[0]
        # Default destination is the current working directory.
        dest = os.path.abspath(self.args.args[1]) if len(self.args.args) > 1 else os.path.abspath('.')
        try:
            self.fm.download(src, dest,
                             recursive=self.args.recursive, progress_callback=self.on_progress,
                             on_success=self.on_success, on_error=self.on_error)
        except YuiException as e:
            print(Fore.RED + "'dl' encountered an error: \n" +
                  str(e))

    def cp(self):
        """Copy within OSS (recursive by default): ``cp src dest``."""
        self.basic_info_print()
        if len(self.args.args) != 2:
            print(Fore.RED + "'cp' needs 2 input arguments: src, dest")
            return
        src = self.args.args[0][1:] if self.args.args[0].startswith(self.fm.SEP) else self.root + self.args.args[0]
        dest = self.args.args[1][1:] if self.args.args[1].startswith(self.fm.SEP) else self.root + self.args.args[1]
        try:
            self.fm.copy(src, dest,
                         on_success=self.on_success, on_error=self.on_error)
        except YuiException as e:
            print(Fore.RED + "'cp' encountered an error: \n" +
                  str(e))

    def mv(self):
        """Move within OSS (recursive by default): ``mv src dest``."""
        self.basic_info_print()
        if len(self.args.args) != 2:
            print(Fore.RED + "'mv' needs 2 input arguments: src, dest")
            return
        src = self.args.args[0][1:] if self.args.args[0].startswith(self.fm.SEP) else self.root + self.args.args[0]
        dest = self.args.args[1][1:] if self.args.args[1].startswith(self.fm.SEP) else self.root + self.args.args[1]
        try:
            self.fm.move(src, dest,
                         on_success=self.on_success, on_error=self.on_error)
        except YuiException as e:
            print(Fore.RED + "'mv' encountered an error: \n" +
                  str(e))

    def rm(self):
        """Delete an OSS file/directory: ``rm src`` (``-r`` for recursive)."""
        self.basic_info_print()
        if len(self.args.args) != 1:
            print(Fore.RED + "'rm' needs 1 input argument: src")
            return
        src = self.args.args[0][1:] if self.args.args[0].startswith(self.fm.SEP) else self.root + self.args.args[0]
        try:
            self.fm.delete(src, recursive=self.args.recursive,
                           on_success=self.on_success, on_error=self.on_error)
        except YuiException as e:
            print(Fore.RED + "'rm' encountered an error: \n" +
                  str(e))

    def basic_info_print(self):
        """Print the current bucket / root header shown before every command."""
        print(Fore.BLUE + "bucket@ " + self.fm.bucket_name + "\t" +
              "root@ " + self.root + "\n")
|
bostrick/tch.cowork
|
refs/heads/master
|
tch/__init__.py
|
916
|
# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
try:
    # Prefer setuptools' namespace-package support when pkg_resources is
    # importable: lets multiple distributions contribute to this package.
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    # Fallback for environments without setuptools: extend __path__ with
    # the stdlib pkgutil mechanism instead.
    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)
|
sourcepole/kadas-albireo
|
refs/heads/master
|
tests/src/python/test_qgsrasterfilewriter.py
|
11
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Radim Blazek'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
import glob
from PyQt4.QtCore import (QTemporaryFile,
QDir)
from qgis.core import (QgsRasterLayer,
QgsRasterChecker,
QgsRasterPipe,
QgsRasterFileWriter,
QgsRasterProjector)
from utilities import (unitTestDataPath,
getQgisTestApp,
TestCase,
unittest
#expectedFailure
)
# Convenience instances in case you may need them
# not used in this test
# NOTE(review): getQgisTestApp() presumably also initializes the QGIS
# application singleton as a side effect — confirm before removing.
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsRasterFileWriter(TestCase):
    """Round-trip tests for QgsRasterFileWriter: write each test raster to a
    temporary file and compare it against the original with QgsRasterChecker.
    """

    def __init__(self, methodName):
        """Initialize the base TestCase and the HTML report buffer."""
        unittest.TestCase.__init__(self, methodName)
        self.testDataDir = unitTestDataPath()
        self.report = "<h1>Python Raster File Writer Tests</h1>\n"

    def write(self, theRasterName):
        """Write one raster through the file-writer pipe and verify it.

        :param theRasterName: path of the raster relative to the test data dir.
        :returns: True when the written file matches the source raster.
        """
        print(theRasterName)
        path = "%s/%s" % (self.testDataDir, theRasterName)
        rasterLayer = QgsRasterLayer(path, "test")
        if not rasterLayer.isValid():
            return False
        provider = rasterLayer.dataProvider()
        tmpFile = QTemporaryFile()
        tmpFile.open()  # fileName is not available until open
        tmpName = tmpFile.fileName()
        tmpFile.close()
        # do not remove when the object is destroyed so that we can read
        # the file and inspect the difference on failure
        tmpFile.setAutoRemove(False)
        fileWriter = QgsRasterFileWriter(tmpName)
        pipe = QgsRasterPipe()
        if not pipe.set(provider.clone()):
            print("Cannot set pipe provider")
            return False
        # Identity projection (same source/dest CRS) to exercise the pipe.
        projector = QgsRasterProjector()
        projector.setCRS(provider.crs(), provider.crs())
        if not pipe.insert(2, projector):
            print("Cannot set pipe projector")
            return False
        fileWriter.writeRaster(
            pipe,
            provider.xSize(),
            provider.ySize(),
            provider.extent(),
            provider.crs())
        checker = QgsRasterChecker()
        ok = checker.runTest("gdal", tmpName, "gdal", path)
        self.report += checker.report()
        # All OK, we can delete the file
        tmpFile.setAutoRemove(ok)
        return ok

    def testWrite(self):
        """Round-trip every .tif in the test data dir; fail if any differs."""
        # BUG FIX: allOk was (re)initialized inside the loop, so only the
        # last raster's result decided the assertion, and an empty glob left
        # it unbound (NameError).  Initialize once before iterating.
        allOk = True
        for name in glob.glob("%s/raster/*.tif" % self.testDataDir):
            baseName = os.path.basename(name)
            ok = self.write("raster/%s" % baseName)
            if not ok:
                allOk = False
        reportFilePath = "%s/qgistest.html" % QDir.tempPath()
        reportFile = open(reportFilePath, 'a')
        reportFile.write(self.report)
        reportFile.close()
        assert allOk, "Raster file writer test failed"
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
Aelshafei/a2lr
|
refs/heads/master
|
ua_parser/_regexes.py
|
1
|
############################################
# NOTICE: This file is autogenerated from #
# regexes.yaml. Do not edit by hand, #
# instead, re-run `setup.py build_regexes` #
############################################
#from __future__ import absolute_import
from ua_parser.user_agent_parser import (
UserAgentParser, DeviceParser, OSParser,
)
__all__ = (
'USER_AGENT_PARSERS', 'DEVICE_PARSERS', 'OS_PARSERS',
)
USER_AGENT_PARSERS = [
UserAgentParser(
'(CFNetwork)(?:/(\\d+)\\.(\\d+)\\.?(\\d+)?)?',
'CFNetwork',
None,
None,
),
UserAgentParser(
'(Pingdom.com_bot_version_)(\\d+)\\.(\\d+)',
'PingdomBot',
None,
None,
),
UserAgentParser(
'(facebookexternalhit)/(\\d+)\\.(\\d+)',
'FacebookBot',
None,
None,
),
UserAgentParser(
'Google.*/\\+/web/snippet',
'GooglePlusBot',
None,
None,
),
UserAgentParser(
'(Twitterbot)/(\\d+)\\.(\\d+)',
'TwitterBot',
None,
None,
),
UserAgentParser(
'/((?:Ant-)?Nutch|[A-z]+[Bb]ot|[A-z]+[Ss]pider|Axtaris|fetchurl|Isara|ShopSalad|Tailsweep)[ \\-](\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?',
None,
None,
None,
),
UserAgentParser(
'(008|Altresium|Argus|BaiduMobaider|BoardReader|DNSGroup|DataparkSearch|EDI|Goodzer|Grub|INGRID|Infohelfer|LinkedInBot|LOOQ|Nutch|PathDefender|Peew|PostPost|Steeler|Twitterbot|VSE|WebCrunch|WebZIP|Y!J-BR[A-Z]|YahooSeeker|envolk|sproose|wminer)/(\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?',
None,
None,
None,
),
UserAgentParser(
'(MSIE) (\\d+)\\.(\\d+)([a-z]\\d?)?;.* MSIECrawler',
'MSIECrawler',
None,
None,
),
UserAgentParser(
'(Google-HTTP-Java-Client|Apache-HttpClient|http%20client|Python-urllib|HttpMonitor|TLSProber|WinHTTP|JNLP)(?:[ /](\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?)?',
None,
None,
None,
),
UserAgentParser(
'(1470\\.net crawler|50\\.nu|8bo Crawler Bot|Aboundex|Accoona-[A-z]+-Agent|AdsBot-Google(?:-[a-z]+)?|altavista|AppEngine-Google|archive.*?\\.org_bot|archiver|Ask Jeeves|[Bb]ai[Dd]u[Ss]pider(?:-[A-Za-z]+)*|bingbot|BingPreview|blitzbot|BlogBridge|BoardReader(?: [A-Za-z]+)*|boitho.com-dc|BotSeer|\\b\\w*favicon\\w*\\b|\\bYeti(?:-[a-z]+)?|Catchpoint bot|[Cc]harlotte|Checklinks|clumboot|Comodo HTTP\\(S\\) Crawler|Comodo-Webinspector-Crawler|ConveraCrawler|CRAWL-E|CrawlConvera|Daumoa(?:-feedfetcher)?|Feed Seeker Bot|findlinks|Flamingo_SearchEngine|FollowSite Bot|furlbot|Genieo|gigabot|GomezAgent|gonzo1|(?:[a-zA-Z]+-)?Googlebot(?:-[a-zA-Z]+)?|Google SketchUp|grub-client|gsa-crawler|heritrix|HiddenMarket|holmes|HooWWWer|htdig|ia_archiver|ICC-Crawler|Icarus6j|ichiro(?:/mobile)?|IconSurf|IlTrovatore(?:-Setaccio)?|InfuzApp|Innovazion Crawler|InternetArchive|IP2[a-z]+Bot|jbot\\b|KaloogaBot|Kraken|Kurzor|larbin|LEIA|LesnikBot|Linguee Bot|LinkAider|LinkedInBot|Lite Bot|Llaut|lycos|Mail\\.RU_Bot|masidani_bot|Mediapartners-Google|Microsoft .*? Bot|mogimogi|mozDex|MJ12bot|msnbot(?:-media *)?|msrbot|netresearch|Netvibes|NewsGator[^/]*|^NING|Nutch[^/]*|Nymesis|ObjectsSearch|Orbiter|OOZBOT|PagePeeker|PagesInventory|PaxleFramework|Peeplo Screenshot Bot|PlantyNet_WebRobot|Pompos|Read%20Later|Reaper|RedCarpet|Retreiver|Riddler|Rival IQ|scooter|Scrapy|Scrubby|searchsight|seekbot|semanticdiscovery|Simpy|SimplePie|SEOstats|SimpleRSS|SiteCon|Slurp|snappy|Speedy Spider|Squrl Java|TheUsefulbot|ThumbShotsBot|Thumbshots\\.ru|TwitterBot|URL2PNG|Vagabondo|VoilaBot|^vortex|Votay bot|^voyager|WASALive.Bot|Web-sniffer|WebThumb|WeSEE:[A-z]+|WhatWeb|WIRE|WordPress|Wotbox|www\\.almaden\\.ibm\\.com|Xenu(?:.s)? Link Sleuth|Xerka [A-z]+Bot|yacy(?:bot)?|Yahoo[a-z]*Seeker|Yahoo! Slurp|Yandex\\w+|YodaoBot(?:-[A-z]+)?|YottaaMonitor|Yowedo|^Zao|^Zao-Crawler|ZeBot_www\\.ze\\.bz|ZooShot|ZyBorg)(?:[ /]v?(\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?)?',
None,
None,
None,
),
UserAgentParser(
'(?:\\/[A-Za-z0-9\\.]+)? *([A-Za-z0-9 \\-_\\!\\[\\]:]*(?:[Aa]rchiver|[Ii]ndexer|[Ss]craper|[Bb]ot|[Ss]pider|[Cc]rawl[a-z]*))/(\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?',
None,
None,
None,
),
UserAgentParser(
'(?:\\/[A-Za-z0-9\\.]+)? *([A-Za-z0-9 _\\!\\[\\]:]*(?:[Aa]rchiver|[Ii]ndexer|[Ss]craper|[Bb]ot|[Ss]pider|[Cc]rawl[a-z]*)) (\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?',
None,
None,
None,
),
UserAgentParser(
'((?:[A-z0-9]+|[A-z\\-]+ ?)?(?: the )?(?:[Ss][Pp][Ii][Dd][Ee][Rr]|[Ss]crape|[A-Za-z0-9-]*(?:[^C][^Uu])[Bb]ot|[Cc][Rr][Aa][Ww][Ll])[A-z0-9]*)(?:(?:[ /]| v)(\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?)?',
None,
None,
None,
),
UserAgentParser(
'(HbbTV)/(\\d+)\\.(\\d+)\\.(\\d+) \\(',
None,
None,
None,
),
UserAgentParser(
'(Chimera|SeaMonkey|Camino)/(\\d+)\\.(\\d+)\\.?([ab]?\\d+[a-z]*)?',
None,
None,
None,
),
UserAgentParser(
'\\[FB.*;(FBAV)/(\\d+)(?:\\.(\\d+)(?:\\.(\\d)+)?)?',
'Facebook',
None,
None,
),
UserAgentParser(
'\\[(Pinterest)/[^\\]]+\\]',
None,
None,
None,
),
UserAgentParser(
'(Pinterest)(?: for Android(?: Tablet)?)?/(\\d+)(?:\\.(\\d+)(?:\\.(\\d)+)?)?',
None,
None,
None,
),
UserAgentParser(
'(Pale[Mm]oon)/(\\d+)\\.(\\d+)\\.?(\\d+)?',
'Pale Moon (Firefox Variant)',
None,
None,
),
UserAgentParser(
'(Fennec)/(\\d+)\\.(\\d+)\\.?([ab]?\\d+[a-z]*)',
'Firefox Mobile',
None,
None,
),
UserAgentParser(
'(Fennec)/(\\d+)\\.(\\d+)(pre)',
'Firefox Mobile',
None,
None,
),
UserAgentParser(
'(Fennec)/(\\d+)\\.(\\d+)',
'Firefox Mobile',
None,
None,
),
UserAgentParser(
'(?:Mobile|Tablet);.*(Firefox)/(\\d+)\\.(\\d+)',
'Firefox Mobile',
None,
None,
),
UserAgentParser(
'(Namoroka|Shiretoko|Minefield)/(\\d+)\\.(\\d+)\\.(\\d+(?:pre)?)',
'Firefox ($1)',
None,
None,
),
UserAgentParser(
'(Firefox)/(\\d+)\\.(\\d+)(a\\d+[a-z]*)',
'Firefox Alpha',
None,
None,
),
UserAgentParser(
'(Firefox)/(\\d+)\\.(\\d+)(b\\d+[a-z]*)',
'Firefox Beta',
None,
None,
),
UserAgentParser(
'(Firefox)-(?:\\d+\\.\\d+)?/(\\d+)\\.(\\d+)(a\\d+[a-z]*)',
'Firefox Alpha',
None,
None,
),
UserAgentParser(
'(Firefox)-(?:\\d+\\.\\d+)?/(\\d+)\\.(\\d+)(b\\d+[a-z]*)',
'Firefox Beta',
None,
None,
),
UserAgentParser(
'(Namoroka|Shiretoko|Minefield)/(\\d+)\\.(\\d+)([ab]\\d+[a-z]*)?',
'Firefox ($1)',
None,
None,
),
UserAgentParser(
'(Firefox).*Tablet browser (\\d+)\\.(\\d+)\\.(\\d+)',
'MicroB',
None,
None,
),
UserAgentParser(
'(MozillaDeveloperPreview)/(\\d+)\\.(\\d+)([ab]\\d+[a-z]*)?',
None,
None,
None,
),
UserAgentParser(
'(FxiOS)/(\\d+)\\.(\\d+)(\\.(\\d+))?(\\.(\\d+))?',
'Firefox iOS',
None,
None,
),
UserAgentParser(
'(Flock)/(\\d+)\\.(\\d+)(b\\d+?)',
None,
None,
None,
),
UserAgentParser(
'(RockMelt)/(\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Navigator)/(\\d+)\\.(\\d+)\\.(\\d+)',
'Netscape',
None,
None,
),
UserAgentParser(
'(Navigator)/(\\d+)\\.(\\d+)([ab]\\d+)',
'Netscape',
None,
None,
),
UserAgentParser(
'(Netscape6)/(\\d+)\\.(\\d+)\\.?([ab]?\\d+)?',
'Netscape',
None,
None,
),
UserAgentParser(
'(MyIBrow)/(\\d+)\\.(\\d+)',
'My Internet Browser',
None,
None,
),
UserAgentParser(
'(Opera Tablet).*Version/(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
None,
None,
None,
),
UserAgentParser(
'(Opera Mini)(?:/att)?/?(\\d+)?(?:\\.(\\d+))?(?:\\.(\\d+))?',
None,
None,
None,
),
UserAgentParser(
'(Opera)/.+Opera Mobi.+Version/(\\d+)\\.(\\d+)',
'Opera Mobile',
None,
None,
),
UserAgentParser(
'(Opera)/(\\d+)\\.(\\d+).+Opera Mobi',
'Opera Mobile',
None,
None,
),
UserAgentParser(
'Opera Mobi.+(Opera)(?:/|\\s+)(\\d+)\\.(\\d+)',
'Opera Mobile',
None,
None,
),
UserAgentParser(
'Opera Mobi',
'Opera Mobile',
None,
None,
),
UserAgentParser(
'(Opera)/9.80.*Version/(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
None,
None,
None,
),
UserAgentParser(
'(?:Mobile Safari).*(OPR)/(\\d+)\\.(\\d+)\\.(\\d+)',
'Opera Mobile',
None,
None,
),
UserAgentParser(
'(?:Chrome).*(OPR)/(\\d+)\\.(\\d+)\\.(\\d+)',
'Opera',
None,
None,
),
UserAgentParser(
'(Coast)/(\\d+).(\\d+).(\\d+)',
'Opera Coast',
None,
None,
),
UserAgentParser(
'(OPiOS)/(\\d+).(\\d+).(\\d+)',
'Opera Mini',
None,
None,
),
UserAgentParser(
'(hpw|web)OS/(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
'webOS Browser',
None,
None,
),
UserAgentParser(
'(luakit)',
'LuaKit',
None,
None,
),
UserAgentParser(
'(Snowshoe)/(\\d+)\\.(\\d+).(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Lightning)/(\\d+)\\.(\\d+)\\.?((?:[ab]?\\d+[a-z]*)|(?:\\d*))',
None,
None,
None,
),
UserAgentParser(
'(Firefox)/(\\d+)\\.(\\d+)\\.(\\d+(?:pre)?) \\(Swiftfox\\)',
'Swiftfox',
None,
None,
),
UserAgentParser(
'(Firefox)/(\\d+)\\.(\\d+)([ab]\\d+[a-z]*)? \\(Swiftfox\\)',
'Swiftfox',
None,
None,
),
UserAgentParser(
'(rekonq)/(\\d+)\\.(\\d+)\\.?(\\d+)? Safari',
'Rekonq',
None,
None,
),
UserAgentParser(
'rekonq',
'Rekonq',
None,
None,
),
UserAgentParser(
'(conkeror|Conkeror)/(\\d+)\\.(\\d+)\\.?(\\d+)?',
'Conkeror',
None,
None,
),
UserAgentParser(
'(konqueror)/(\\d+)\\.(\\d+)\\.(\\d+)',
'Konqueror',
None,
None,
),
UserAgentParser(
'(WeTab)-Browser',
None,
None,
None,
),
UserAgentParser(
'(Comodo_Dragon)/(\\d+)\\.(\\d+)\\.(\\d+)',
'Comodo Dragon',
None,
None,
),
UserAgentParser(
'(Symphony) (\\d+).(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Minimo)',
None,
None,
None,
),
UserAgentParser(
'PLAYSTATION 3.+WebKit',
'NetFront NX',
None,
None,
),
UserAgentParser(
'PLAYSTATION 3',
'NetFront',
None,
None,
),
UserAgentParser(
'(PlayStation Portable)',
'NetFront',
None,
None,
),
UserAgentParser(
'(PlayStation Vita)',
'NetFront NX',
None,
None,
),
UserAgentParser(
'AppleWebKit.+ (NX)/(\\d+)\\.(\\d+)\\.(\\d+)',
'NetFront NX',
None,
None,
),
UserAgentParser(
'(Nintendo 3DS)',
'NetFront NX',
None,
None,
),
UserAgentParser(
'(Silk)/(\\d+)\\.(\\d+)(?:\\.([0-9\\-]+))?',
'Amazon Silk',
None,
None,
),
UserAgentParser(
'(Puffin)/(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
None,
None,
None,
),
UserAgentParser(
'Windows Phone .*(Edge)/(\\d+)\\.(\\d+)',
'Edge Mobile',
None,
None,
),
UserAgentParser(
'(SamsungBrowser)/(\\d+)\\.(\\d+)',
'Samsung Internet',
None,
None,
),
UserAgentParser(
'(CrMo)/(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)',
'Chrome Mobile',
None,
None,
),
UserAgentParser(
'(CriOS)/(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)',
'Chrome Mobile iOS',
None,
None,
),
UserAgentParser(
'(Chrome)/(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+) Mobile',
'Chrome Mobile',
None,
None,
),
UserAgentParser(
'(chromeframe)/(\\d+)\\.(\\d+)\\.(\\d+)',
'Chrome Frame',
None,
None,
),
UserAgentParser(
'(UCBrowser)[ /](\\d+)\\.(\\d+)\\.(\\d+)',
'UC Browser',
None,
None,
),
UserAgentParser(
'(UC Browser)[ /](\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(UC Browser|UCBrowser|UCWEB)(\\d+)\\.(\\d+)\\.(\\d+)',
'UC Browser',
None,
None,
),
UserAgentParser(
'(SLP Browser)/(\\d+)\\.(\\d+)',
'Tizen Browser',
None,
None,
),
UserAgentParser(
'(SE 2\\.X) MetaSr (\\d+)\\.(\\d+)',
'Sogou Explorer',
None,
None,
),
UserAgentParser(
'(baidubrowser)[/\\s](\\d+)',
'Baidu Browser',
None,
None,
),
UserAgentParser(
'(FlyFlow)/(\\d+)\\.(\\d+)',
'Baidu Explorer',
None,
None,
),
UserAgentParser(
'(MQQBrowser/Mini)(?:(\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?)?',
'QQ Browser Mini',
None,
None,
),
UserAgentParser(
'(MQQBrowser)(?:/(\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?)?',
'QQ Browser Mobile',
None,
None,
),
UserAgentParser(
'(QQBrowser)(?:/(\\d+)(?:\\.(\\d+)\\.(\\d+)(?:\\.(\\d+))?)?)?',
'QQ Browser',
None,
None,
),
UserAgentParser(
'(Rackspace Monitoring)/(\\d+)\\.(\\d+)',
'RackspaceBot',
None,
None,
),
UserAgentParser(
'(PyAMF)/(\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(YaBrowser)/(\\d+)\\.(\\d+)\\.(\\d+)',
'Yandex Browser',
None,
None,
),
UserAgentParser(
'(Chrome)/(\\d+)\\.(\\d+)\\.(\\d+).* MRCHROME',
'Mail.ru Chromium Browser',
None,
None,
),
UserAgentParser(
'(AOL) (\\d+)\\.(\\d+); AOLBuild (\\d+)',
None,
None,
None,
),
UserAgentParser(
'(AdobeAIR|FireWeb|Jasmine|ANTGalio|Midori|Fresco|Lobo|PaleMoon|Maxthon|Lynx|OmniWeb|Dillo|Camino|Demeter|Fluid|Fennec|Epiphany|Shiira|Sunrise|Spotify|Flock|Netscape|Lunascape|WebPilot|NetFront|Netfront|Konqueror|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|Opera Mini|iCab|NetNewsWire|ThunderBrowse|Iris|UP\\.Browser|Bunjalloo|Google Earth|Raven for Mac|Openwave)/(\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'Microsoft Office Outlook 12\\.\\d+\\.\\d+|MSOffice 12',
'Outlook',
'2007',
None,
),
UserAgentParser(
'Microsoft Outlook 14\\.\\d+\\.\\d+|MSOffice 14',
'Outlook',
'2010',
None,
),
UserAgentParser(
'Microsoft Outlook 15\\.\\d+\\.\\d+',
'Outlook',
'2013',
None,
),
UserAgentParser(
'Microsoft Outlook (?:Mail )?16\\.\\d+\\.\\d+',
'Outlook',
'2016',
None,
),
UserAgentParser(
'(Airmail) (\\d+)\\.(\\d+)(?:\\.(\\d+))?',
None,
None,
None,
),
UserAgentParser(
'(Thunderbird)/(\\d+)\\.(\\d+)\\.(\\d+(?:pre)?)',
'Thunderbird',
None,
None,
),
UserAgentParser(
'(Vivaldi)/(\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Edge)/(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(brave)/(\\d+)\\.(\\d+)\\.(\\d+) Chrome',
'Brave',
None,
None,
),
UserAgentParser(
'(Chromium|Chrome)/(\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'\\b(Dolphin)(?: |HDCN/|/INT\\-)(\\d+)\\.(\\d+)\\.?(\\d+)?',
None,
None,
None,
),
UserAgentParser(
'(bingbot|Bolt|Jasmine|IceCat|Skyfire|Midori|Maxthon|Lynx|Arora|IBrowse|Dillo|Camino|Shiira|Fennec|Phoenix|Chrome|Flock|Netscape|Lunascape|Epiphany|WebPilot|Opera Mini|Opera|NetFront|Netfront|Konqueror|Googlebot|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|iCab|iTunes|MacAppStore|NetNewsWire|Space Bison|Stainless|Orca|Dolfin|BOLT|Minimo|Tizen Browser|Polaris|Abrowser|Planetweb|ICE Browser|mDolphin|qutebrowser|Otter|QupZilla)/(\\d+)\\.(\\d+)\\.?(\\d+)?',
None,
None,
None,
),
UserAgentParser(
'(Chromium|Chrome)/(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(IEMobile)[ /](\\d+)\\.(\\d+)',
'IE Mobile',
None,
None,
),
UserAgentParser(
'(iRider|Crazy Browser|SkipStone|iCab|Lunascape|Sleipnir|Maemo Browser) (\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(iCab|Lunascape|Opera|Android|Jasmine|Polaris) (\\d+)\\.(\\d+)\\.?(\\d+)?',
None,
None,
None,
),
UserAgentParser(
'(Kindle)/(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Android) Donut',
None,
'1',
'2',
),
UserAgentParser(
'(Android) Eclair',
None,
'2',
'1',
),
UserAgentParser(
'(Android) Froyo',
None,
'2',
'2',
),
UserAgentParser(
'(Android) Gingerbread',
None,
'2',
'3',
),
UserAgentParser(
'(Android) Honeycomb',
None,
'3',
None,
),
UserAgentParser(
'(MSIE) (\\d+)\\.(\\d+).*XBLWP7',
'IE Large Screen',
None,
None,
),
UserAgentParser(
'(Obigo)InternetBrowser',
None,
None,
None,
),
UserAgentParser(
'(Obigo)\\-Browser',
None,
None,
None,
),
UserAgentParser(
'(Obigo|OBIGO)[^\\d]*(\\d+)(?:.(\\d+))?',
'Obigo',
None,
None,
),
UserAgentParser(
'(MAXTHON|Maxthon) (\\d+)\\.(\\d+)',
'Maxthon',
None,
None,
),
UserAgentParser(
'(Maxthon|MyIE2|Uzbl|Shiira)',
None,
'0',
None,
),
UserAgentParser(
'(BrowseX) \\((\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(NCSA_Mosaic)/(\\d+)\\.(\\d+)',
'NCSA Mosaic',
None,
None,
),
UserAgentParser(
'(POLARIS)/(\\d+)\\.(\\d+)',
'Polaris',
None,
None,
),
UserAgentParser(
'(Embider)/(\\d+)\\.(\\d+)',
'Polaris',
None,
None,
),
UserAgentParser(
'(BonEcho)/(\\d+)\\.(\\d+)\\.?([ab]?\\d+)?',
'Bon Echo',
None,
None,
),
UserAgentParser(
'(iPod|iPhone|iPad).+Version/(\\d+)\\.(\\d+)(?:\\.(\\d+))?.* Safari',
'Mobile Safari',
None,
None,
),
UserAgentParser(
'(iPod|iPhone|iPad).+Version/(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
'Mobile Safari UI/WKWebView',
None,
None,
),
UserAgentParser(
'(iPod|iPhone|iPad);.*CPU.*OS (\\d+)_(\\d+)(?:_(\\d+))?.*Mobile.* Safari',
'Mobile Safari',
None,
None,
),
UserAgentParser(
'(iPod|iPhone|iPad);.*CPU.*OS (\\d+)_(\\d+)(?:_(\\d+))?.*Mobile',
'Mobile Safari UI/WKWebView',
None,
None,
),
UserAgentParser(
'(iPod|iPhone|iPad).* Safari',
'Mobile Safari',
None,
None,
),
UserAgentParser(
'(iPod|iPhone|iPad)',
'Mobile Safari UI/WKWebView',
None,
None,
),
UserAgentParser(
'(AvantGo) (\\d+).(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(OneBrowser)/(\\d+).(\\d+)',
'ONE Browser',
None,
None,
),
UserAgentParser(
'(Avant)',
None,
'1',
None,
),
UserAgentParser(
'(QtCarBrowser)',
None,
'1',
None,
),
UserAgentParser(
'^(iBrowser/Mini)(\\d+).(\\d+)',
'iBrowser Mini',
None,
None,
),
UserAgentParser(
'^(iBrowser|iRAPP)/(\\d+).(\\d+)',
None,
None,
None,
),
UserAgentParser(
'^(Nokia)',
'Nokia Services (WAP) Browser',
None,
None,
),
UserAgentParser(
'(NokiaBrowser)/(\\d+)\\.(\\d+).(\\d+)\\.(\\d+)',
'Nokia Browser',
None,
None,
),
UserAgentParser(
'(NokiaBrowser)/(\\d+)\\.(\\d+).(\\d+)',
'Nokia Browser',
None,
None,
),
UserAgentParser(
'(NokiaBrowser)/(\\d+)\\.(\\d+)',
'Nokia Browser',
None,
None,
),
UserAgentParser(
'(BrowserNG)/(\\d+)\\.(\\d+).(\\d+)',
'Nokia Browser',
None,
None,
),
UserAgentParser(
'(Series60)/5\\.0',
'Nokia Browser',
'7',
'0',
),
UserAgentParser(
'(Series60)/(\\d+)\\.(\\d+)',
'Nokia OSS Browser',
None,
None,
),
UserAgentParser(
'(S40OviBrowser)/(\\d+)\\.(\\d+)\\.(\\d+)\\.(\\d+)',
'Ovi Browser',
None,
None,
),
UserAgentParser(
'(Nokia)[EN]?(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(PlayBook).+RIM Tablet OS (\\d+)\\.(\\d+)\\.(\\d+)',
'BlackBerry WebKit',
None,
None,
),
UserAgentParser(
'(Black[bB]erry|BB10).+Version/(\\d+)\\.(\\d+)\\.(\\d+)',
'BlackBerry WebKit',
None,
None,
),
UserAgentParser(
'(Black[bB]erry)\\s?(\\d+)',
'BlackBerry',
None,
None,
),
UserAgentParser(
'(OmniWeb)/v(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Blazer)/(\\d+)\\.(\\d+)',
'Palm Blazer',
None,
None,
),
UserAgentParser(
'(Pre)/(\\d+)\\.(\\d+)',
'Palm Pre',
None,
None,
),
UserAgentParser(
'(ELinks)/(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(ELinks) \\((\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Links) \\((\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(QtWeb) Internet Browser/(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(PhantomJS)/(\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(AppleWebKit)/(\\d+)\\.?(\\d+)?\\+ .* Safari',
'WebKit Nightly',
None,
None,
),
UserAgentParser(
'(Version)/(\\d+)\\.(\\d+)(?:\\.(\\d+))?.*Safari/',
'Safari',
None,
None,
),
UserAgentParser(
'(Safari)/\\d+',
None,
None,
None,
),
UserAgentParser(
'(OLPC)/Update(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(OLPC)/Update()\\.(\\d+)',
None,
'0',
None,
),
UserAgentParser(
'(SEMC\\-Browser)/(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Teleca)',
'Teleca Browser',
None,
None,
),
UserAgentParser(
'(Phantom)/V(\\d+)\\.(\\d+)',
'Phantom Browser',
None,
None,
),
UserAgentParser(
'Trident(.*)rv.(\\d+)\\.(\\d+)',
'IE',
None,
None,
),
UserAgentParser(
'(Espial)/(\\d+)(?:\\.(\\d+))?(?:\\.(\\d+))?',
None,
None,
None,
),
UserAgentParser(
'(AppleWebKit)/(\\d+)\\.(\\d+)\\.(\\d+)',
'AppleMail',
None,
None,
),
UserAgentParser(
'(Firefox)/(\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
),
UserAgentParser(
'(Firefox)/(\\d+)\\.(\\d+)(pre|[ab]\\d+[a-z]*)?',
None,
None,
None,
),
UserAgentParser(
'([MS]?IE) (\\d+)\\.(\\d+)',
'IE',
None,
None,
),
UserAgentParser(
'(python-requests)/(\\d+)\\.(\\d+)',
'Python Requests',
None,
None,
),
UserAgentParser(
'(Java)[/ ]{0,1}\\d+\\.(\\d+)\\.(\\d+)[_-]*([a-zA-Z0-9]+)*',
None,
None,
None,
),
UserAgentParser(
'^(Roku)/DVP-(\\d+)\\.(\\d+)',
None,
None,
None,
),
]
DEVICE_PARSERS = [
DeviceParser(
'(?:(?:iPhone|Windows CE|Android).*(?:(?:Bot|Yeti)-Mobile|YRSpider|bots?/\\d|(?:bot|spider)\\.html)|AdsBot-Google-Mobile.*iPhone)',
'i',
'Spider',
'Spider',
'Smartphone',
),
DeviceParser(
'(?:DoCoMo|\\bMOT\\b|\\bLG\\b|Nokia|Samsung|SonyEricsson).*(?:(?:Bot|Yeti)-Mobile|bots?/\\d|(?:bot|crawler)\\.html|(?:jump|google|Wukong)bot|ichiro/mobile|/spider|YahooSeeker)',
'i',
'Spider',
'Spider',
'Feature Phone',
),
DeviceParser(
'\\bSmartWatch *\\( *([^;]+) *; *([^;]+) *;',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'Android Application[^\\-]+ - (Sony) ?(Ericsson)? (.+) \\w+ - ',
None,
'$1 $2',
'$1$2',
'$3',
),
DeviceParser(
'Android Application[^\\-]+ - (?:HTC|HUAWEI|LGE|LENOVO|MEDION|TCT) (HTC|HUAWEI|LG|LENOVO|MEDION|ALCATEL)[ _\\-](.+) \\w+ - ',
'i',
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'Android Application[^\\-]+ - ([^ ]+) (.+) \\w+ - ',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *([BLRQ]C\\d{4}[A-Z]+) +Build/',
None,
'3Q $1',
'3Q',
'$1',
),
DeviceParser(
'; *(?:3Q_)([^;/]+) +Build',
None,
'3Q $1',
'3Q',
'$1',
),
DeviceParser(
'Android [34].*; *(A100|A101|A110|A200|A210|A211|A500|A501|A510|A511|A700(?: Lite| 3G)?|A701|B1-A71|A1-\\d{3}|B1-\\d{3}|V360|V370|W500|W500P|W501|W501P|W510|W511|W700|Slider SL101|DA22[^;/]+) Build',
None,
'$1',
'Acer',
'$1',
),
DeviceParser(
'; *Acer Iconia Tab ([^;/]+) Build',
None,
'$1',
'Acer',
'$1',
),
DeviceParser(
'; *(Z1[1235]0|E320[^/]*|S500|S510|Liquid[^;/]*|Iconia A\\d+) Build',
None,
'$1',
'Acer',
'$1',
),
DeviceParser(
'; *(Acer |ACER )([^;/]+) Build',
None,
'$1$2',
'Acer',
'$2',
),
DeviceParser(
'; *(Advent )?(Vega(?:Bean|Comb)?).* Build',
None,
'$1$2',
'Advent',
'$2',
),
DeviceParser(
'; *(Ainol )?((?:NOVO|[Nn]ovo)[^;/]+) Build',
None,
'$1$2',
'Ainol',
'$2',
),
DeviceParser(
'; *AIRIS[ _\\-]?([^/;\\)]+) *(?:;|\\)|Build)',
'i',
'$1',
'Airis',
'$1',
),
DeviceParser(
'; *(OnePAD[^;/]+) Build',
'i',
'$1',
'Airis',
'$1',
),
DeviceParser(
'; *Airpad[ \\-]([^;/]+) Build',
None,
'Airpad $1',
'Airpad',
'$1',
),
DeviceParser(
'; *(one ?touch) (EVO7|T10|T20) Build',
None,
'Alcatel One Touch $2',
'Alcatel',
'One Touch $2',
),
DeviceParser(
'; *(?:alcatel[ _])?(?:(?:one[ _]?touch[ _])|ot[ \\-])([^;/]+);? Build',
'i',
'Alcatel One Touch $1',
'Alcatel',
'One Touch $1',
),
DeviceParser(
'; *(TCL)[ _]([^;/]+) Build',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(Vodafone Smart II|Optimus_Madrid) Build',
None,
'Alcatel $1',
'Alcatel',
'$1',
),
DeviceParser(
'; *BASE_Lutea_3 Build',
None,
'Alcatel One Touch 998',
'Alcatel',
'One Touch 998',
),
DeviceParser(
'; *BASE_Varia Build',
None,
'Alcatel One Touch 918D',
'Alcatel',
'One Touch 918D',
),
DeviceParser(
'; *((?:FINE|Fine)\\d[^;/]+) Build',
None,
'$1',
'Allfine',
'$1',
),
DeviceParser(
'; *(ALLVIEW[ _]?|Allview[ _]?)((?:Speed|SPEED).*) Build/',
None,
'$1$2',
'Allview',
'$2',
),
DeviceParser(
'; *(ALLVIEW[ _]?|Allview[ _]?)?(AX1_Shine|AX2_Frenzy) Build',
None,
'$1$2',
'Allview',
'$2',
),
DeviceParser(
'; *(ALLVIEW[ _]?|Allview[ _]?)([^;/]*) Build',
None,
'$1$2',
'Allview',
'$2',
),
DeviceParser(
'; *(A13-MID) Build',
None,
'$1',
'Allwinner',
'$1',
),
DeviceParser(
'; *(Allwinner)[ _\\-]?([^;/]+) Build',
None,
'$1 $2',
'Allwinner',
'$1',
),
DeviceParser(
'; *(A651|A701B?|A702|A703|A705|A706|A707|A711|A712|A713|A717|A722|A785|A801|A802|A803|A901|A902|A1002|A1003|A1006|A1007|A9701|A9703|Q710|Q80) Build',
None,
'$1',
'Amaway',
'$1',
),
DeviceParser(
'; *(?:AMOI|Amoi)[ _]([^;/]+) Build',
None,
'Amoi $1',
'Amoi',
'$1',
),
DeviceParser(
'^(?:AMOI|Amoi)[ _]([^;/]+) Linux',
None,
'Amoi $1',
'Amoi',
'$1',
),
DeviceParser(
'; *(MW(?:0[789]|10)[^;/]+) Build',
None,
'$1',
'Aoc',
'$1',
),
DeviceParser(
'; *(G7|M1013|M1015G|M11[CG]?|M-?12[B]?|M15|M19[G]?|M30[ACQ]?|M31[GQ]|M32|M33[GQ]|M36|M37|M38|M701T|M710|M712B|M713|M715G|M716G|M71(?:G|GS|T)?|M72[T]?|M73[T]?|M75[GT]?|M77G|M79T|M7L|M7LN|M81|M810|M81T|M82|M92|M92KS|M92S|M717G|M721|M722G|M723|M725G|M739|M785|M791|M92SK|M93D) Build',
None,
'Aoson $1',
'Aoson',
'$1',
),
DeviceParser(
'; *Aoson ([^;/]+) Build',
'i',
'Aoson $1',
'Aoson',
'$1',
),
DeviceParser(
'; *[Aa]panda[ _\\-]([^;/]+) Build',
None,
'Apanda $1',
'Apanda',
'$1',
),
DeviceParser(
'; *(?:ARCHOS|Archos) ?(GAMEPAD.*?)(?: Build|[;/\\(\\)\\-])',
None,
'Archos $1',
'Archos',
'$1',
),
DeviceParser(
'ARCHOS; GOGI; ([^;]+);',
None,
'Archos $1',
'Archos',
'$1',
),
DeviceParser(
'(?:ARCHOS|Archos)[ _]?(.*?)(?: Build|[;/\\(\\)\\-]|$)',
None,
'Archos $1',
'Archos',
'$1',
),
DeviceParser(
'; *(AN(?:7|8|9|10|13)[A-Z0-9]{1,4}) Build',
None,
'Archos $1',
'Archos',
'$1',
),
DeviceParser(
'; *(A28|A32|A43|A70(?:BHT|CHT|HB|S|X)|A101(?:B|C|IT)|A7EB|A7EB-WK|101G9|80G9) Build',
None,
'Archos $1',
'Archos',
'$1',
),
DeviceParser(
'; *(PAD-FMD[^;/]+) Build',
None,
'$1',
'Arival',
'$1',
),
DeviceParser(
'; *(BioniQ) ?([^;/]+) Build',
None,
'$1 $2',
'Arival',
'$1 $2',
),
DeviceParser(
'; *(AN\\d[^;/]+|ARCHM\\d+) Build',
None,
'Arnova $1',
'Arnova',
'$1',
),
DeviceParser(
'; *(?:ARNOVA|Arnova) ?([^;/]+) Build',
None,
'Arnova $1',
'Arnova',
'$1',
),
DeviceParser(
'; *(?:ASSISTANT )?(AP)-?([1789]\\d{2}[A-Z]{0,2}|80104) Build',
None,
'Assistant $1-$2',
'Assistant',
'$1-$2',
),
DeviceParser(
'; *(ME17\\d[^;/]*|ME3\\d{2}[^;/]+|K00[A-Z]|Nexus 10|Nexus 7(?: 2013)?|PadFone[^;/]*|Transformer[^;/]*|TF\\d{3}[^;/]*|eeepc) Build',
None,
'Asus $1',
'Asus',
'$1',
),
DeviceParser(
'; *ASUS[ _]*([^;/]+) Build',
None,
'Asus $1',
'Asus',
'$1',
),
DeviceParser(
'; *Garmin-Asus ([^;/]+) Build',
None,
'Garmin-Asus $1',
'Garmin-Asus',
'$1',
),
DeviceParser(
'; *(Garminfone) Build',
None,
'Garmin $1',
'Garmin-Asus',
'$1',
),
DeviceParser(
'; (@TAB-[^;/]+) Build',
None,
'$1',
'Attab',
'$1',
),
DeviceParser(
'; *(T-(?:07|[^0]\\d)[^;/]+) Build',
None,
'$1',
'Audiosonic',
'$1',
),
DeviceParser(
'; *(?:Axioo[ _\\-]([^;/]+)|(picopad)[ _\\-]([^;/]+)) Build',
'i',
'Axioo $1$2 $3',
'Axioo',
'$1$2 $3',
),
DeviceParser(
'; *(V(?:100|700|800)[^;/]*) Build',
None,
'$1',
'Azend',
'$1',
),
DeviceParser(
'; *(IBAK\\-[^;/]*) Build',
'i',
'$1',
'Bak',
'$1',
),
DeviceParser(
'; *(HY5001|HY6501|X12|X21|I5) Build',
None,
'Bedove $1',
'Bedove',
'$1',
),
DeviceParser(
'; *(JC-[^;/]*) Build',
None,
'Benss $1',
'Benss',
'$1',
),
DeviceParser(
'; *(BB) ([^;/]+) Build',
None,
'$1 $2',
'Blackberry',
'$2',
),
DeviceParser(
'; *(BlackBird)[ _](I8.*) Build',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(BlackBird)[ _](.*) Build',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *([0-9]+BP[EM][^;/]*|Endeavour[^;/]+) Build',
None,
'Blaupunkt $1',
'Blaupunkt',
'$1',
),
DeviceParser(
'; *((?:BLU|Blu)[ _\\-])([^;/]+) Build',
None,
'$1$2',
'Blu',
'$2',
),
DeviceParser(
'; *(?:BMOBILE )?(Blu|BLU|DASH [^;/]+|VIVO 4\\.3|TANK 4\\.5) Build',
None,
'$1',
'Blu',
'$1',
),
DeviceParser(
'; *(TOUCH\\d[^;/]+) Build',
None,
'$1',
'Blusens',
'$1',
),
DeviceParser(
'; *(AX5\\d+) Build',
None,
'$1',
'Bmobile',
'$1',
),
DeviceParser(
'; *([Bb]q) ([^;/]+);? Build',
None,
'$1 $2',
'bq',
'$2',
),
DeviceParser(
'; *(Maxwell [^;/]+) Build',
None,
'$1',
'bq',
'$1',
),
DeviceParser(
'; *((?:B-Tab|B-TAB) ?\\d[^;/]+) Build',
None,
'$1',
'Braun',
'$1',
),
DeviceParser(
'; *(Broncho) ([^;/]+) Build',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *CAPTIVA ([^;/]+) Build',
None,
'Captiva $1',
'Captiva',
'$1',
),
DeviceParser(
'; *(C771|CAL21|IS11CA) Build',
None,
'$1',
'Casio',
'$1',
),
DeviceParser(
'; *(?:Cat|CAT) ([^;/]+) Build',
None,
'Cat $1',
'Cat',
'$1',
),
DeviceParser(
'; *(?:Cat)(Nova.*) Build',
None,
'Cat $1',
'Cat',
'$1',
),
DeviceParser(
'; *(INM8002KP|ADM8000KP_[AB]) Build',
None,
'$1',
'Cat',
'Tablet PHOENIX 8.1J0',
),
DeviceParser(
'; *(?:[Cc]elkon[ _\\*]|CELKON[ _\\*])([^;/\\)]+) ?(?:Build|;|\\))',
None,
'$1',
'Celkon',
'$1',
),
DeviceParser(
'Build/(?:[Cc]elkon)+_?([^;/_\\)]+)',
None,
'$1',
'Celkon',
'$1',
),
DeviceParser(
'; *(CT)-?(\\d+) Build',
None,
'$1$2',
'Celkon',
'$1$2',
),
DeviceParser(
'; *(A19|A19Q|A105|A107[^;/\\)]*) ?(?:Build|;|\\))',
None,
'$1',
'Celkon',
'$1',
),
DeviceParser(
'; *(TPC[0-9]{4,5}) Build',
None,
'$1',
'ChangJia',
'$1',
),
DeviceParser(
'; *(Cloudfone)[ _](Excite)([^ ][^;/]+) Build',
None,
'$1 $2 $3',
'Cloudfone',
'$1 $2 $3',
),
DeviceParser(
'; *(Excite|ICE)[ _](\\d+[^;/]+) Build',
None,
'Cloudfone $1 $2',
'Cloudfone',
'Cloudfone $1 $2',
),
DeviceParser(
'; *(Cloudfone|CloudPad)[ _]([^;/]+) Build',
None,
'$1 $2',
'Cloudfone',
'$1 $2',
),
DeviceParser(
'; *((?:Aquila|Clanga|Rapax)[^;/]+) Build',
'i',
'$1',
'Cmx',
'$1',
),
DeviceParser(
'; *(?:CFW-|Kyros )?(MID[0-9]{4}(?:[ABC]|SR|TV)?)(\\(3G\\)-4G| GB 8K| 3G| 8K| GB)? *(?:Build|[;\\)])',
None,
'CobyKyros $1$2',
'CobyKyros',
'$1$2',
),
DeviceParser(
'; *([^;/]*)Coolpad[ _]([^;/]+) Build',
None,
'$1$2',
'Coolpad',
'$1$2',
),
DeviceParser(
'; *(CUBE[ _])?([KU][0-9]+ ?GT.*|A5300) Build',
'i',
'$1$2',
'Cube',
'$2',
),
DeviceParser(
'; *CUBOT ([^;/]+) Build',
'i',
'$1',
'Cubot',
'$1',
),
DeviceParser(
'; *(BOBBY) Build',
'i',
'$1',
'Cubot',
'$1',
),
DeviceParser(
'; *(Dslide [^;/]+) Build',
None,
'$1',
'Danew',
'$1',
),
DeviceParser(
'; *(XCD)[ _]?(28|35) Build',
None,
'Dell $1$2',
'Dell',
'$1$2',
),
DeviceParser(
'; *(001DL) Build',
None,
'Dell $1',
'Dell',
'Streak',
),
DeviceParser(
'; *(?:Dell|DELL) (Streak) Build',
None,
'Dell $1',
'Dell',
'Streak',
),
DeviceParser(
'; *(101DL|GS01|Streak Pro[^;/]*) Build',
None,
'Dell $1',
'Dell',
'Streak Pro',
),
DeviceParser(
'; *([Ss]treak ?7) Build',
None,
'Dell $1',
'Dell',
'Streak 7',
),
DeviceParser(
'; *(Mini-3iX) Build',
None,
'Dell $1',
'Dell',
'$1',
),
DeviceParser(
'; *(?:Dell|DELL)[ _](Aero|Venue|Thunder|Mini.*|Streak[ _]Pro) Build',
None,
'Dell $1',
'Dell',
'$1',
),
DeviceParser(
'; *Dell[ _]([^;/]+) Build',
None,
'Dell $1',
'Dell',
'$1',
),
DeviceParser(
'; *Dell ([^;/]+) Build',
None,
'Dell $1',
'Dell',
'$1',
),
DeviceParser(
'; *(TA[CD]-\\d+[^;/]*) Build',
None,
'$1',
'Denver',
'$1',
),
DeviceParser(
'; *(iP[789]\\d{2}(?:-3G)?|IP10\\d{2}(?:-8GB)?) Build',
None,
'$1',
'Dex',
'$1',
),
DeviceParser(
'; *(AirTab)[ _\\-]([^;/]+) Build',
None,
'$1 $2',
'DNS',
'$1 $2',
),
DeviceParser(
'; *(F\\-\\d[^;/]+) Build',
None,
'$1',
'Fujitsu',
'$1',
),
DeviceParser(
'; *(HT-03A) Build',
None,
'$1',
'HTC',
'Magic',
),
DeviceParser(
'; *(HT\\-\\d[^;/]+) Build',
None,
'$1',
'HTC',
'$1',
),
DeviceParser(
'; *(L\\-\\d[^;/]+) Build',
None,
'$1',
'LG',
'$1',
),
DeviceParser(
'; *(N\\-\\d[^;/]+) Build',
None,
'$1',
'Nec',
'$1',
),
DeviceParser(
'; *(P\\-\\d[^;/]+) Build',
None,
'$1',
'Panasonic',
'$1',
),
DeviceParser(
'; *(SC\\-\\d[^;/]+) Build',
None,
'$1',
'Samsung',
'$1',
),
DeviceParser(
'; *(SH\\-\\d[^;/]+) Build',
None,
'$1',
'Sharp',
'$1',
),
DeviceParser(
'; *(SO\\-\\d[^;/]+) Build',
None,
'$1',
'SonyEricsson',
'$1',
),
DeviceParser(
'; *(T\\-0[12][^;/]+) Build',
None,
'$1',
'Toshiba',
'$1',
),
DeviceParser(
'; *(DOOV)[ _]([^;/]+) Build',
None,
'$1 $2',
'DOOV',
'$2',
),
DeviceParser(
'; *(Enot|ENOT)[ -]?([^;/]+) Build',
None,
'$1 $2',
'Enot',
'$2',
),
DeviceParser(
'; *[^;/]+ Build/(?:CROSS|Cross)+[ _\\-]([^\\)]+)',
None,
'CROSS $1',
'Evercoss',
'Cross $1',
),
DeviceParser(
'; *(CROSS|Cross)[ _\\-]([^;/]+) Build',
None,
'$1 $2',
'Evercoss',
'Cross $2',
),
DeviceParser(
'; *Explay[_ ](.+?)(?:[\\)]| Build)',
None,
'$1',
'Explay',
'$1',
),
DeviceParser(
'; *(IQ.*) Build',
None,
'$1',
'Fly',
'$1',
),
DeviceParser(
'; *(Fly|FLY)[ _](IQ[^;]+|F[34]\\d+[^;]*);? Build',
None,
'$1 $2',
'Fly',
'$2',
),
DeviceParser(
'; *(M532|Q572|FJL21) Build/',
None,
'$1',
'Fujitsu',
'$1',
),
DeviceParser(
'; *(G1) Build',
None,
'$1',
'Galapad',
'$1',
),
DeviceParser(
'; *(Geeksphone) ([^;/]+) Build',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(G[^F]?FIVE) ([^;/]+) Build',
None,
'$1 $2',
'Gfive',
'$2',
),
DeviceParser(
'; *(Gionee)[ _\\-]([^;/]+)(?:/[^;/]+)? Build',
'i',
'$1 $2',
'Gionee',
'$2',
),
DeviceParser(
'; *(GN\\d+[A-Z]?|INFINITY_PASSION|Ctrl_V1) Build',
None,
'Gionee $1',
'Gionee',
'$1',
),
DeviceParser(
'; *(E3) Build/JOP40D',
None,
'Gionee $1',
'Gionee',
'$1',
),
DeviceParser(
'; *((?:FONE|QUANTUM|INSIGNIA) \\d+[^;/]*|PLAYTAB) Build',
None,
'GoClever $1',
'GoClever',
'$1',
),
DeviceParser(
'; *GOCLEVER ([^;/]+) Build',
None,
'GoClever $1',
'GoClever',
'$1',
),
DeviceParser(
'; *(Glass \\d+) Build',
None,
'$1',
'Google',
'$1',
),
DeviceParser(
'; *(GSmart)[ -]([^/]+) Build',
None,
'$1 $2',
'Gigabyte',
'$1 $2',
),
DeviceParser(
'; *(imx5[13]_[^/]+) Build',
None,
'Freescale $1',
'Freescale',
'$1',
),
DeviceParser(
'; *Haier[ _\\-]([^/]+) Build',
None,
'Haier $1',
'Haier',
'$1',
),
DeviceParser(
'; *(PAD1016) Build',
None,
'Haipad $1',
'Haipad',
'$1',
),
DeviceParser(
'; *(M701|M7|M8|M9) Build',
None,
'Haipad $1',
'Haipad',
'$1',
),
DeviceParser(
'; *(SN\\d+T[^;\\)/]*)(?: Build|[;\\)])',
None,
'Hannspree $1',
'Hannspree',
'$1',
),
DeviceParser(
'Build/HCL ME Tablet ([^;\\)]+)[\\);]',
None,
'HCLme $1',
'HCLme',
'$1',
),
DeviceParser(
'; *([^;\\/]+) Build/HCL',
None,
'HCLme $1',
'HCLme',
'$1',
),
DeviceParser(
'; *(MID-?\\d{4}C[EM]) Build',
None,
'Hena $1',
'Hena',
'$1',
),
DeviceParser(
'; *(EG\\d{2,}|HS-[^;/]+|MIRA[^;/]+) Build',
None,
'Hisense $1',
'Hisense',
'$1',
),
DeviceParser(
'; *(andromax[^;/]+) Build',
'i',
'Hisense $1',
'Hisense',
'$1',
),
DeviceParser(
'; *(?:AMAZE[ _](S\\d+)|(S\\d+)[ _]AMAZE) Build',
None,
'AMAZE $1$2',
'hitech',
'AMAZE $1$2',
),
DeviceParser(
'; *(PlayBook) Build',
None,
'HP $1',
'HP',
'$1',
),
DeviceParser(
'; *HP ([^/]+) Build',
None,
'HP $1',
'HP',
'$1',
),
DeviceParser(
'; *([^/]+_tenderloin) Build',
None,
'HP TouchPad',
'HP',
'TouchPad',
),
DeviceParser(
'; *(HUAWEI |Huawei-)?([UY][^;/]+) Build/(?:Huawei|HUAWEI)([UY][^\\);]+)\\)',
None,
'$1$2',
'Huawei',
'$2',
),
DeviceParser(
'; *([^;/]+) Build[/ ]Huawei(MT1-U06|[A-Z]+\\d+[^\\);]+)[^\\);]*\\)',
None,
'$1',
'Huawei',
'$2',
),
DeviceParser(
'; *(S7|M860) Build',
None,
'$1',
'Huawei',
'$1',
),
DeviceParser(
'; *((?:HUAWEI|Huawei)[ \\-]?)(MediaPad) Build',
None,
'$1$2',
'Huawei',
'$2',
),
DeviceParser(
'; *((?:HUAWEI[ _]?|Huawei[ _])?Ascend[ _])([^;/]+) Build',
None,
'$1$2',
'Huawei',
'$2',
),
DeviceParser(
'; *((?:HUAWEI|Huawei)[ _\\-]?)((?:G700-|MT-)[^;/]+) Build',
None,
'$1$2',
'Huawei',
'$2',
),
DeviceParser(
'; *((?:HUAWEI|Huawei)[ _\\-]?)([^;/]+) Build',
None,
'$1$2',
'Huawei',
'$2',
),
DeviceParser(
'; *(MediaPad[^;]+|SpringBoard) Build/Huawei',
None,
'$1',
'Huawei',
'$1',
),
DeviceParser(
'; *([^;]+) Build/Huawei',
None,
'$1',
'Huawei',
'$1',
),
DeviceParser(
'; *([Uu])([89]\\d{3}) Build',
None,
'$1$2',
'Huawei',
'U$2',
),
DeviceParser(
'; *(?:Ideos |IDEOS )(S7) Build',
None,
'Huawei Ideos$1',
'Huawei',
'Ideos$1',
),
DeviceParser(
'; *(?:Ideos |IDEOS )([^;/]+\\s*|\\s*)Build',
None,
'Huawei Ideos$1',
'Huawei',
'Ideos$1',
),
DeviceParser(
'; *(Orange Daytona|Pulse|Pulse Mini|Vodafone 858|C8500|C8600|C8650|C8660) Build',
None,
'Huawei $1',
'Huawei',
'$1',
),
DeviceParser(
'; *HTC[ _]([^;]+); Windows Phone',
None,
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'; *(?:HTC[ _/])+([^ _/]+)(?:[/\\\\]1\\.0 | V|/| +)\\d+\\.\\d[\\d\\.]*(?: *Build|\\))',
None,
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'; *(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+))?(?:[/\\\\]1\\.0 | V|/| +)\\d+\\.\\d[\\d\\.]*(?: *Build|\\))',
None,
'HTC $1 $2',
'HTC',
'$1 $2',
),
DeviceParser(
'; *(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+))?)?(?:[/\\\\]1\\.0 | V|/| +)\\d+\\.\\d[\\d\\.]*(?: *Build|\\))',
None,
'HTC $1 $2 $3',
'HTC',
'$1 $2 $3',
),
DeviceParser(
'; *(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+))?)?)?(?:[/\\\\]1\\.0 | V|/| +)\\d+\\.\\d[\\d\\.]*(?: *Build|\\))',
None,
'HTC $1 $2 $3 $4',
'HTC',
'$1 $2 $3 $4',
),
DeviceParser(
'; *(?:(?:HTC|htc)(?:_blocked)*[ _/])+([^ _/;]+)(?: *Build|[;\\)]| - )',
None,
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'; *(?:(?:HTC|htc)(?:_blocked)*[ _/])+([^ _/]+)(?:[ _/]([^ _/;\\)]+))?(?: *Build|[;\\)]| - )',
None,
'HTC $1 $2',
'HTC',
'$1 $2',
),
DeviceParser(
'; *(?:(?:HTC|htc)(?:_blocked)*[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/;\\)]+))?)?(?: *Build|[;\\)]| - )',
None,
'HTC $1 $2 $3',
'HTC',
'$1 $2 $3',
),
DeviceParser(
'; *(?:(?:HTC|htc)(?:_blocked)*[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ /;]+))?)?)?(?: *Build|[;\\)]| - )',
None,
'HTC $1 $2 $3 $4',
'HTC',
'$1 $2 $3 $4',
),
DeviceParser(
'HTC Streaming Player [^\\/]*/[^\\/]*/ htc_([^/]+) /',
None,
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'(?:[;,] *|^)(?:htccn_chs-)?HTC[ _-]?([^;]+?)(?: *Build|clay|Android|-?Mozilla| Opera| Profile| UNTRUSTED|[;/\\(\\)]|$)',
'i',
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'; *(A6277|ADR6200|ADR6300|ADR6350|ADR6400[A-Z]*|ADR6425[A-Z]*|APX515CKT|ARIA|Desire[^_ ]*|Dream|EndeavorU|Eris|Evo|Flyer|HD2|Hero|HERO200|Hero CDMA|HTL21|Incredible|Inspire[A-Z0-9]*|Legend|Liberty|Nexus ?(?:One|HD2)|One|One S C2|One[ _]?(?:S|V|X\\+?)\\w*|PC36100|PG06100|PG86100|S31HT|Sensation|Wildfire)(?: Build|[/;\\(\\)])',
'i',
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'; *(ADR6200|ADR6400L|ADR6425LVW|Amaze|DesireS?|EndeavorU|Eris|EVO|Evo\\d[A-Z]+|HD2|IncredibleS?|Inspire[A-Z0-9]*|Inspire[A-Z0-9]*|Sensation[A-Z0-9]*|Wildfire)[ _-](.+?)(?:[/;\\)]|Build|MIUI|1\\.0)',
'i',
'HTC $1 $2',
'HTC',
'$1 $2',
),
DeviceParser(
'; *HYUNDAI (T\\d[^/]*) Build',
None,
'Hyundai $1',
'Hyundai',
'$1',
),
DeviceParser(
'; *HYUNDAI ([^;/]+) Build',
None,
'Hyundai $1',
'Hyundai',
'$1',
),
DeviceParser(
'; *(X700|Hold X|MB-6900) Build',
None,
'Hyundai $1',
'Hyundai',
'$1',
),
DeviceParser(
'; *(?:iBall[ _\\-])?(Andi)[ _]?(\\d[^;/]*) Build',
'i',
'$1 $2',
'iBall',
'$1 $2',
),
DeviceParser(
'; *(IBall)(?:[ _]([^;/]+)|) Build',
'i',
'$1 $2',
'iBall',
'$2',
),
DeviceParser(
'; *(NT-\\d+[^ ;/]*|Net[Tt]AB [^;/]+|Mercury [A-Z]+|iconBIT)(?: S/N:[^;/]+)? Build',
None,
'$1',
'IconBIT',
'$1',
),
DeviceParser(
'; *(IMO)[ _]([^;/]+) Build',
'i',
'$1 $2',
'IMO',
'$2',
),
DeviceParser(
'; *i-?mobile[ _]([^/]+) Build/',
'i',
'i-mobile $1',
'imobile',
'$1',
),
DeviceParser(
'; *(i-(?:style|note)[^/]*) Build/',
'i',
'i-mobile $1',
'imobile',
'$1',
),
DeviceParser(
'; *(ImPAD) ?(\\d+(?:.)*) Build',
None,
'$1 $2',
'Impression',
'$1 $2',
),
DeviceParser(
'; *(Infinix)[ _]([^;/]+) Build',
None,
'$1 $2',
'Infinix',
'$2',
),
DeviceParser(
'; *(Informer)[ \\-]([^;/]+) Build',
None,
'$1 $2',
'Informer',
'$2',
),
DeviceParser(
'; *(TAB) ?([78][12]4) Build',
None,
'Intenso $1',
'Intenso',
'$1 $2',
),
DeviceParser(
'; *(?:Intex[ _])?(AQUA|Aqua)([ _\\.\\-])([^;/]+) *(?:Build|;)',
None,
'$1$2$3',
'Intex',
'$1 $3',
),
DeviceParser(
'; *(?:INTEX|Intex)(?:[_ ]([^\\ _;/]+))(?:[_ ]([^\\ _;/]+))? *(?:Build|;)',
None,
'$1 $2',
'Intex',
'$1 $2',
),
DeviceParser(
'; *([iI]Buddy)[ _]?(Connect)(?:_|\\?_| )?([^;/]*) *(?:Build|;)',
None,
'$1 $2 $3',
'Intex',
'iBuddy $2 $3',
),
DeviceParser(
'; *(I-Buddy)[ _]([^;/]+) *(?:Build|;)',
None,
'$1 $2',
'Intex',
'iBuddy $2',
),
DeviceParser(
'; *(iOCEAN) ([^/]+) Build',
'i',
'$1 $2',
'iOCEAN',
'$2',
),
DeviceParser(
'; *(TP\\d+(?:\\.\\d+)?\\-\\d[^;/]+) Build',
None,
'ionik $1',
'ionik',
'$1',
),
DeviceParser(
'; *(M702pro) Build',
None,
'$1',
'Iru',
'$1',
),
DeviceParser(
'; *(DE88Plus|MD70) Build',
None,
'$1',
'Ivio',
'$1',
),
DeviceParser(
'; *IVIO[_\\-]([^;/]+) Build',
None,
'$1',
'Ivio',
'$1',
),
DeviceParser(
'; *(TPC-\\d+|JAY-TECH) Build',
None,
'$1',
'Jaytech',
'$1',
),
DeviceParser(
'; *(JY-[^;/]+|G[234]S?) Build',
None,
'$1',
'Jiayu',
'$1',
),
DeviceParser(
'; *(JXD)[ _\\-]([^;/]+) Build',
None,
'$1 $2',
'JXD',
'$2',
),
DeviceParser(
'; *Karbonn[ _]?([^;/]+) *(?:Build|;)',
'i',
'$1',
'Karbonn',
'$1',
),
DeviceParser(
'; *([^;]+) Build/Karbonn',
None,
'$1',
'Karbonn',
'$1',
),
DeviceParser(
'; *(A11|A39|A37|A34|ST8|ST10|ST7|Smart Tab3|Smart Tab2|Titanium S\\d) +Build',
None,
'$1',
'Karbonn',
'$1',
),
DeviceParser(
'; *(IS01|IS03|IS05|IS\\d{2}SH) Build',
None,
'$1',
'Sharp',
'$1',
),
DeviceParser(
'; *(IS04) Build',
None,
'$1',
'Regza',
'$1',
),
DeviceParser(
'; *(IS06|IS\\d{2}PT) Build',
None,
'$1',
'Pantech',
'$1',
),
DeviceParser(
'; *(IS11S) Build',
None,
'$1',
'SonyEricsson',
'Xperia Acro',
),
DeviceParser(
'; *(IS11CA) Build',
None,
'$1',
'Casio',
'GzOne $1',
),
DeviceParser(
'; *(IS11LG) Build',
None,
'$1',
'LG',
'Optimus X',
),
DeviceParser(
'; *(IS11N) Build',
None,
'$1',
'Medias',
'$1',
),
DeviceParser(
'; *(IS11PT) Build',
None,
'$1',
'Pantech',
'MIRACH',
),
DeviceParser(
'; *(IS12F) Build',
None,
'$1',
'Fujitsu',
'Arrows ES',
),
DeviceParser(
'; *(IS12M) Build',
None,
'$1',
'Motorola',
'XT909',
),
DeviceParser(
'; *(IS12S) Build',
None,
'$1',
'SonyEricsson',
'Xperia Acro HD',
),
DeviceParser(
'; *(ISW11F) Build',
None,
'$1',
'Fujitsu',
'Arrowz Z',
),
DeviceParser(
'; *(ISW11HT) Build',
None,
'$1',
'HTC',
'EVO',
),
DeviceParser(
'; *(ISW11K) Build',
None,
'$1',
'Kyocera',
'DIGNO',
),
DeviceParser(
'; *(ISW11M) Build',
None,
'$1',
'Motorola',
'Photon',
),
DeviceParser(
'; *(ISW11SC) Build',
None,
'$1',
'Samsung',
'GALAXY S II WiMAX',
),
DeviceParser(
'; *(ISW12HT) Build',
None,
'$1',
'HTC',
'EVO 3D',
),
DeviceParser(
'; *(ISW13HT) Build',
None,
'$1',
'HTC',
'J',
),
DeviceParser(
'; *(ISW?[0-9]{2}[A-Z]{0,2}) Build',
None,
'$1',
'KDDI',
'$1',
),
DeviceParser(
'; *(INFOBAR [^;/]+) Build',
None,
'$1',
'KDDI',
'$1',
),
DeviceParser(
'; *(JOYPAD|Joypad)[ _]([^;/]+) Build/',
None,
'$1 $2',
'Kingcom',
'$1 $2',
),
DeviceParser(
'; *(Vox|VOX|Arc|K080) Build/',
'i',
'$1',
'Kobo',
'$1',
),
DeviceParser(
'\\b(Kobo Touch)\\b',
None,
'$1',
'Kobo',
'$1',
),
DeviceParser(
'; *(K-Touch)[ _]([^;/]+) Build',
'i',
'$1 $2',
'Ktouch',
'$2',
),
DeviceParser(
'; *((?:EV|KM)-S\\d+[A-Z]?) Build',
'i',
'$1',
'KTtech',
'$1',
),
DeviceParser(
'; *(Zio|Hydro|Torque|Event|EVENT|Echo|Milano|Rise|URBANO PROGRESSO|WX04K|WX06K|WX10K|KYL21|101K|C5[12]\\d{2}) Build/',
None,
'$1',
'Kyocera',
'$1',
),
DeviceParser(
'; *(?:LAVA[ _])?IRIS[ _\\-]?([^/;\\)]+) *(?:;|\\)|Build)',
'i',
'Iris $1',
'Lava',
'Iris $1',
),
DeviceParser(
'; *LAVA[ _]([^;/]+) Build',
None,
'$1',
'Lava',
'$1',
),
DeviceParser(
'; *(?:(Aspire A1)|(?:LEMON|Lemon)[ _]([^;/]+))_? Build',
None,
'Lemon $1$2',
'Lemon',
'$1$2',
),
DeviceParser(
'; *(TAB-1012) Build/',
None,
'Lenco $1',
'Lenco',
'$1',
),
DeviceParser(
'; Lenco ([^;/]+) Build/',
None,
'Lenco $1',
'Lenco',
'$1',
),
DeviceParser(
'; *(A1_07|A2107A-H|S2005A-H|S1-37AH0) Build',
None,
'$1',
'Lenovo',
'$1',
),
DeviceParser(
'; *(Idea[Tp]ab)[ _]([^;/]+);? Build',
None,
'Lenovo $1 $2',
'Lenovo',
'$1 $2',
),
DeviceParser(
'; *(Idea(?:Tab|pad)) ?([^;/]+) Build',
None,
'Lenovo $1 $2',
'Lenovo',
'$1 $2',
),
DeviceParser(
'; *(ThinkPad) ?(Tablet) Build/',
None,
'Lenovo $1 $2',
'Lenovo',
'$1 $2',
),
DeviceParser(
'; *(?:LNV-)?(?:=?[Ll]enovo[ _\\-]?|LENOVO[ _])+(.+?)(?:Build|[;/\\)])',
None,
'Lenovo $1',
'Lenovo',
'$1',
),
DeviceParser(
'[;,] (?:Vodafone )?(SmartTab) ?(II) ?(\\d+) Build/',
None,
'Lenovo $1 $2 $3',
'Lenovo',
'$1 $2 $3',
),
DeviceParser(
'; *(?:Ideapad )?K1 Build/',
None,
'Lenovo Ideapad K1',
'Lenovo',
'Ideapad K1',
),
DeviceParser(
'; *(3GC101|3GW10[01]|A390) Build/',
None,
'$1',
'Lenovo',
'$1',
),
DeviceParser(
'\\b(?:Lenovo|LENOVO)+[ _\\-]?([^,;:/ ]+)',
None,
'Lenovo $1',
'Lenovo',
'$1',
),
DeviceParser(
'; *(MFC\\d+)[A-Z]{2}([^;,/]*),? Build',
None,
'$1$2',
'Lexibook',
'$1$2',
),
DeviceParser(
'; *(E[34][0-9]{2}|LS[6-8][0-9]{2}|VS[6-9][0-9]+[^;/]+|Nexus [45]|GT540f?|Optimus (?:2X|G|4X HD)|OptimusX4HD) *(?:Build|;)',
None,
'$1',
'LG',
'$1',
),
DeviceParser(
'[;:] *(L-\\d+[A-Z]|LGL\\d+[A-Z]?)(?:/V\\d+)? *(?:Build|[;\\)])',
None,
'$1',
'LG',
'$1',
),
DeviceParser(
'; *(LG-)([A-Z]{1,2}\\d{2,}[^,;/\\)\\(]*?)(?:Build| V\\d+|[,;/\\)\\(]|$)',
None,
'$1$2',
'LG',
'$2',
),
DeviceParser(
'; *(LG[ \\-]|LG)([^;/]+)[;/]? Build',
None,
'$1$2',
'LG',
'$2',
),
DeviceParser(
'^(LG)-([^;/]+)/ Mozilla/.*; Android',
None,
'$1 $2',
'LG',
'$2',
),
DeviceParser(
'; *((?:SMB|smb)[^;/]+) Build/',
None,
'$1',
'Malata',
'$1',
),
DeviceParser(
'; *(?:Malata|MALATA) ([^;/]+) Build/',
None,
'$1',
'Malata',
'$1',
),
DeviceParser(
'; *(MS[45][0-9]{3}|MID0[568][NS]?|MID[1-9]|MID[78]0[1-9]|MID970[1-9]|MID100[1-9]) Build/',
None,
'$1',
'Manta',
'$1',
),
DeviceParser(
'; *(M1052|M806|M9000|M9100|M9701|MID100|MID120|MID125|MID130|MID135|MID140|MID701|MID710|MID713|MID727|MID728|MID731|MID732|MID733|MID735|MID736|MID737|MID760|MID800|MID810|MID820|MID830|MID833|MID835|MID860|MID900|MID930|MID933|MID960|MID980) Build/',
None,
'$1',
'Match',
'$1',
),
DeviceParser(
'; *(GenxDroid7|MSD7.*|AX\\d.*|Tab 701|Tab 722) Build/',
None,
'Maxx $1',
'Maxx',
'$1',
),
DeviceParser(
'; *(M-PP[^;/]+|PhonePad ?\\d{2,}[^;/]+) Build',
None,
'Mediacom $1',
'Mediacom',
'$1',
),
DeviceParser(
'; *(M-MP[^;/]+|SmartPad ?\\d{2,}[^;/]+) Build',
None,
'Mediacom $1',
'Mediacom',
'$1',
),
DeviceParser(
'; *(?:MD_)?LIFETAB[ _]([^;/]+) Build',
'i',
'Medion Lifetab $1',
'Medion',
'Lifetab $1',
),
DeviceParser(
'; *MEDION ([^;/]+) Build',
None,
'Medion $1',
'Medion',
'$1',
),
DeviceParser(
'; *(M030|M031|M035|M040|M065|m9) Build',
None,
'Meizu $1',
'Meizu',
'$1',
),
DeviceParser(
'; *(?:meizu_|MEIZU )(.+?) *(?:Build|[;\\)])',
None,
'Meizu $1',
'Meizu',
'$1',
),
DeviceParser(
'; *(?:Micromax[ _](A111|A240)|(A111|A240)) Build',
'i',
'Micromax $1$2',
'Micromax',
'$1$2',
),
DeviceParser(
'; *Micromax[ _](A\\d{2,3}[^;/]*) Build',
'i',
'Micromax $1',
'Micromax',
'$1',
),
DeviceParser(
'; *(A\\d{2}|A[12]\\d{2}|A90S|A110Q) Build',
'i',
'Micromax $1',
'Micromax',
'$1',
),
DeviceParser(
'; *Micromax[ _](P\\d{3}[^;/]*) Build',
'i',
'Micromax $1',
'Micromax',
'$1',
),
DeviceParser(
'; *(P\\d{3}|P\\d{3}\\(Funbook\\)) Build',
'i',
'Micromax $1',
'Micromax',
'$1',
),
DeviceParser(
'; *(MITO)[ _\\-]?([^;/]+) Build',
'i',
'$1 $2',
'Mito',
'$2',
),
DeviceParser(
'; *(Cynus)[ _](F5|T\\d|.+?) *(?:Build|[;/\\)])',
'i',
'$1 $2',
'Mobistel',
'$1 $2',
),
DeviceParser(
'; *(MODECOM )?(FreeTab) ?([^;/]+) Build',
'i',
'$1$2 $3',
'Modecom',
'$2 $3',
),
DeviceParser(
'; *(MODECOM )([^;/]+) Build',
'i',
'$1 $2',
'Modecom',
'$2',
),
DeviceParser(
'; *(MZ\\d{3}\\+?|MZ\\d{3} 4G|Xoom|XOOM[^;/]*) Build',
None,
'Motorola $1',
'Motorola',
'$1',
),
DeviceParser(
'; *(Milestone )(XT[^;/]*) Build',
None,
'Motorola $1$2',
'Motorola',
'$2',
),
DeviceParser(
'; *(Motoroi ?x|Droid X|DROIDX) Build',
'i',
'Motorola $1',
'Motorola',
'DROID X',
),
DeviceParser(
'; *(Droid[^;/]*|DROID[^;/]*|Milestone[^;/]*|Photon|Triumph|Devour|Titanium) Build',
None,
'Motorola $1',
'Motorola',
'$1',
),
DeviceParser(
'; *(A555|A85[34][^;/]*|A95[356]|ME[58]\\d{2}\\+?|ME600|ME632|ME722|MB\\d{3}\\+?|MT680|MT710|MT870|MT887|MT917|WX435|WX453|WX44[25]|XT\\d{3,4}[A-Z\\+]*|CL[iI]Q|CL[iI]Q XT) Build',
None,
'$1',
'Motorola',
'$1',
),
DeviceParser(
'; *(Motorola MOT-|Motorola[ _\\-]|MOT\\-?)([^;/]+) Build',
None,
'$1$2',
'Motorola',
'$2',
),
DeviceParser(
'; *(Moto[_ ]?|MOT\\-)([^;/]+) Build',
None,
'$1$2',
'Motorola',
'$2',
),
DeviceParser(
'; *((?:MP[DQ]C|MPG\\d{1,4}|MP\\d{3,4}|MID(?:(?:10[234]|114|43|7[247]|8[24]|7)C|8[01]1))[^;/]*) Build',
None,
'$1',
'Mpman',
'$1',
),
DeviceParser(
'; *(?:MSI[ _])?(Primo\\d+|Enjoy[ _\\-][^;/]+) Build',
'i',
'$1',
'Msi',
'$1',
),
DeviceParser(
'; *Multilaser[ _]([^;/]+) Build',
None,
'$1',
'Multilaser',
'$1',
),
DeviceParser(
'; *(My)[_]?(Pad)[ _]([^;/]+) Build',
None,
'$1$2 $3',
'MyPhone',
'$1$2 $3',
),
DeviceParser(
'; *(My)\\|?(Phone)[ _]([^;/]+) Build',
None,
'$1$2 $3',
'MyPhone',
'$3',
),
DeviceParser(
'; *(A\\d+)[ _](Duo)? Build',
'i',
'$1 $2',
'MyPhone',
'$1 $2',
),
DeviceParser(
'; *(myTab[^;/]*) Build',
None,
'$1',
'Mytab',
'$1',
),
DeviceParser(
'; *(NABI2?-)([^;/]+) Build/',
None,
'$1$2',
'Nabi',
'$2',
),
DeviceParser(
'; *(N-\\d+[CDE]) Build/',
None,
'$1',
'Nec',
'$1',
),
DeviceParser(
'; ?(NEC-)(.*) Build/',
None,
'$1$2',
'Nec',
'$2',
),
DeviceParser(
'; *(LT-NA7) Build/',
None,
'$1',
'Nec',
'Lifetouch Note',
),
DeviceParser(
'; *(NXM\\d+[A-z0-9_]*|Next\\d[A-z0-9_ \\-]*|NEXT\\d[A-z0-9_ \\-]*|Nextbook [A-z0-9_ ]*|DATAM803HC|M805)(?: Build|[\\);])',
None,
'$1',
'Nextbook',
'$1',
),
DeviceParser(
'; *(Nokia)([ _\\-]*)([^;/]*) Build',
'i',
'$1$2$3',
'Nokia',
'$3',
),
DeviceParser(
'; *(Nook ?|Barnes & Noble Nook |BN )([^;/]+) Build',
None,
'$1$2',
'Nook',
'$2',
),
DeviceParser(
'; *(NOOK )?(BNRV200|BNRV200A|BNTV250|BNTV250A|BNTV400|BNTV600|LogicPD Zoom2) Build',
None,
'$1$2',
'Nook',
'$2',
),
DeviceParser(
'; Build/(Nook)',
None,
'$1',
'Nook',
'Tablet',
),
DeviceParser(
'; *(OP110|OliPad[^;/]+) Build',
None,
'Olivetti $1',
'Olivetti',
'$1',
),
DeviceParser(
'; *OMEGA[ _\\-](MID[^;/]+) Build',
None,
'Omega $1',
'Omega',
'$1',
),
DeviceParser(
'^(MID7500|MID\\d+) Mozilla/5\\.0 \\(iPad;',
None,
'Omega $1',
'Omega',
'$1',
),
DeviceParser(
'; *((?:CIUS|cius)[^;/]*) Build',
None,
'Openpeak $1',
'Openpeak',
'$1',
),
DeviceParser(
'; *(Find ?(?:5|7a)|R8[012]\\d{1,2}|T703\\d{0,1}|U70\\d{1,2}T?|X90\\d{1,2}) Build',
None,
'Oppo $1',
'Oppo',
'$1',
),
DeviceParser(
'; *OPPO ?([^;/]+) Build/',
None,
'Oppo $1',
'Oppo',
'$1',
),
DeviceParser(
'; *(?:Odys\\-|ODYS\\-|ODYS )([^;/]+) Build',
None,
'Odys $1',
'Odys',
'$1',
),
DeviceParser(
'; *(SELECT) ?(7) Build',
None,
'Odys $1 $2',
'Odys',
'$1 $2',
),
DeviceParser(
'; *(PEDI)_(PLUS)_(W) Build',
None,
'Odys $1 $2 $3',
'Odys',
'$1 $2 $3',
),
DeviceParser(
'; *(AEON|BRAVIO|FUSION|FUSION2IN1|Genio|EOS10|IEOS[^;/]*|IRON|Loox|LOOX|LOOX Plus|Motion|NOON|NOON_PRO|NEXT|OPOS|PEDI[^;/]*|PRIME[^;/]*|STUDYTAB|TABLO|Tablet-PC-4|UNO_X8|XELIO[^;/]*|Xelio ?\\d+ ?[Pp]ro|XENO10|XPRESS PRO) Build',
None,
'Odys $1',
'Odys',
'$1',
),
DeviceParser(
'; *(TP-\\d+) Build/',
None,
'Orion $1',
'Orion',
'$1',
),
DeviceParser(
'; *(G100W?) Build/',
None,
'PackardBell $1',
'PackardBell',
'$1',
),
DeviceParser(
'; *(Panasonic)[_ ]([^;/]+) Build',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(FZ-A1B|JT-B1) Build',
None,
'Panasonic $1',
'Panasonic',
'$1',
),
DeviceParser(
'; *(dL1|DL1) Build',
None,
'Panasonic $1',
'Panasonic',
'$1',
),
DeviceParser(
'; *(SKY[ _])?(IM\\-[AT]\\d{3}[^;/]+).* Build/',
None,
'Pantech $1$2',
'Pantech',
'$1$2',
),
DeviceParser(
'; *((?:ADR8995|ADR910L|ADR930L|ADR930VW|PTL21|P8000)(?: 4G)?) Build/',
None,
'$1',
'Pantech',
'$1',
),
DeviceParser(
'; *Pantech([^;/]+).* Build/',
None,
'Pantech $1',
'Pantech',
'$1',
),
DeviceParser(
'; *(papyre)[ _\\-]([^;/]+) Build/',
'i',
'$1 $2',
'Papyre',
'$2',
),
DeviceParser(
'; *(?:Touchlet )?(X10\\.[^;/]+) Build/',
None,
'Pearl $1',
'Pearl',
'$1',
),
DeviceParser(
'; PHICOMM (i800) Build/',
None,
'Phicomm $1',
'Phicomm',
'$1',
),
DeviceParser(
'; PHICOMM ([^;/]+) Build/',
None,
'Phicomm $1',
'Phicomm',
'$1',
),
DeviceParser(
'; *(FWS\\d{3}[^;/]+) Build/',
None,
'Phicomm $1',
'Phicomm',
'$1',
),
DeviceParser(
'; *(D633|D822|D833|T539|T939|V726|W335|W336|W337|W3568|W536|W5510|W626|W632|W6350|W6360|W6500|W732|W736|W737|W7376|W820|W832|W8355|W8500|W8510|W930) Build',
None,
'$1',
'Philips',
'$1',
),
DeviceParser(
'; *(?:Philips|PHILIPS)[ _]([^;/]+) Build',
None,
'Philips $1',
'Philips',
'$1',
),
DeviceParser(
'Android 4\\..*; *(M[12356789]|U[12368]|S[123])\\ ?(pro)? Build',
None,
'Pipo $1$2',
'Pipo',
'$1$2',
),
DeviceParser(
'; *(MOMO[^;/]+) Build',
None,
'$1',
'Ployer',
'$1',
),
DeviceParser(
'; *(?:Polaroid[ _])?((?:MIDC\\d{3,}|PMID\\d{2,}|PTAB\\d{3,})[^;/]*)(\\/[^;/]*)? Build/',
None,
'$1',
'Polaroid',
'$1',
),
DeviceParser(
'; *(?:Polaroid )(Tablet) Build/',
None,
'$1',
'Polaroid',
'$1',
),
DeviceParser(
'; *(POMP)[ _\\-](.+?) *(?:Build|[;/\\)])',
None,
'$1 $2',
'Pomp',
'$2',
),
DeviceParser(
'; *(TB07STA|TB10STA|TB07FTA|TB10FTA) Build/',
None,
'$1',
'Positivo',
'$1',
),
DeviceParser(
'; *(?:Positivo )?((?:YPY|Ypy)[^;/]+) Build/',
None,
'$1',
'Positivo',
'$1',
),
DeviceParser(
'; *(MOB-[^;/]+) Build/',
None,
'$1',
'POV',
'$1',
),
DeviceParser(
'; *POV[ _\\-]([^;/]+) Build/',
None,
'POV $1',
'POV',
'$1',
),
DeviceParser(
'; *((?:TAB-PLAYTAB|TAB-PROTAB|PROTAB|PlayTabPro|Mobii[ _\\-]|TAB-P)[^;/]*) Build/',
None,
'POV $1',
'POV',
'$1',
),
DeviceParser(
'; *(?:Prestigio )?((?:PAP|PMP)\\d[^;/]+) Build/',
None,
'Prestigio $1',
'Prestigio',
'$1',
),
DeviceParser(
'; *(PLT[0-9]{4}.*) Build/',
None,
'$1',
'Proscan',
'$1',
),
DeviceParser(
'; *(A2|A5|A8|A900)_?(Classic)? Build',
None,
'$1 $2',
'Qmobile',
'$1 $2',
),
DeviceParser(
'; *(Q[Mm]obile)_([^_]+)_([^_]+) Build',
None,
'Qmobile $2 $3',
'Qmobile',
'$2 $3',
),
DeviceParser(
'; *(Q\\-?[Mm]obile)[_ ](A[^;/]+) Build',
None,
'Qmobile $2',
'Qmobile',
'$2',
),
DeviceParser(
'; *(Q\\-Smart)[ _]([^;/]+) Build/',
None,
'$1 $2',
'Qmobilevn',
'$2',
),
DeviceParser(
'; *(Q\\-?[Mm]obile)[ _\\-](S[^;/]+) Build/',
None,
'$1 $2',
'Qmobilevn',
'$2',
),
DeviceParser(
'; *(TA1013) Build',
None,
'$1',
'Quanta',
'$1',
),
DeviceParser(
'; *(RK\\d+),? Build/',
None,
'$1',
'Rockchip',
'$1',
),
DeviceParser(
' Build/(RK\\d+)',
None,
'$1',
'Rockchip',
'$1',
),
DeviceParser(
'; *(SAMSUNG |Samsung )?((?:Galaxy (?:Note II|S\\d)|GT-I9082|GT-I9205|GT-N7\\d{3}|SM-N9005)[^;/]*)\\/?[^;/]* Build/',
None,
'Samsung $1$2',
'Samsung',
'$2',
),
DeviceParser(
'; *(Google )?(Nexus [Ss](?: 4G)?) Build/',
None,
'Samsung $1$2',
'Samsung',
'$2',
),
DeviceParser(
'; *(SAMSUNG |Samsung )([^\\/]*)\\/[^ ]* Build/',
None,
'Samsung $2',
'Samsung',
'$2',
),
DeviceParser(
'; *(Galaxy(?: Ace| Nexus| S ?II+|Nexus S| with MCR 1.2| Mini Plus 4G)?) Build/',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'; *(SAMSUNG[ _\\-] *)+([^;/]+) Build',
None,
'Samsung $2',
'Samsung',
'$2',
),
DeviceParser(
'; *(SAMSUNG-)?(GT\\-[BINPS]\\d{4}[^\\/]*)(\\/[^ ]*) Build',
None,
'Samsung $1$2$3',
'Samsung',
'$2',
),
DeviceParser(
'(?:; *|^)((?:GT\\-[BIiNPS]\\d{4}|I9\\d{2}0[A-Za-z\\+]?\\b)[^;/\\)]*?)(?:Build|Linux|MIUI|[;/\\)])',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'; (SAMSUNG-)([A-Za-z0-9\\-]+).* Build/',
None,
'Samsung $1$2',
'Samsung',
'$2',
),
DeviceParser(
'; *((?:SCH|SGH|SHV|SHW|SPH|SC|SM)\\-[A-Za-z0-9 ]+)(/?[^ ]*)? Build',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
' ((?:SCH)\\-[A-Za-z0-9 ]+)(/?[^ ]*)? Build',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'; *(Behold ?(?:2|II)|YP\\-G[^;/]+|EK-GC100|SCL21|I9300) Build',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'; *(SH\\-?\\d\\d[^;/]+|SBM\\d[^;/]+) Build',
None,
'$1',
'Sharp',
'$1',
),
DeviceParser(
'; *(SHARP[ -])([^;/]+) Build',
None,
'$1$2',
'Sharp',
'$2',
),
DeviceParser(
'; *(SPX[_\\-]\\d[^;/]*) Build/',
None,
'$1',
'Simvalley',
'$1',
),
DeviceParser(
'; *(SX7\\-PEARL\\.GmbH) Build/',
None,
'$1',
'Simvalley',
'$1',
),
DeviceParser(
'; *(SP[T]?\\-\\d{2}[^;/]*) Build/',
None,
'$1',
'Simvalley',
'$1',
),
DeviceParser(
'; *(SK\\-.*) Build/',
None,
'$1',
'SKtelesys',
'$1',
),
DeviceParser(
'; *(?:SKYTEX|SX)-([^;/]+) Build',
None,
'$1',
'Skytex',
'$1',
),
DeviceParser(
'; *(IMAGINE [^;/]+) Build',
None,
'$1',
'Skytex',
'$1',
),
DeviceParser(
'; *(SmartQ) ?([^;/]+) Build/',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(WF7C|WF10C|SBT[^;/]+) Build',
None,
'$1',
'Smartbitt',
'$1',
),
DeviceParser(
'; *(SBM(?:003SH|005SH|006SH|007SH|102SH)) Build',
None,
'$1',
'Sharp',
'$1',
),
DeviceParser(
'; *(003P|101P|101P11C|102P) Build',
None,
'$1',
'Panasonic',
'$1',
),
DeviceParser(
'; *(00\\dZ) Build/',
None,
'$1',
'ZTE',
'$1',
),
DeviceParser(
'; HTC(X06HT) Build',
None,
'$1',
'HTC',
'$1',
),
DeviceParser(
'; *(001HT|X06HT) Build',
None,
'$1',
'HTC',
'$1',
),
DeviceParser(
'; *(201M) Build',
None,
'$1',
'Motorola',
'XT902',
),
DeviceParser(
'; *(ST\\d{4}.*)Build/ST',
None,
'Trekstor $1',
'Trekstor',
'$1',
),
DeviceParser(
'; *(ST\\d{4}.*) Build/',
None,
'Trekstor $1',
'Trekstor',
'$1',
),
DeviceParser(
'; *(Sony ?Ericsson ?)([^;/]+) Build',
None,
'$1$2',
'SonyEricsson',
'$2',
),
DeviceParser(
'; *((?:SK|ST|E|X|LT|MK|MT|WT)\\d{2}[a-z0-9]*(?:-o)?|R800i|U20i) Build',
None,
'$1',
'SonyEricsson',
'$1',
),
DeviceParser(
'; *(Xperia (?:A8|Arc|Acro|Active|Live with Walkman|Mini|Neo|Play|Pro|Ray|X\\d+)[^;/]*) Build',
'i',
'$1',
'SonyEricsson',
'$1',
),
DeviceParser(
'; Sony (Tablet[^;/]+) Build',
None,
'Sony $1',
'Sony',
'$1',
),
DeviceParser(
'; Sony ([^;/]+) Build',
None,
'Sony $1',
'Sony',
'$1',
),
DeviceParser(
'; *(Sony)([A-Za-z0-9\\-]+) Build',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(Xperia [^;/]+) Build',
None,
'$1',
'Sony',
'$1',
),
DeviceParser(
'; *(C(?:1[0-9]|2[0-9]|53|55|6[0-9])[0-9]{2}|D[25]\\d{3}|D6[56]\\d{2}) Build',
None,
'$1',
'Sony',
'$1',
),
DeviceParser(
'; *(SGP\\d{3}|SGPT\\d{2}) Build',
None,
'$1',
'Sony',
'$1',
),
DeviceParser(
'; *(NW-Z1000Series) Build',
None,
'$1',
'Sony',
'$1',
),
DeviceParser(
'PLAYSTATION 3',
None,
'PlayStation 3',
'Sony',
'PlayStation 3',
),
DeviceParser(
'(PlayStation (?:Portable|Vita|\\d+))',
None,
'$1',
'Sony',
'$1',
),
DeviceParser(
'; *((?:CSL_Spice|Spice|SPICE|CSL)[ _\\-]?)?([Mm][Ii])([ _\\-])?(\\d{3}[^;/]*) Build/',
None,
'$1$2$3$4',
'Spice',
'Mi$4',
),
DeviceParser(
'; *(Sprint )(.+?) *(?:Build|[;/])',
None,
'$1$2',
'Sprint',
'$2',
),
DeviceParser(
'\\b(Sprint)[: ]([^;,/ ]+)',
None,
'$1$2',
'Sprint',
'$2',
),
DeviceParser(
'; *(TAGI[ ]?)(MID) ?([^;/]+) Build/',
None,
'$1$2$3',
'Tagi',
'$2$3',
),
DeviceParser(
'; *(Oyster500|Opal 800) Build',
None,
'Tecmobile $1',
'Tecmobile',
'$1',
),
DeviceParser(
'; *(TECNO[ _])([^;/]+) Build/',
None,
'$1$2',
'Tecno',
'$2',
),
DeviceParser(
'; *Android for (Telechips|Techvision) ([^ ]+) ',
'i',
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(T-Hub2) Build/',
None,
'$1',
'Telstra',
'$1',
),
DeviceParser(
'; *(PAD) ?(100[12]) Build/',
None,
'Terra $1$2',
'Terra',
'$1$2',
),
DeviceParser(
'; *(T[BM]-\\d{3}[^;/]+) Build/',
None,
'$1',
'Texet',
'$1',
),
DeviceParser(
'; *(tolino [^;/]+) Build',
None,
'$1',
'Thalia',
'$1',
),
DeviceParser(
'; *Build/.* (TOLINO_BROWSER)',
None,
'$1',
'Thalia',
'Tolino Shine',
),
DeviceParser(
'; *(?:CJ[ -])?(ThL|THL)[ -]([^;/]+) Build/',
None,
'$1 $2',
'Thl',
'$2',
),
DeviceParser(
'; *(T100|T200|T5|W100|W200|W8s) Build/',
None,
'$1',
'Thl',
'$1',
),
DeviceParser(
'; *(T-Mobile[ _]G2[ _]Touch) Build',
None,
'$1',
'HTC',
'Hero',
),
DeviceParser(
'; *(T-Mobile[ _]G2) Build',
None,
'$1',
'HTC',
'Desire Z',
),
DeviceParser(
'; *(T-Mobile myTouch Q) Build',
None,
'$1',
'Huawei',
'U8730',
),
DeviceParser(
'; *(T-Mobile myTouch) Build',
None,
'$1',
'Huawei',
'U8680',
),
DeviceParser(
'; *(T-Mobile_Espresso) Build',
None,
'$1',
'HTC',
'Espresso',
),
DeviceParser(
'; *(T-Mobile G1) Build',
None,
'$1',
'HTC',
'Dream',
),
DeviceParser(
'\\b(T-Mobile ?)?(myTouch)[ _]?([34]G)[ _]?([^\\/]*) (?:Mozilla|Build)',
None,
'$1$2 $3 $4',
'HTC',
'$2 $3 $4',
),
DeviceParser(
'\\b(T-Mobile)_([^_]+)_(.*) Build',
None,
'$1 $2 $3',
'Tmobile',
'$2 $3',
),
DeviceParser(
'\\b(T-Mobile)[_ ]?(.*?)Build',
None,
'$1 $2',
'Tmobile',
'$2',
),
DeviceParser(
' (ATP[0-9]{4}) Build',
None,
'$1',
'Tomtec',
'$1',
),
DeviceParser(
' *(TOOKY)[ _\\-]([^;/]+) ?(?:Build|;)',
'i',
'$1 $2',
'Tooky',
'$2',
),
DeviceParser(
'\\b(TOSHIBA_AC_AND_AZ|TOSHIBA_FOLIO_AND_A|FOLIO_AND_A)',
None,
'$1',
'Toshiba',
'Folio 100',
),
DeviceParser(
'; *([Ff]olio ?100) Build/',
None,
'$1',
'Toshiba',
'Folio 100',
),
DeviceParser(
'; *(AT[0-9]{2,3}(?:\\-A|LE\\-A|PE\\-A|SE|a)?|AT7-A|AT1S0|Hikari-iFrame/WDPF-[^;/]+|THRiVE|Thrive) Build/',
None,
'Toshiba $1',
'Toshiba',
'$1',
),
DeviceParser(
'; *(TM-MID\\d+[^;/]+|TOUCHMATE|MID-750) Build',
None,
'$1',
'Touchmate',
'$1',
),
DeviceParser(
'; *(TM-SM\\d+[^;/]+) Build',
None,
'$1',
'Touchmate',
'$1',
),
DeviceParser(
'; *(A10 [Bb]asic2?) Build/',
None,
'$1',
'Treq',
'$1',
),
DeviceParser(
'; *(TREQ[ _\\-])([^;/]+) Build',
'i',
'$1$2',
'Treq',
'$2',
),
DeviceParser(
'; *(X-?5|X-?3) Build/',
None,
'$1',
'Umeox',
'$1',
),
DeviceParser(
'; *(A502\\+?|A936|A603|X1|X2) Build/',
None,
'$1',
'Umeox',
'$1',
),
DeviceParser(
'(TOUCH(?:TAB|PAD).+?) Build/',
'i',
'Versus $1',
'Versus',
'$1',
),
DeviceParser(
'(VERTU) ([^;/]+) Build/',
None,
'$1 $2',
'Vertu',
'$2',
),
DeviceParser(
'; *(Videocon)[ _\\-]([^;/]+) *(?:Build|;)',
None,
'$1 $2',
'Videocon',
'$2',
),
DeviceParser(
' (VT\\d{2}[A-Za-z]*) Build',
None,
'$1',
'Videocon',
'$1',
),
DeviceParser(
'; *((?:ViewPad|ViewPhone|VSD)[^;/]+) Build/',
None,
'$1',
'Viewsonic',
'$1',
),
DeviceParser(
'; *(ViewSonic-)([^;/]+) Build/',
None,
'$1$2',
'Viewsonic',
'$2',
),
DeviceParser(
'; *(GTablet.*) Build/',
None,
'$1',
'Viewsonic',
'$1',
),
DeviceParser(
'; *([Vv]ivo)[ _]([^;/]+) Build',
None,
'$1 $2',
'vivo',
'$2',
),
DeviceParser(
'(Vodafone) (.*) Build/',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(?:Walton[ _\\-])?(Primo[ _\\-][^;/]+) Build',
'i',
'Walton $1',
'Walton',
'$1',
),
DeviceParser(
'; *(?:WIKO[ \\-])?(CINK\\+?|BARRY|BLOOM|DARKFULL|DARKMOON|DARKNIGHT|DARKSIDE|FIZZ|HIGHWAY|IGGY|OZZY|RAINBOW|STAIRWAY|SUBLIM|WAX|CINK [^;/]+) Build/',
'i',
'Wiko $1',
'Wiko',
'$1',
),
DeviceParser(
'; *WellcoM-([^;/]+) Build',
None,
'Wellcom $1',
'Wellcom',
'$1',
),
DeviceParser(
'(?:(WeTab)-Browser|; (wetab) Build)',
None,
'$1',
'WeTab',
'WeTab',
),
DeviceParser(
'; *(AT-AS[^;/]+) Build',
None,
'Wolfgang $1',
'Wolfgang',
'$1',
),
DeviceParser(
'; *(?:Woxter|Wxt) ([^;/]+) Build',
None,
'Woxter $1',
'Woxter',
'$1',
),
DeviceParser(
'; *(?:Xenta |Luna )?(TAB[234][0-9]{2}|TAB0[78]-\\d{3}|TAB0?9-\\d{3}|TAB1[03]-\\d{3}|SMP\\d{2}-\\d{3}) Build/',
None,
'Yarvik $1',
'Yarvik',
'$1',
),
DeviceParser(
'; *([A-Z]{2,4})(M\\d{3,}[A-Z]{2})([^;\\)\\/]*)(?: Build|[;\\)])',
None,
'Yifang $1$2$3',
'Yifang',
'$2',
),
DeviceParser(
'; *((MI|HM|MI-ONE|Redmi)[ -](NOTE |Note )?[^;/]*) Build/',
None,
'XiaoMi $1',
'XiaoMi',
'$1',
),
DeviceParser(
'; *XOLO[ _]([^;/]*tab.*) Build',
'i',
'Xolo $1',
'Xolo',
'$1',
),
DeviceParser(
'; *XOLO[ _]([^;/]+) Build',
'i',
'Xolo $1',
'Xolo',
'$1',
),
DeviceParser(
'; *(q\\d0{2,3}[a-z]?) Build',
'i',
'Xolo $1',
'Xolo',
'$1',
),
DeviceParser(
'; *(PAD ?[79]\\d+[^;/]*|TelePAD\\d+[^;/]) Build',
None,
'Xoro $1',
'Xoro',
'$1',
),
DeviceParser(
'; *(?:(?:ZOPO|Zopo)[ _]([^;/]+)|(ZP ?(?:\\d{2}[^;/]+|C2))|(C[2379])) Build',
None,
'$1$2$3',
'Zopo',
'$1$2$3',
),
DeviceParser(
'; *(ZiiLABS) (Zii[^;/]*) Build',
None,
'$1 $2',
'ZiiLabs',
'$2',
),
DeviceParser(
'; *(Zii)_([^;/]*) Build',
None,
'$1 $2',
'ZiiLabs',
'$2',
),
DeviceParser(
'; *(ARIZONA|(?:ATLAS|Atlas) W|D930|Grand (?:[SX][^;]*|Era|Memo[^;]*)|JOE|(?:Kis|KIS)\\b[^;]*|Libra|Light [^;]*|N8[056][01]|N850L|N8000|N9[15]\\d{2}|N9810|NX501|Optik|(?:Vip )Racer[^;]*|RacerII|RACERII|San Francisco[^;]*|V9[AC]|V55|V881|Z[679][0-9]{2}[A-z]?) Build',
None,
'$1',
'ZTE',
'$1',
),
DeviceParser(
'; *([A-Z]\\d+)_USA_[^;]* Build',
None,
'$1',
'ZTE',
'$1',
),
DeviceParser(
'; *(SmartTab\\d+)[^;]* Build',
None,
'$1',
'ZTE',
'$1',
),
DeviceParser(
'; *(?:Blade|BLADE|ZTE-BLADE)([^;/]*) Build',
None,
'ZTE Blade$1',
'ZTE',
'Blade$1',
),
DeviceParser(
'; *(?:Skate|SKATE|ZTE-SKATE)([^;/]*) Build',
None,
'ZTE Skate$1',
'ZTE',
'Skate$1',
),
DeviceParser(
'; *(Orange |Optimus )(Monte Carlo|San Francisco) Build',
None,
'$1$2',
'ZTE',
'$1$2',
),
DeviceParser(
'; *(?:ZXY-ZTE_|ZTE\\-U |ZTE[\\- _]|ZTE-C[_ ])([^;/]+) Build',
None,
'ZTE $1',
'ZTE',
'$1',
),
DeviceParser(
'; (BASE) (lutea|Lutea 2|Tab[^;]*) Build',
None,
'$1 $2',
'ZTE',
'$1 $2',
),
DeviceParser(
'; (Avea inTouch 2|soft stone|tmn smart a7|Movistar[ _]Link) Build',
'i',
'$1',
'ZTE',
'$1',
),
DeviceParser(
'; *(vp9plus)\\)',
None,
'$1',
'ZTE',
'$1',
),
DeviceParser(
'; ?(Cloud[ _]Z5|z1000|Z99 2G|z99|z930|z999|z990|z909|Z919|z900) Build/',
None,
'$1',
'Zync',
'$1',
),
DeviceParser(
'; ?(KFOT|Kindle Fire) Build\\b',
None,
'Kindle Fire',
'Amazon',
'Kindle Fire',
),
DeviceParser(
'; ?(KFOTE|Amazon Kindle Fire2) Build\\b',
None,
'Kindle Fire 2',
'Amazon',
'Kindle Fire 2',
),
DeviceParser(
'; ?(KFTT) Build\\b',
None,
'Kindle Fire HD',
'Amazon',
'Kindle Fire HD 7"',
),
DeviceParser(
'; ?(KFJWI) Build\\b',
None,
'Kindle Fire HD 8.9" WiFi',
'Amazon',
'Kindle Fire HD 8.9" WiFi',
),
DeviceParser(
'; ?(KFJWA) Build\\b',
None,
'Kindle Fire HD 8.9" 4G',
'Amazon',
'Kindle Fire HD 8.9" 4G',
),
DeviceParser(
'; ?(KFSOWI) Build\\b',
None,
'Kindle Fire HD 7" WiFi',
'Amazon',
'Kindle Fire HD 7" WiFi',
),
DeviceParser(
'; ?(KFTHWI) Build\\b',
None,
'Kindle Fire HDX 7" WiFi',
'Amazon',
'Kindle Fire HDX 7" WiFi',
),
DeviceParser(
'; ?(KFTHWA) Build\\b',
None,
'Kindle Fire HDX 7" 4G',
'Amazon',
'Kindle Fire HDX 7" 4G',
),
DeviceParser(
'; ?(KFAPWI) Build\\b',
None,
'Kindle Fire HDX 8.9" WiFi',
'Amazon',
'Kindle Fire HDX 8.9" WiFi',
),
DeviceParser(
'; ?(KFAPWA) Build\\b',
None,
'Kindle Fire HDX 8.9" 4G',
'Amazon',
'Kindle Fire HDX 8.9" 4G',
),
DeviceParser(
'; ?Amazon ([^;/]+) Build\\b',
None,
'$1',
'Amazon',
'$1',
),
DeviceParser(
'; ?(Kindle) Build\\b',
None,
'Kindle',
'Amazon',
'Kindle',
),
DeviceParser(
'; ?(Silk)/(\\d+)\\.(\\d+)(?:\\.([0-9\\-]+))? Build\\b',
None,
'Kindle Fire',
'Amazon',
'Kindle Fire$2',
),
DeviceParser(
' (Kindle)/(\\d+\\.\\d+)',
None,
'Kindle',
'Amazon',
'$1 $2',
),
DeviceParser(
' (Silk|Kindle)/(\\d+)\\.',
None,
'Kindle',
'Amazon',
'Kindle',
),
DeviceParser(
'(sprd)\\-([^/]+)/',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'; *(H\\d{2}00\\+?) Build',
None,
'$1',
'Hero',
'$1',
),
DeviceParser(
'; *(iphone|iPhone5) Build/',
None,
'Xianghe $1',
'Xianghe',
'$1',
),
DeviceParser(
'; *(e\\d{4}[a-z]?_?v\\d+|v89_[^;/]+)[^;/]+ Build/',
None,
'Xianghe $1',
'Xianghe',
'$1',
),
DeviceParser(
'\\bUSCC[_\\-]?([^ ;/\\)]+)',
None,
'$1',
'Cellular',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?)?(?:ALCATEL)[^;]*; *([^;,\\)]+)',
None,
'Alcatel $1',
'Alcatel',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?)?(?:ASUS|Asus)[^;]*; *([^;,\\)]+)',
None,
'Asus $1',
'Asus',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?)?(?:DELL|Dell)[^;]*; *([^;,\\)]+)',
None,
'Dell $1',
'Dell',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?)?(?:HTC|Htc|HTC_blocked[^;]*)[^;]*; *(?:HTC)?([^;,\\)]+)',
None,
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?)?(?:HUAWEI)[^;]*; *(?:HUAWEI )?([^;,\\)]+)',
None,
'Huawei $1',
'Huawei',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?)?(?:LG|Lg)[^;]*; *(?:LG[ \\-])?([^;,\\)]+)',
None,
'LG $1',
'LG',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?)?(?:NOKIA|Nokia)[^;]*; *(?:NOKIA ?|Nokia ?|LUMIA ?|[Ll]umia ?)*(\\d{3,}[^;\\)]*)',
None,
'Lumia $1',
'Nokia',
'Lumia $1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?)?(?:NOKIA|Nokia)[^;]*; *(RM-\\d{3,})',
None,
'Nokia $1',
'Nokia',
'$1',
),
DeviceParser(
'(?:Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)]|WPDesktop;) ?(?:ARM; ?Touch; ?|Touch; ?)?(?:NOKIA|Nokia)[^;]*; *(?:NOKIA ?|Nokia ?|LUMIA ?|[Ll]umia ?)*([^;\\)]+)',
None,
'Nokia $1',
'Nokia',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?)?(?:Microsoft(?: Corporation)?)[^;]*; *([^;,\\)]+)',
None,
'Microsoft $1',
'Microsoft',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?)?(?:SAMSUNG)[^;]*; *(?:SAMSUNG )?([^;,\\.\\)]+)',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?)?(?:TOSHIBA|FujitsuToshibaMobileCommun)[^;]*; *([^;,\\)]+)',
None,
'Toshiba $1',
'Toshiba',
'$1',
),
DeviceParser(
'Windows Phone [^;]+; .*?IEMobile/[^;\\)]+[;\\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?)?([^;]+); *([^;,\\)]+)',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'(?:^|; )SAMSUNG\\-([A-Za-z0-9\\-]+).* Bada/',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'\\(Mobile; ALCATEL ?(One|ONE) ?(Touch|TOUCH) ?([^;/]+)(?:/[^;]+)?; rv:[^\\)]+\\) Gecko/[^\\/]+ Firefox/',
None,
'Alcatel $1 $2 $3',
'Alcatel',
'One Touch $3',
),
DeviceParser(
'\\(Mobile; (?:ZTE([^;]+)|(OpenC)); rv:[^\\)]+\\) Gecko/[^\\/]+ Firefox/',
None,
'ZTE $1$2',
'ZTE',
'$1$2',
),
DeviceParser(
'Nokia(N[0-9]+)([A-z_\\-][A-z0-9_\\-]*)',
None,
'Nokia $1',
'Nokia',
'$1$2',
),
DeviceParser(
'(?:NOKIA|Nokia)(?:\\-| *)(?:([A-Za-z0-9]+)\\-[0-9a-f]{32}|([A-Za-z0-9\\-]+)(?:UCBrowser)|([A-Za-z0-9\\-]+))',
None,
'Nokia $1$2$3',
'Nokia',
'$1$2$3',
),
DeviceParser(
'Lumia ([A-Za-z0-9\\-]+)',
None,
'Lumia $1',
'Nokia',
'Lumia $1',
),
DeviceParser(
'\\(Symbian; U; S60 V5; [A-z]{2}\\-[A-z]{2}; (SonyEricsson|Samsung|Nokia|LG)([^;/]+)\\)',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'\\(Symbian(?:/3)?; U; ([^;]+);',
None,
'Nokia $1',
'Nokia',
'$1',
),
DeviceParser(
'BB10; ([A-Za-z0-9\\- ]+)\\)',
None,
'BlackBerry $1',
'BlackBerry',
'$1',
),
DeviceParser(
'Play[Bb]ook.+RIM Tablet OS',
None,
'BlackBerry Playbook',
'BlackBerry',
'Playbook',
),
DeviceParser(
'Black[Bb]erry ([0-9]+);',
None,
'BlackBerry $1',
'BlackBerry',
'$1',
),
DeviceParser(
'Black[Bb]erry([0-9]+)',
None,
'BlackBerry $1',
'BlackBerry',
'$1',
),
DeviceParser(
'Black[Bb]erry;',
None,
'BlackBerry',
'BlackBerry',
None,
),
DeviceParser(
'(Pre|Pixi)/\\d+\\.\\d+',
None,
'Palm $1',
'Palm',
'$1',
),
DeviceParser(
'Palm([0-9]+)',
None,
'Palm $1',
'Palm',
'$1',
),
DeviceParser(
'Treo([A-Za-z0-9]+)',
None,
'Palm Treo $1',
'Palm',
'Treo $1',
),
DeviceParser(
'webOS.*(P160U(?:NA)?)/(\\d+).(\\d+)',
None,
'HP Veer',
'HP',
'Veer',
),
DeviceParser(
'(Touch[Pp]ad)/\\d+\\.\\d+',
None,
'HP TouchPad',
'HP',
'TouchPad',
),
DeviceParser(
'HPiPAQ([A-Za-z0-9]+)/\\d+.\\d+',
None,
'HP iPAQ $1',
'HP',
'iPAQ $1',
),
DeviceParser(
'PDA; (PalmOS)/sony/model ([a-z]+)/Revision',
None,
'$1',
'Sony',
'$1 $2',
),
DeviceParser(
'(Apple\\s?TV)',
None,
'AppleTV',
'Apple',
'AppleTV',
),
DeviceParser(
'(QtCarBrowser)',
None,
'Tesla Model S',
'Tesla',
'Model S',
),
DeviceParser(
'((?:iPhone|iPad|iPod)\\d+,\\d+)',
None,
'$1',
'Apple',
'$1',
),
DeviceParser(
'(iPad)(?:;| Simulator;)',
None,
'$1',
'Apple',
'$1',
),
DeviceParser(
'(iPod)(?:;| touch;| Simulator;)',
None,
'$1',
'Apple',
'$1',
),
DeviceParser(
'(iPhone)(?:;| Simulator;)',
None,
'$1',
'Apple',
'$1',
),
DeviceParser(
'CFNetwork/.* Darwin/\\d.*\\(((?:Mac|iMac|PowerMac|PowerBook)[^\\d]*)(\\d+)(?:,|%2C)(\\d+)',
None,
'$1$2,$3',
'Apple',
'$1$2,$3',
),
DeviceParser(
'CFNetwork/.* Darwin/\\d',
None,
'iOS-Device',
'Apple',
'iOS-Device',
),
DeviceParser(
'acer_([A-Za-z0-9]+)_',
None,
'Acer $1',
'Acer',
'$1',
),
DeviceParser(
'(?:ALCATEL|Alcatel)-([A-Za-z0-9\\-]+)',
None,
'Alcatel $1',
'Alcatel',
'$1',
),
DeviceParser(
'(?:Amoi|AMOI)\\-([A-Za-z0-9]+)',
None,
'Amoi $1',
'Amoi',
'$1',
),
DeviceParser(
'(?:; |\\/|^)((?:Transformer (?:Pad|Prime) |Transformer |PadFone[ _]?)[A-Za-z0-9]*)',
None,
'Asus $1',
'Asus',
'$1',
),
DeviceParser(
'(?:asus.*?ASUS|Asus|ASUS|asus)[\\- ;]*((?:Transformer (?:Pad|Prime) |Transformer |Padfone |Nexus[ _])?[A-Za-z0-9]+)',
None,
'Asus $1',
'Asus',
'$1',
),
DeviceParser(
'\\bBIRD[ \\-\\.]([A-Za-z0-9]+)',
None,
'Bird $1',
'Bird',
'$1',
),
DeviceParser(
'\\bDell ([A-Za-z0-9]+)',
None,
'Dell $1',
'Dell',
'$1',
),
DeviceParser(
'DoCoMo/2\\.0 ([A-Za-z0-9]+)',
None,
'DoCoMo $1',
'DoCoMo',
'$1',
),
DeviceParser(
'([A-Za-z0-9]+)_W;FOMA',
None,
'DoCoMo $1',
'DoCoMo',
'$1',
),
DeviceParser(
'([A-Za-z0-9]+);FOMA',
None,
'DoCoMo $1',
'DoCoMo',
'$1',
),
DeviceParser(
'\\b(?:HTC/|HTC/[a-z0-9]+/)?HTC[ _\\-;]? *(.*?)(?:-?Mozilla|fingerPrint|[;/\\(\\)]|$)',
None,
'HTC $1',
'HTC',
'$1',
),
DeviceParser(
'Huawei([A-Za-z0-9]+)',
None,
'Huawei $1',
'Huawei',
'$1',
),
DeviceParser(
'HUAWEI-([A-Za-z0-9]+)',
None,
'Huawei $1',
'Huawei',
'$1',
),
DeviceParser(
'vodafone([A-Za-z0-9]+)',
None,
'Huawei Vodafone $1',
'Huawei',
'Vodafone $1',
),
DeviceParser(
'i\\-mate ([A-Za-z0-9]+)',
None,
'i-mate $1',
'i-mate',
'$1',
),
DeviceParser(
'Kyocera\\-([A-Za-z0-9]+)',
None,
'Kyocera $1',
'Kyocera',
'$1',
),
DeviceParser(
'KWC\\-([A-Za-z0-9]+)',
None,
'Kyocera $1',
'Kyocera',
'$1',
),
DeviceParser(
'Lenovo[_\\-]([A-Za-z0-9]+)',
None,
'Lenovo $1',
'Lenovo',
'$1',
),
DeviceParser(
'(HbbTV)/[0-9]+\\.[0-9]+\\.[0-9]+ \\([^;]*; *(LG)E *; *([^;]*) *;[^;]*;[^;]*;\\)',
None,
'$1',
'$2',
'$3',
),
DeviceParser(
'(HbbTV)/1\\.1\\.1.*CE-HTML/1\\.\\d;(Vendor/)*(THOM[^;]*?)[;\\s](?:.*SW-Version/.*)*(LF[^;]+);?',
None,
'$1',
'Thomson',
'$4',
),
DeviceParser(
'(HbbTV)(?:/1\\.1\\.1)?(?: ?\\(;;;;;\\))?; *CE-HTML(?:/1\\.\\d)?; *([^ ]+) ([^;]+);',
None,
'$1',
'$2',
'$3',
),
DeviceParser(
'(HbbTV)/1\\.1\\.1 \\(;;;;;\\) Maple_2011',
None,
'$1',
'Samsung',
None,
),
DeviceParser(
'(HbbTV)/[0-9]+\\.[0-9]+\\.[0-9]+ \\([^;]*; *(?:CUS:([^;]*)|([^;]+)) *; *([^;]*) *;.*;',
None,
'$1',
'$2$3',
'$4',
),
DeviceParser(
'(HbbTV)/[0-9]+\\.[0-9]+\\.[0-9]+',
None,
'$1',
None,
None,
),
DeviceParser(
'LGE; (?:Media\\/)?([^;]*);[^;]*;[^;]*;?\\); "?LG NetCast(\\.TV|\\.Media|)-\\d+',
None,
'NetCast$2',
'LG',
'$1',
),
DeviceParser(
'InettvBrowser/[0-9]+\\.[0-9A-Z]+ \\([^;]*;(Sony)([^;]*);[^;]*;[^\\)]*\\)',
None,
'Inettv',
'$1',
'$2',
),
DeviceParser(
'InettvBrowser/[0-9]+\\.[0-9A-Z]+ \\([^;]*;([^;]*);[^;]*;[^\\)]*\\)',
None,
'Inettv',
'Generic_Inettv',
'$1',
),
DeviceParser(
'(?:InettvBrowser|TSBNetTV|NETTV|HBBTV)',
None,
'Inettv',
'Generic_Inettv',
None,
),
DeviceParser(
'Series60/\\d\\.\\d (LG)[\\-]?([A-Za-z0-9 \\-]+)',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'\\b(?:LGE[ \\-]LG\\-(?:AX)?|LGE |LGE?-LG|LGE?[ \\-]|LG[ /\\-]|lg[\\-])([A-Za-z0-9]+)\\b',
None,
'LG $1',
'LG',
'$1',
),
DeviceParser(
'(?:^LG[\\-]?|^LGE[\\-/]?)([A-Za-z]+[0-9]+[A-Za-z]*)',
None,
'LG $1',
'LG',
'$1',
),
DeviceParser(
'^LG([0-9]+[A-Za-z]*)',
None,
'LG $1',
'LG',
'$1',
),
DeviceParser(
'(KIN\\.[^ ]+) (\\d+)\\.(\\d+)',
None,
'Microsoft $1',
'Microsoft',
'$1',
),
DeviceParser(
'(?:MSIE|XBMC).*\\b(Xbox)\\b',
None,
'$1',
'Microsoft',
'$1',
),
DeviceParser(
'; ARM; Trident/6\\.0; Touch[\\);]',
None,
'Microsoft Surface RT',
'Microsoft',
'Surface RT',
),
DeviceParser(
'Motorola\\-([A-Za-z0-9]+)',
None,
'Motorola $1',
'Motorola',
'$1',
),
DeviceParser(
'MOTO\\-([A-Za-z0-9]+)',
None,
'Motorola $1',
'Motorola',
'$1',
),
DeviceParser(
'MOT\\-([A-z0-9][A-z0-9\\-]*)',
None,
'Motorola $1',
'Motorola',
'$1',
),
DeviceParser(
'Nintendo WiiU',
None,
'Nintendo Wii U',
'Nintendo',
'Wii U',
),
DeviceParser(
'Nintendo (DS|3DS|DSi|Wii);',
None,
'Nintendo $1',
'Nintendo',
'$1',
),
DeviceParser(
'(?:Pantech|PANTECH)[ _-]?([A-Za-z0-9\\-]+)',
None,
'Pantech $1',
'Pantech',
'$1',
),
DeviceParser(
'Philips([A-Za-z0-9]+)',
None,
'Philips $1',
'Philips',
'$1',
),
DeviceParser(
'Philips ([A-Za-z0-9]+)',
None,
'Philips $1',
'Philips',
'$1',
),
DeviceParser(
'SymbianOS/9\\.\\d.* Samsung[/\\-]([A-Za-z0-9 \\-]+)',
None,
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'(Samsung)(SGH)(i[0-9]+)',
None,
'$1 $2$3',
'$1',
'$2-$3',
),
DeviceParser(
'SAMSUNG-ANDROID-MMS/([^;/]+)',
None,
'$1',
'Samsung',
'$1',
),
DeviceParser(
'SAMSUNG(?:; |[ -/])([A-Za-z0-9\\-]+)',
'i',
'Samsung $1',
'Samsung',
'$1',
),
DeviceParser(
'(Dreamcast)',
None,
'Sega $1',
'Sega',
'$1',
),
DeviceParser(
'^SIE-([A-Za-z0-9]+)',
None,
'Siemens $1',
'Siemens',
'$1',
),
DeviceParser(
'Softbank/[12]\\.0/([A-Za-z0-9]+)',
None,
'Softbank $1',
'Softbank',
'$1',
),
DeviceParser(
'SonyEricsson ?([A-Za-z0-9\\-]+)',
None,
'Ericsson $1',
'SonyEricsson',
'$1',
),
DeviceParser(
'Android [^;]+; ([^ ]+) (Sony)/',
None,
'$2 $1',
'$2',
'$1',
),
DeviceParser(
'(Sony)(?:BDP\\/|\\/)?([^ /;\\)]+)[ /;\\)]',
None,
'$1 $2',
'$1',
'$2',
),
DeviceParser(
'Puffin/[\\d\\.]+IT',
None,
'iPad',
'Apple',
'iPad',
),
DeviceParser(
'Puffin/[\\d\\.]+IP',
None,
'iPhone',
'Apple',
'iPhone',
),
DeviceParser(
'Puffin/[\\d\\.]+AT',
None,
'Generic Tablet',
'Generic',
'Tablet',
),
DeviceParser(
'Puffin/[\\d\\.]+AP',
None,
'Generic Smartphone',
'Generic',
'Smartphone',
),
DeviceParser(
'Android[\\- ][\\d]+\\.[\\d]+; [A-Za-z]{2}\\-[A-Za-z]{0,2}; WOWMobile (.+) Build',
None,
None,
'Generic_Android',
'$1',
),
DeviceParser(
'Android[\\- ][\\d]+\\.[\\d]+\\-update1; [A-Za-z]{2}\\-[A-Za-z]{0,2} *; *(.+?) Build',
None,
None,
'Generic_Android',
'$1',
),
DeviceParser(
'Android[\\- ][\\d]+(?:\\.[\\d]+){1,2}; *[A-Za-z]{2}[_\\-][A-Za-z]{0,2}\\-? *; *(.+?) Build',
None,
None,
'Generic_Android',
'$1',
),
DeviceParser(
'Android[\\- ][\\d]+(?:\\.[\\d]+){1,2}; *[A-Za-z]{0,2}\\- *; *(.+?) Build',
None,
None,
'Generic_Android',
'$1',
),
DeviceParser(
'Android[\\- ][\\d]+(?:\\.[\\d]+){1,2}; *[a-z]{0,2}[_\\-]?[A-Za-z]{0,2};? Build',
None,
'Generic Smartphone',
'Generic',
'Smartphone',
),
DeviceParser(
'Android[\\- ][\\d]+(?:\\.[\\d]+){1,2}; *\\-?[A-Za-z]{2}; *(.+?) Build',
None,
None,
'Generic_Android',
'$1',
),
DeviceParser(
'Android[\\- ][\\d]+(?:\\.[\\d]+){1,2}(?:;.*)?; *(.+?) Build',
None,
None,
'Generic_Android',
'$1',
),
DeviceParser(
'(GoogleTV)',
None,
None,
'Generic_Inettv',
'$1',
),
DeviceParser(
'(WebTV)/\\d+.\\d+',
None,
None,
'Generic_Inettv',
'$1',
),
DeviceParser(
'^(Roku)/DVP-\\d+\\.\\d+',
None,
None,
'Generic_Inettv',
'$1',
),
DeviceParser(
'(Android 3\\.\\d|Opera Tablet|Tablet; .+Firefox/|Android.*(?:Tab|Pad))',
'i',
'Generic Tablet',
'Generic',
'Tablet',
),
DeviceParser(
'(Symbian|\\bS60(Version|V\\d)|\\bS60\\b|\\((Series 60|Windows Mobile|Palm OS|Bada); Opera Mini|Windows CE|Opera Mobi|BREW|Brew|Mobile; .+Firefox/|iPhone OS|Android|MobileSafari|Windows *Phone|\\(webOS/|PalmOS)',
None,
'Generic Smartphone',
'Generic',
'Smartphone',
),
DeviceParser(
'(hiptop|avantgo|plucker|xiino|blazer|elaine)',
'i',
'Generic Smartphone',
'Generic',
'Smartphone',
),
DeviceParser(
'(bot|zao|borg|DBot|oegp|silk|Xenu|zeal|^NING|CCBot|crawl|htdig|lycos|slurp|teoma|voila|yahoo|Sogou|CiBra|Nutch|^Java/|^JNLP/|Daumoa|Genieo|ichiro|larbin|pompos|Scrapy|snappy|speedy|spider|msnbot|msrbot|vortex|^vortex|crawler|favicon|indexer|Riddler|scooter|scraper|scrubby|WhatWeb|WinHTTP|bingbot|openbot|gigabot|furlbot|polybot|seekbot|^voyager|archiver|Icarus6j|mogimogi|Netvibes|blitzbot|altavista|charlotte|findlinks|Retreiver|TLSProber|WordPress|SeznamBot|ProoXiBot|wsr\\-agent|Squrl Java|EtaoSpider|PaperLiBot|SputnikBot|A6\\-Indexer|netresearch|searchsight|baiduspider|YisouSpider|ICC\\-Crawler|http%20client|Python-urllib|dataparksearch|converacrawler|Screaming Frog|AppEngine-Google|YahooCacheSystem|fast\\-webcrawler|Sogou Pic Spider|semanticdiscovery|Innovazion Crawler|facebookexternalhit|Google.*/\\+/web/snippet|Google-HTTP-Java-Client|BlogBridge|IlTrovatore-Setaccio|InternetArchive|GomezAgent|WebThumbnail|heritrix|NewsGator|PagePeeker|Reaper|ZooShot|holmes)',
'i',
'Spider',
'Spider',
'Desktop',
),
DeviceParser(
'^(1207|3gso|4thp|501i|502i|503i|504i|505i|506i|6310|6590|770s|802s|a wa|acer|acs\\-|airn|alav|asus|attw|au\\-m|aur |aus |abac|acoo|aiko|alco|alca|amoi|anex|anny|anyw|aptu|arch|argo|bmobile|bell|bird|bw\\-n|bw\\-u|beck|benq|bilb|blac|c55/|cdm\\-|chtm|capi|comp|cond|dall|dbte|dc\\-s|dica|ds\\-d|ds12|dait|devi|dmob|doco|dopo|dorado|el(?:38|39|48|49|50|55|58|68)|el[3456]\\d{2}dual|erk0|esl8|ex300|ez40|ez60|ez70|ezos|ezze|elai|emul|eric|ezwa|fake|fly\\-|fly_|g\\-mo|g1 u|g560|gf\\-5|grun|gene|go.w|good|grad|hcit|hd\\-m|hd\\-p|hd\\-t|hei\\-|hp i|hpip|hs\\-c|htc |htc\\-|htca|htcg)',
'i',
'Generic Feature Phone',
'Generic',
'Feature Phone',
),
DeviceParser(
'^(htcp|htcs|htct|htc_|haie|hita|huaw|hutc|i\\-20|i\\-go|i\\-ma|i\\-mobile|i230|iac|iac\\-|iac/|ig01|im1k|inno|iris|jata|kddi|kgt|kgt/|kpt |kwc\\-|klon|lexi|lg g|lg\\-a|lg\\-b|lg\\-c|lg\\-d|lg\\-f|lg\\-g|lg\\-k|lg\\-l|lg\\-m|lg\\-o|lg\\-p|lg\\-s|lg\\-t|lg\\-u|lg\\-w|lg/k|lg/l|lg/u|lg50|lg54|lge\\-|lge/|leno|m1\\-w|m3ga|m50/|maui|mc01|mc21|mcca|medi|meri|mio8|mioa|mo01|mo02|mode|modo|mot |mot\\-|mt50|mtp1|mtv |mate|maxo|merc|mits|mobi|motv|mozz|n100|n101|n102|n202|n203|n300|n302|n500|n502|n505|n700|n701|n710|nec\\-|nem\\-|newg|neon)',
'i',
'Generic Feature Phone',
'Generic',
'Feature Phone',
),
DeviceParser(
'^(netf|noki|nzph|o2 x|o2\\-x|opwv|owg1|opti|oran|ot\\-s|p800|pand|pg\\-1|pg\\-2|pg\\-3|pg\\-6|pg\\-8|pg\\-c|pg13|phil|pn\\-2|pt\\-g|palm|pana|pire|pock|pose|psio|qa\\-a|qc\\-2|qc\\-3|qc\\-5|qc\\-7|qc07|qc12|qc21|qc32|qc60|qci\\-|qwap|qtek|r380|r600|raks|rim9|rove|s55/|sage|sams|sc01|sch\\-|scp\\-|sdk/|se47|sec\\-|sec0|sec1|semc|sgh\\-|shar|sie\\-|sk\\-0|sl45|slid|smb3|smt5|sp01|sph\\-|spv |spv\\-|sy01|samm|sany|sava|scoo|send|siem|smar|smit|soft|sony|t\\-mo|t218|t250|t600|t610|t618|tcl\\-|tdg\\-|telm|tim\\-|ts70|tsm\\-|tsm3|tsm5|tx\\-9|tagt)',
'i',
'Generic Feature Phone',
'Generic',
'Feature Phone',
),
DeviceParser(
'^(talk|teli|topl|tosh|up.b|upg1|utst|v400|v750|veri|vk\\-v|vk40|vk50|vk52|vk53|vm40|vx98|virg|vertu|vite|voda|vulc|w3c |w3c\\-|wapj|wapp|wapu|wapm|wig |wapi|wapr|wapv|wapy|wapa|waps|wapt|winc|winw|wonu|x700|xda2|xdag|yas\\-|your|zte\\-|zeto|aste|audi|avan|blaz|brew|brvw|bumb|ccwa|cell|cldc|cmd\\-|dang|eml2|fetc|hipt|http|ibro|idea|ikom|ipaq|jbro|jemu|jigs|keji|kyoc|kyok|libw|m\\-cr|midp|mmef|moto|mwbp|mywa|newt|nok6|o2im|pant|pdxg|play|pluc|port|prox|rozo|sama|seri|smal|symb|treo|upsi|vx52|vx53|vx60|vx61|vx70|vx80|vx81|vx83|vx85|wap\\-|webc|whit|wmlb|xda\\-|xda_)',
'i',
'Generic Feature Phone',
'Generic',
'Feature Phone',
),
DeviceParser(
'^(Ice)$',
None,
'Generic Feature Phone',
'Generic',
'Feature Phone',
),
DeviceParser(
'(wap[\\-\\ ]browser|maui|netfront|obigo|teleca|up\\.browser|midp|Opera Mini)',
'i',
'Generic Feature Phone',
'Generic',
'Feature Phone',
),
]
OS_PARSERS = [
OSParser(
'HbbTV/\\d+\\.\\d+\\.\\d+ \\( ;(LG)E ;NetCast 4.0',
None,
'2013',
None,
None,
None,
),
OSParser(
'HbbTV/\\d+\\.\\d+\\.\\d+ \\( ;(LG)E ;NetCast 3.0',
None,
'2012',
None,
None,
None,
),
OSParser(
'HbbTV/1.1.1 \\(;;;;;\\) Maple_2011',
'Samsung',
'2011',
None,
None,
None,
),
OSParser(
'HbbTV/\\d+\\.\\d+\\.\\d+ \\(;(Samsung);SmartTV([0-9]{4});.*FXPDEUC',
None,
None,
'UE40F7000',
None,
None,
),
OSParser(
'HbbTV/\\d+\\.\\d+\\.\\d+ \\(;(Samsung);SmartTV([0-9]{4});.*MST12DEUC',
None,
None,
'UE32F4500',
None,
None,
),
OSParser(
'HbbTV/1.1.1 \\(; (Philips);.*NETTV/4',
None,
'2013',
None,
None,
None,
),
OSParser(
'HbbTV/1.1.1 \\(; (Philips);.*NETTV/3',
None,
'2012',
None,
None,
None,
),
OSParser(
'HbbTV/1.1.1 \\(; (Philips);.*NETTV/2',
None,
'2011',
None,
None,
None,
),
OSParser(
'HbbTV/\\d+\\.\\d+\\.\\d+.*(firetv)-firefox-plugin (\\d+).(\\d+).(\\d+)',
'FireHbbTV',
None,
None,
None,
None,
),
OSParser(
'HbbTV/\\d+\\.\\d+\\.\\d+ \\(.*; ?([a-zA-Z]+) ?;.*(201[1-9]).*\\)',
None,
None,
None,
None,
None,
),
OSParser(
'(Windows Phone) (?:OS[ /])?(\\d+)\\.(\\d+)',
None,
None,
None,
None,
None,
),
OSParser(
'(Android)[ \\-/](\\d+)\\.(\\d+)(?:[.\\-]([a-z0-9]+))?',
None,
None,
None,
None,
None,
),
OSParser(
'(Android) Donut',
None,
'1',
'2',
None,
None,
),
OSParser(
'(Android) Eclair',
None,
'2',
'1',
None,
None,
),
OSParser(
'(Android) Froyo',
None,
'2',
'2',
None,
None,
),
OSParser(
'(Android) Gingerbread',
None,
'2',
'3',
None,
None,
),
OSParser(
'(Android) Honeycomb',
None,
'3',
None,
None,
None,
),
OSParser(
'^UCWEB.*; (Adr) (\\d+)\\.(\\d+)(?:[.\\-]([a-z0-9]+))?;',
'Android',
None,
None,
None,
None,
),
OSParser(
'^UCWEB.*; (iPad OS|iPh OS) (\\d+)_(\\d+)(?:_(\\d+))?;',
'iOS',
None,
None,
None,
None,
),
OSParser(
'^UCWEB.*; (wds) (\\d+)\\.(\\d+)(?:\\.(\\d+))?;',
'Windows Phone',
None,
None,
None,
None,
),
OSParser(
'^(JUC).*; ?U; ?(?:Android)?(\\d+)\\.(\\d+)(?:[\\.\\-]([a-z0-9]+))?',
'Android',
None,
None,
None,
None,
),
OSParser(
'(Silk-Accelerated=[a-z]{4,5})',
'Android',
None,
None,
None,
None,
),
OSParser(
'(XBLWP7)',
'Windows Phone',
None,
None,
None,
None,
),
OSParser(
'(Windows ?Mobile)',
'Windows Mobile',
None,
None,
None,
None,
),
OSParser(
'(Windows (?:NT 5\\.2|NT 5\\.1))',
'Windows XP',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 6\\.1)',
'Windows 7',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 6\\.0)',
'Windows Vista',
None,
None,
None,
None,
),
OSParser(
'(Win 9x 4\\.90)',
'Windows ME',
None,
None,
None,
None,
),
OSParser(
'(Windows 98|Windows XP|Windows ME|Windows 95|Windows CE|Windows 7|Windows NT 4\\.0|Windows Vista|Windows 2000|Windows 3.1)',
None,
None,
None,
None,
None,
),
OSParser(
'(Windows NT 6\\.2; ARM;)',
'Windows RT',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 6\\.2)',
'Windows 8',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 6\\.3; ARM;)',
'Windows RT 8.1',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 6\\.3)',
'Windows 8.1',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 6\\.4)',
'Windows 10',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 10\\.0)',
'Windows 10',
None,
None,
None,
None,
),
OSParser(
'(Windows NT 5\\.0)',
'Windows 2000',
None,
None,
None,
None,
),
OSParser(
'(WinNT4.0)',
'Windows NT 4.0',
None,
None,
None,
None,
),
OSParser(
'(Windows ?CE)',
'Windows CE',
None,
None,
None,
None,
),
OSParser(
'Win ?(95|98|3.1|NT|ME|2000)',
'Windows $1',
None,
None,
None,
None,
),
OSParser(
'Win16',
'Windows 3.1',
None,
None,
None,
None,
),
OSParser(
'Win32',
'Windows 95',
None,
None,
None,
None,
),
OSParser(
'(Tizen)/(\\d+)\\.(\\d+)',
None,
None,
None,
None,
None,
),
OSParser(
'((?:Mac ?|; )OS X)[\\s/](?:(\\d+)[_.](\\d+)(?:[_.](\\d+))?|Mach-O)',
'Mac OS X',
None,
None,
None,
None,
),
OSParser(
' (Dar)(win)/(9).(\\d+).*\\((?:i386|x86_64|Power Macintosh)\\)',
'Mac OS X',
'10',
'5',
None,
None,
),
OSParser(
' (Dar)(win)/(10).(\\d+).*\\((?:i386|x86_64)\\)',
'Mac OS X',
'10',
'6',
None,
None,
),
OSParser(
' (Dar)(win)/(11).(\\d+).*\\((?:i386|x86_64)\\)',
'Mac OS X',
'10',
'7',
None,
None,
),
OSParser(
' (Dar)(win)/(12).(\\d+).*\\((?:i386|x86_64)\\)',
'Mac OS X',
'10',
'8',
None,
None,
),
OSParser(
' (Dar)(win)/(13).(\\d+).*\\((?:i386|x86_64)\\)',
'Mac OS X',
'10',
'9',
None,
None,
),
OSParser(
'Mac_PowerPC',
'Mac OS',
None,
None,
None,
None,
),
OSParser(
'(?:PPC|Intel) (Mac OS X)',
None,
None,
None,
None,
None,
),
OSParser(
'(Apple\\s?TV)(?:/(\\d+)\\.(\\d+))?',
'ATV OS X',
None,
None,
None,
None,
),
OSParser(
'(CPU OS|iPhone OS|CPU iPhone) +(\\d+)[_\\.](\\d+)(?:[_\\.](\\d+))?',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(iPhone|iPad|iPod); Opera',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(iPhone|iPad|iPod).*Mac OS X.*Version/(\\d+)\\.(\\d+)',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/(5)48\\.0\\.3.* Darwin/11\\.0\\.0',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/(5)48\\.(0)\\.4.* Darwin/(1)1\\.0\\.0',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/(5)48\\.(1)\\.4',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/(4)85\\.1(3)\\.9',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/(6)09\\.(1)\\.4',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/(6)(0)9',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/6(7)2\\.(1)\\.13',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/6(7)2\\.(1)\\.(1)4',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CF)(Network)/6(7)(2)\\.1\\.15',
'iOS',
'7',
'1',
None,
None,
),
OSParser(
'(CFNetwork)/6(7)2\\.(0)\\.(?:2|8)',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(CFNetwork)/709\\.1',
'iOS',
'8',
'0.b5',
None,
None,
),
OSParser(
'(CF)(Network)/711\\.(\\d)',
'iOS',
'8',
None,
None,
None,
),
OSParser(
'(CF)(Network)/(720)\\.(\\d)',
'Mac OS X',
'10',
'10',
None,
None,
),
OSParser(
'(CF)(Network)/758\\.(\\d)',
'iOS',
'9',
None,
None,
None,
),
OSParser(
'CFNetwork/.* Darwin/(9)\\.\\d+',
'iOS',
'1',
None,
None,
None,
),
OSParser(
'CFNetwork/.* Darwin/(10)\\.\\d+',
'iOS',
'4',
None,
None,
None,
),
OSParser(
'CFNetwork/.* Darwin/(11)\\.\\d+',
'iOS',
'5',
None,
None,
None,
),
OSParser(
'CFNetwork/.* Darwin/(13)\\.\\d+',
'iOS',
'6',
None,
None,
None,
),
OSParser(
'CFNetwork/6.* Darwin/(14)\\.\\d+',
'iOS',
'7',
None,
None,
None,
),
OSParser(
'CFNetwork/7.* Darwin/(14)\\.\\d+',
'iOS',
'8',
'0',
None,
None,
),
OSParser(
'CFNetwork/7.* Darwin/(15)\\.\\d+',
'iOS',
'9',
'0',
None,
None,
),
OSParser(
'\\b(iOS[ /]|iPhone(?:/| v|[ _]OS[/,]|; | OS : |\\d,\\d/|\\d,\\d; )|iPad/)(\\d{1,2})[_\\.](\\d{1,2})(?:[_\\.](\\d+))?',
'iOS',
None,
None,
None,
None,
),
OSParser(
'(tvOS)/(\\d+).(\\d+)',
'tvOS',
None,
None,
None,
None,
),
OSParser(
'(CrOS) [a-z0-9_]+ (\\d+)\\.(\\d+)(?:\\.(\\d+))?',
'Chrome OS',
None,
None,
None,
None,
),
OSParser(
'([Dd]ebian)',
'Debian',
None,
None,
None,
None,
),
OSParser(
'(Linux Mint)(?:/(\\d+))?',
None,
None,
None,
None,
None,
),
OSParser(
'(Mandriva)(?: Linux)?/(?:[\\d.-]+m[a-z]{2}(\\d+).(\\d))?',
None,
None,
None,
None,
None,
),
OSParser(
'(Symbian[Oo][Ss])[/ ](\\d+)\\.(\\d+)',
'Symbian OS',
None,
None,
None,
None,
),
OSParser(
'(Symbian/3).+NokiaBrowser/7\\.3',
'Symbian^3 Anna',
None,
None,
None,
None,
),
OSParser(
'(Symbian/3).+NokiaBrowser/7\\.4',
'Symbian^3 Belle',
None,
None,
None,
None,
),
OSParser(
'(Symbian/3)',
'Symbian^3',
None,
None,
None,
None,
),
OSParser(
'\\b(Series 60|SymbOS|S60Version|S60V\\d|S60\\b)',
'Symbian OS',
None,
None,
None,
None,
),
OSParser(
'(MeeGo)',
None,
None,
None,
None,
None,
),
OSParser(
'Symbian [Oo][Ss]',
'Symbian OS',
None,
None,
None,
None,
),
OSParser(
'Series40;',
'Nokia Series 40',
None,
None,
None,
None,
),
OSParser(
'Series30Plus;',
'Nokia Series 30 Plus',
None,
None,
None,
None,
),
OSParser(
'(BB10);.+Version/(\\d+)\\.(\\d+)\\.(\\d+)',
'BlackBerry OS',
None,
None,
None,
None,
),
OSParser(
'(Black[Bb]erry)[0-9a-z]+/(\\d+)\\.(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
'BlackBerry OS',
None,
None,
None,
None,
),
OSParser(
'(Black[Bb]erry).+Version/(\\d+)\\.(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
'BlackBerry OS',
None,
None,
None,
None,
),
OSParser(
'(RIM Tablet OS) (\\d+)\\.(\\d+)\\.(\\d+)',
'BlackBerry Tablet OS',
None,
None,
None,
None,
),
OSParser(
'(Play[Bb]ook)',
'BlackBerry Tablet OS',
None,
None,
None,
None,
),
OSParser(
'(Black[Bb]erry)',
'BlackBerry OS',
None,
None,
None,
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Gecko/18.0 Firefox/\\d+\\.\\d+',
'Firefox OS',
'1',
'0',
'1',
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Gecko/18.1 Firefox/\\d+\\.\\d+',
'Firefox OS',
'1',
'1',
None,
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Gecko/26.0 Firefox/\\d+\\.\\d+',
'Firefox OS',
'1',
'2',
None,
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Gecko/28.0 Firefox/\\d+\\.\\d+',
'Firefox OS',
'1',
'3',
None,
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Gecko/30.0 Firefox/\\d+\\.\\d+',
'Firefox OS',
'1',
'4',
None,
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Gecko/32.0 Firefox/\\d+\\.\\d+',
'Firefox OS',
'2',
'0',
None,
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Gecko/34.0 Firefox/\\d+\\.\\d+',
'Firefox OS',
'2',
'1',
None,
None,
),
OSParser(
'\\((?:Mobile|Tablet);.+Firefox/\\d+\\.\\d+',
'Firefox OS',
None,
None,
None,
None,
),
OSParser(
'(BREW)[ /](\\d+)\\.(\\d+)\\.(\\d+)',
None,
None,
None,
None,
None,
),
OSParser(
'(BREW);',
None,
None,
None,
None,
None,
),
OSParser(
'(Brew MP|BMP)[ /](\\d+)\\.(\\d+)\\.(\\d+)',
'Brew MP',
None,
None,
None,
None,
),
OSParser(
'BMP;',
'Brew MP',
None,
None,
None,
None,
),
OSParser(
'(GoogleTV)(?: (\\d+)\\.(\\d+)(?:\\.(\\d+))?|/[\\da-z]+)',
None,
None,
None,
None,
None,
),
OSParser(
'(WebTV)/(\\d+).(\\d+)',
None,
None,
None,
None,
None,
),
OSParser(
'(hpw|web)OS/(\\d+)\\.(\\d+)(?:\\.(\\d+))?',
'webOS',
None,
None,
None,
None,
),
OSParser(
'(VRE);',
None,
None,
None,
None,
None,
),
OSParser(
'(Fedora|Red Hat|PCLinuxOS|Puppy|Ubuntu|Kindle|Bada|Lubuntu|BackTrack|Slackware|(?:Free|Open|Net|\\b)BSD)[/ ](\\d+)\\.(\\d+)(?:\\.(\\d+)(?:\\.(\\d+))?)?',
None,
None,
None,
None,
None,
),
OSParser(
'(Linux)[ /](\\d+)\\.(\\d+)(?:\\.(\\d+))?.*gentoo',
'Gentoo',
None,
None,
None,
None,
),
OSParser(
'\\((Bada);',
None,
None,
None,
None,
None,
),
OSParser(
'(Windows|Android|WeTab|Maemo)',
None,
None,
None,
None,
None,
),
OSParser(
'(Ubuntu|Kubuntu|Arch Linux|CentOS|Slackware|Gentoo|openSUSE|SUSE|Red Hat|Fedora|PCLinuxOS|Mageia|(?:Free|Open|Net|\\b)BSD)',
None,
None,
None,
None,
None,
),
OSParser(
'(Linux)(?:[ /](\\d+)\\.(\\d+)(?:\\.(\\d+))?)?',
None,
None,
None,
None,
None,
),
OSParser(
'SunOS',
'Solaris',
None,
None,
None,
None,
),
OSParser(
'^(Roku)/DVP-(\\d+)\\.(\\d+)',
None,
None,
None,
None,
None,
),
]
|
kangxu/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-csp-w3c-tests/csp-py/csp_media-src_corss-origin_audio_blocked_ext.py
|
30
|
def main(request, response):
    """wptserve handler for the CSP media-src cross-origin audio test.

    Reads the test-server config to learn the cross-origin host/port,
    attaches a Content-Security-Policy (plus the legacy X-* vendor
    variants) that only allows media from http://www.w3.org, and
    returns an HTML page asserting that a cross-origin <audio> source
    is blocked.
    """
    import simplejson as json
    # Context manager guarantees the config file handle is closed
    # (previously opened with the Python-2-only file() and never closed).
    with open('config.json') as f:
        s = json.JSONDecoder().decode(f.read())
    # Cross-origin base URL used as the (blocked) media source.
    # Only the second http port is needed by this test.
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    _CSP = "media-src http://www.w3.org; script-src 'self' 'unsafe-inline'"
    response.headers.set("Content-Security-Policy", _CSP)
    response.headers.set("X-Content-Security-Policy", _CSP)
    response.headers.set("X-WebKit-CSP", _CSP)
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Zhang, Zhiqiang <zhiqiang.zhang@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_media-src_cross-origin_audio_blocked_ext</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="media-src http://www.w3.org; script-src 'self' 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<audio id="m"></audio>
<script>
var t = async_test(document.title);
var m = document.getElementById("m");
m.src = '""" + url1 + """/tests/csp/support/khronos/red-green.theora.ogv';
window.setTimeout(function() {
t.step(function() {
assert_true(m.currentSrc == "",
"audio.currentSrc should be empty after setting src attribute");
});
t.done();
}, 0);
</script>
</body>
</html> """
|
Yubico/python-pyhsm
|
refs/heads/master
|
pyhsm/stick.py
|
2
|
"""
module for actually talking to the YubiHSM
"""
# Copyright (c) 2011 Yubico AB
# See the file COPYING for licence statement.
__all__ = [
# constants
# functions
'read',
'write',
'flush',
# classes
'YHSM_Stick',
]
import sys
import serial
import pyhsm.util
import pyhsm.exception
class YHSM_Stick():
    """
    Serial-port transport for the YubiHSM.

    Wraps a pyserial connection at 115200 baud and exposes read/write/
    flush plus a few debugging helpers. Every byte moved is counted in
    num_read_bytes / num_write_bytes.
    """

    def __init__(self, device, timeout=1, debug=False):
        """
        Open the YHSM serial device with the given read timeout.
        """
        self.debug = debug
        self.device = device
        self.num_read_bytes = 0
        self.num_write_bytes = 0
        self.ser = None  # keep __del__ safe if the open below fails
        self.ser = serial.serial_for_url(device)
        self.ser.baudrate = 115200
        self.ser.timeout = timeout
        if self.debug:
            sys.stderr.write("%s: OPEN %s\n" % (
                self.__class__.__name__,
                self.ser
                ))

    def acquire(self):
        """
        Do nothing (no locking needed for the serial transport);
        returns this method itself.
        """
        return self.acquire

    def write(self, data, debug_info=None):
        """
        Write data to the YubiHSM, counting the bytes sent.
        """
        self.num_write_bytes += len(data)
        if self.debug:
            label = debug_info if debug_info else str(len(data))
            sys.stderr.write("%s: WRITE %s:\n%s\n" % (
                self.__class__.__name__,
                label,
                pyhsm.util.hexdump(data)
                ))
        return self.ser.write(data)

    def read(self, num_bytes, debug_info=None):
        """
        Read up to num_bytes from the YubiHSM, counting the bytes received.
        """
        if self.debug:
            label = debug_info if debug_info else str(num_bytes)
            sys.stderr.write("%s: READING %s\n" % (
                self.__class__.__name__,
                label
                ))
        data = self.ser.read(num_bytes)
        if self.debug:
            sys.stderr.write("%s: READ %i:\n%s\n" % (
                self.__class__.__name__,
                len(data),
                pyhsm.util.hexdump(data)
                ))
        self.num_read_bytes += len(data)
        return data

    def flush(self):
        """
        Discard whatever is waiting in the input buffer.
        """
        if self.debug:
            sys.stderr.write("%s: FLUSH INPUT (%i bytes waiting)\n" % (
                self.__class__.__name__,
                self.ser.inWaiting()
                ))
        self.ser.flushInput()

    def drain(self):
        """ Read and discard input until the port goes quiet. """
        if self.debug:
            sys.stderr.write("%s: DRAIN INPUT (%i bytes waiting)\n" % (
                self.__class__.__name__,
                self.ser.inWaiting()
                ))
        saved_timeout = self.ser.timeout
        # Short timeout so draining terminates quickly once the device
        # stops sending.
        self.ser.timeout = 0.1
        data = self.ser.read(1)
        while data:
            if self.debug:
                sys.stderr.write("%s: DRAINED 0x%x (%c)\n" % (self.__class__.__name__, ord(data[0]), data[0]))
            data = self.ser.read(1)
        self.ser.timeout = saved_timeout
        return True

    def raw_device(self):
        """ Get raw serial device. Only intended for test code/debugging! """
        return self.ser

    def set_debug(self, new):
        """
        Enable or disable debug output (boolean); returns the old setting.
        """
        if not isinstance(new, bool):
            raise pyhsm.exception.YHSM_WrongInputType(
                'new', bool, type(new))
        previous = self.debug
        self.debug = new
        return previous

    def __repr__(self):
        return '<%s instance at %s: %s - r:%i w:%i>' % (
            self.__class__.__name__,
            hex(id(self)),
            self.device,
            self.num_read_bytes,
            self.num_write_bytes
            )

    def __del__(self):
        """
        Close the serial port when the instance is destroyed.
        """
        if self.debug:
            sys.stderr.write("%s: CLOSE %s\n" % (
                self.__class__.__name__,
                self.ser
                ))
        if self.ser:
            self.ser.close()
|
jcowley/python-openid
|
refs/heads/master
|
openid/yadis/constants.py
|
179
|
__all__ = ['YADIS_HEADER_NAME', 'YADIS_CONTENT_TYPE', 'YADIS_ACCEPT_HEADER']

from openid.yadis.accept import generateAcceptHeader

# HTTP response header used to point at the XRDS document during Yadis
# discovery.
YADIS_HEADER_NAME = 'X-XRDS-Location'

# MIME type of an XRDS document.
YADIS_CONTENT_TYPE = 'application/xrds+xml'

# A value suitable for using as an accept header when performing YADIS
# discovery, unless the application has special requirements
YADIS_ACCEPT_HEADER = generateAcceptHeader(
    ('text/html', 0.3),
    ('application/xhtml+xml', 0.5),
    (YADIS_CONTENT_TYPE, 1.0),
    )
|
mozilla/stoneridge
|
refs/heads/master
|
python/src/Demo/tkinter/matt/canvas-with-scrollbars.py
|
46
|
from Tkinter import *
# This example program creates a scroling canvas, and demonstrates
# how to tie scrollbars and canvses together. The mechanism
# is analogus for listboxes and other widgets with
# "xscroll" and "yscroll" configuration options.
class Test(Frame):
    """Demo frame: a large scrollable canvas tied to two scrollbars.

    The canvas scroll region (20" x 20") is bigger than the displayed
    widget (5" x 5"), so one rectangle is visible immediately and the
    other only after scrolling.
    """

    def printit(self):
        """Debug helper; not referenced by the widgets below."""
        print "hi"

    def createWidgets(self):
        """Builds the label, quit button, canvas and its scrollbars.

        NOTE: the pack() call order determines the layout — do not
        reorder these statements.
        """
        self.question = Label(self, text="Can Find The BLUE Square??????")
        self.question.pack()
        self.QUIT = Button(self, text='QUIT', background='red',
                           height=3, command=self.quit)
        self.QUIT.pack(side=BOTTOM, fill=BOTH)
        spacer = Frame(self, height="0.25i")
        spacer.pack(side=BOTTOM)
        # notice that the scroll region (20" x 20") is larger than
        # displayed size of the widget (5" x 5")
        self.draw = Canvas(self, width="5i", height="5i",
                           background="white",
                           scrollregion=(0, 0, "20i", "20i"))
        self.draw.scrollX = Scrollbar(self, orient=HORIZONTAL)
        self.draw.scrollY = Scrollbar(self, orient=VERTICAL)
        # now tie the three together. This is standard boilerplate text
        self.draw['xscrollcommand'] = self.draw.scrollX.set
        self.draw['yscrollcommand'] = self.draw.scrollY.set
        self.draw.scrollX['command'] = self.draw.xview
        self.draw.scrollY['command'] = self.draw.yview
        # draw something. Note that the first square
        # is visible, but you need to scroll to see the second one.
        self.draw.create_rectangle(0, 0, "3.5i", "3.5i", fill="black")
        self.draw.create_rectangle("10i", "10i", "13.5i", "13.5i", fill="blue")
        # pack 'em up
        self.draw.scrollX.pack(side=BOTTOM, fill=X)
        self.draw.scrollY.pack(side=RIGHT, fill=Y)
        self.draw.pack(side=LEFT)

    def scrollCanvasX(self, *args):
        """Debug callback that prints scroll events; not wired to the
        scrollbars above (they use self.draw.xview directly)."""
        print "scrolling", args
        print self.draw.scrollX.get()

    def __init__(self, master=None):
        Frame.__init__(self, master)
        Pack.config(self)
        self.createWidgets()
# Instantiate the demo frame and enter the Tk event loop.
test = Test()
test.mainloop()
|
gsnbng/erpnext
|
refs/heads/develop
|
erpnext/healthcare/doctype/patient_assessment/test_patient_assessment.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestPatientAssessment(unittest.TestCase):
    # Placeholder so the doctype has a test module; no checks defined yet.
    pass
|
MotionDogs/Lenzhound-dev-pre-release
|
refs/heads/master
|
tools/gtest/scripts/pump.py
|
2471
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Lexer table: (regex, token type), tried per line.  Among all patterns the
# earliest match in the line wins, with ties broken by table order (see
# FindFirstInLine) — so the specific '$...' directives must precede the
# bare '$' entry.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
    """A (line, column) position in a text file; totally ordered."""

    def __init__(self, line=-1, column=-1):
        self.line = line
        self.column = column

    def __eq__(self, rhs):
        return (self.line, self.column) == (rhs.line, rhs.column)

    def __ne__(self, rhs):
        return not self == rhs

    def __lt__(self, rhs):
        # Lexicographic: earlier line wins, then earlier column.
        return (self.line, self.column) < (rhs.line, rhs.column)

    def __le__(self, rhs):
        return self < rhs or self == rhs

    def __gt__(self, rhs):
        return rhs < self

    def __ge__(self, rhs):
        return rhs <= self

    def __str__(self):
        if self == Eof():
            return 'EOF'
        # 1-based line for display, raw column.
        return '%s(%s)' % (self.line + 1, self.column)

    def __add__(self, offset):
        # Moves right within the same line.
        return Cursor(self.line, self.column + offset)

    def __sub__(self, offset):
        return Cursor(self.line, self.column - offset)

    def Clone(self):
        """Returns a copy of self."""
        return Cursor(self.line, self.column)
# Sentinel cursor marking the end-of-file.
def Eof():
    """Returns the special end-of-file cursor (line == column == -1)."""
    return Cursor(line=-1, column=-1)
class Token:
    """A lexed token: [start, end) cursors, raw text, and a type tag."""

    def __init__(self, start=None, end=None, value=None, token_type=None):
        # Missing cursors default to the EOF sentinel.
        self.start = Eof() if start is None else start
        self.end = Eof() if end is None else end
        self.value = value
        self.token_type = token_type

    def __str__(self):
        return 'Token @%s: \'%s\' type=%s' % (
            self.start, self.value, self.token_type)

    def Clone(self):
        """Returns a copy of self."""
        return Token(self.start.Clone(), self.end.Clone(), self.value,
                     self.token_type)
def StartsWith(lines, pos, string):
    """Returns True iff the text at cursor pos in lines begins with string."""
    remainder = lines[pos.line][pos.column:]
    return remainder.startswith(string)
def FindFirstInLine(line, token_table):
    """Runs every regex in token_table over line and returns
    (start, length, token_type) of the earliest match, or None.

    Ties on start position are won by the earlier table entry.
    """
    best = None  # (start, length, token_type) of the earliest match so far
    for regex, token_type in token_table:
        m = regex.search(line)
        if m and (best is None or m.start() < best[0]):
            best = (m.start(), m.end() - m.start(), token_type)
    return best
def FindFirst(lines, token_table, cursor):
    """Returns the first token from token_table found at/after cursor,
    as a Token, or None when the rest of lines has no match."""
    start = cursor.Clone()
    for line_number in range(start.line, len(lines)):
        line = lines[line_number]
        if line_number == start.line:
            # Only search the part of the first line after the cursor.
            line = line[start.column:]
        match = FindFirstInLine(line, token_table)
        if match is None:
            continue
        (start_column, length, token_type) = match
        if line_number == start.line:
            # Translate back to a whole-line column.
            start_column += start.column
        found_start = Cursor(line_number, start_column)
        return MakeToken(lines, found_start, found_start + length, token_type)
    # No token anywhere in the remaining lines.
    return None
def SubString(lines, start, end):
    """Returns the text between cursors start and end ('' if reversed);
    an EOF end cursor means 'through the last character of lines'."""
    if end == Eof():
        end = Cursor(len(lines) - 1, len(lines[-1]))
    if start >= end:
        return ''
    if start.line == end.line:
        return lines[start.line][start.column:end.column]
    # Spanning multiple lines: tail of the first, whole middles,
    # head of the last.
    pieces = [lines[start.line][start.column:]]
    pieces.extend(lines[start.line + 1:end.line])
    pieces.append(lines[end.line][:end.column])
    return ''.join(pieces)
def StripMetaComments(str):
    """Strips '$$' meta comments from the given string."""
    # Drop a leading line that is nothing but a meta comment (the pattern
    # is anchored to the start of the whole string), including its '\n'.
    without_lead = re.sub(r'^\s*\$\$.*\n', '', str)
    # Then trim trailing meta comments (and the whitespace before them)
    # from contentful lines.
    return re.sub(r'\s*\$\$.*', '', without_lead)
def MakeToken(lines, start, end, token_type):
    """Creates a Token of token_type covering [start, end) in lines."""
    return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
    """Returns the token matching regex exactly at pos; exits on failure."""
    line = lines[pos.line][pos.column:]
    m = regex.search(line)
    # The match must begin exactly at pos (start offset 0).
    if m and not m.start():
        return MakeToken(lines, pos, pos + m.end(), token_type)
    else:
        print 'ERROR: %s expected at %s.' % (token_type, pos)
        sys.exit(1)
# Patterns used when parsing directive arguments in TokenizeLines.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')            # meta-variable identifier
EQ_REGEX = re.compile(r'=')                       # '=' in a $var definition
# Rest of the line, up to (but excluding) end-of-line or a '$$' comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')               # '..' in a $range
def Skip(lines, pos, regex):
    """Advances pos past regex when it matches right at pos; otherwise
    returns pos unchanged."""
    remainder = lines[pos.line][pos.column:]
    m = re.search(regex, remainder)
    if m and m.start() == 0:
        return pos + m.end()
    return pos
def SkipUntil(lines, pos, regex, token_type):
    """Advances pos to the first match of regex on the current line;
    exits with an error naming token_type when there is none."""
    line = lines[pos.line][pos.column:]
    m = re.search(regex, line)
    if m:
        return pos + m.start()
    else:
        print ('ERROR: %s expected on line %s after column %s.' %
               (token_type, pos.line + 1, pos.column))
        sys.exit(1)
def ParseExpTokenInParens(lines, pos):
    """Parses a parenthesized meta expression starting at pos and returns
    it as an 'exp' token spanning the parentheses.

    Nested parentheses are handled by the mutually recursive helpers.
    """
    def ParseInParens(pos):
        # Skip optional whitespace, consume '(', the balanced body,
        # then the closing ')'.
        pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
        pos = Skip(lines, pos, r'\(')
        pos = Parse(pos)
        pos = Skip(lines, pos, r'\)')
        return pos

    def Parse(pos):
        # Scan forward to the next paren; recurse into '(' groups and
        # stop when the pending ')' is reached.
        pos = SkipUntil(lines, pos, r'\(|\)', ')')
        if SubString(lines, pos, pos + 1) == '(':
            pos = Parse(pos + 1)
            pos = Skip(lines, pos, r'\)')
            return Parse(pos)
        else:
            return pos

    start = pos.Clone()
    pos = ParseInParens(pos)
    return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
    """Returns token with one trailing newline removed from its value;
    the token itself when there is no trailing newline."""
    if not token.value.endswith('\n'):
        return token
    return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
    """Generator: yields the tokens of lines starting at cursor pos.

    Raw text between meta tokens is yielded as 'code' tokens.  For
    directives that own the rest of their line, the trailing newline of
    the preceding code token is stripped first.
    """
    while True:
        found = FindFirst(lines, TOKEN_TABLE, pos)
        if not found:
            # No more meta tokens: the rest of the input is raw code.
            yield MakeToken(lines, pos, Eof(), 'code')
            return

        if found.start == pos:
            prev_token = None
            prev_token_rstripped = None
        else:
            # Text between pos and the meta token is raw code.
            prev_token = MakeToken(lines, pos, found.start, 'code')
            prev_token_rstripped = RStripNewLineFromToken(prev_token)

        if found.token_type == '$var':
            # $var ID = EXPRESSION   |   $var ID = [[ CODE ]]
            if prev_token_rstripped:
                yield prev_token_rstripped
            yield found
            id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
            yield id_token
            pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

            eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
            yield eq_token
            pos = Skip(lines, eq_token.end, r'\s*')

            if SubString(lines, pos, pos + 2) != '[[':
                # The rest of the line (up to any meta comment) is the value.
                exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
                yield exp_token
                pos = Cursor(exp_token.end.line + 1, 0)
        elif found.token_type == '$for':
            # $for ID SEPARATOR [[ CODE ]]
            if prev_token_rstripped:
                yield prev_token_rstripped
            yield found
            id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
            yield id_token
            pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
        elif found.token_type == '$range':
            # $range ID EXPRESSION..EXPRESSION
            if prev_token_rstripped:
                yield prev_token_rstripped
            yield found
            id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
            yield id_token
            pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

            # Split the rest into the two bound expressions around '..'.
            dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
            yield MakeToken(lines, pos, dots_pos, 'exp')
            yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
            pos = dots_pos + 2
            new_pos = Cursor(pos.line + 1, 0)
            yield MakeToken(lines, pos, new_pos, 'exp')
            pos = new_pos
        elif found.token_type == '$':
            # $(EXPRESSION): preceding code is kept verbatim (no rstrip).
            if prev_token:
                yield prev_token
            yield found
            exp_token = ParseExpTokenInParens(lines, found.end)
            yield exp_token
            pos = exp_token.end
        elif (found.token_type == ']]' or found.token_type == '$if' or
              found.token_type == '$elif' or found.token_type == '$else'):
            # Directives that own their whole line: strip the newline from
            # the preceding code token.
            if prev_token_rstripped:
                yield prev_token_rstripped
            yield found
            pos = found.end
        else:
            if prev_token:
                yield prev_token
            yield found
            pos = found.end
def Tokenize(s):
    """A generator that yields the tokens in the given string."""
    if not s:
        # Empty input produces no tokens at all.
        return
    lines = s.splitlines(True)
    for token in TokenizeLines(lines, Cursor(0, 0)):
        yield token
class CodeNode:
    """AST node: a sequence of atomic code nodes."""
    def __init__(self, atomic_code_list=None):
        self.atomic_code = atomic_code_list


class VarNode:
    """AST node for a '$var ID = ...' definition."""
    def __init__(self, identifier=None, atomic_code=None):
        self.identifier = identifier
        self.atomic_code = atomic_code


class RangeNode:
    """AST node for a '$range ID exp1..exp2' declaration."""
    def __init__(self, identifier=None, exp1=None, exp2=None):
        self.identifier = identifier
        self.exp1 = exp1
        self.exp2 = exp2


class ForNode:
    """AST node for a '$for ID SEPARATOR [[ CODE ]]' loop."""
    def __init__(self, identifier=None, sep=None, code=None):
        self.identifier = identifier
        self.sep = sep
        self.code = code


class ElseNode:
    """AST node for a bare '$else' branch."""
    def __init__(self, else_branch=None):
        self.else_branch = else_branch


class IfNode:
    """AST node for '$if EXP [[ CODE ]]' plus its optional else branch."""
    def __init__(self, exp=None, then_branch=None, else_branch=None):
        self.exp = exp
        self.then_branch = then_branch
        self.else_branch = else_branch


class RawCodeNode:
    """AST node wrapping a verbatim 'code' token."""
    def __init__(self, token=None):
        self.raw_code = token


class LiteralDollarNode:
    """AST node for the '$($)' escape that emits a literal '$'."""
    def __init__(self, token):
        self.token = token


class ExpNode:
    """AST node for a meta expression plus its Python translation."""
    def __init__(self, token, python_exp):
        self.token = token
        self.python_exp = python_exp
def PopFront(a_list):
    """Removes and returns the first element of a_list (in place)."""
    return a_list.pop(0)
def PushFront(a_list, elem):
    """Inserts elem at the front of a_list (in place)."""
    a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
    """Pops and returns the first token; exits when token_type is given
    and does not match."""
    token = PopFront(a_list)
    if token_type is not None and token.token_type != token_type:
        print 'ERROR: %s expected at %s' % (token_type, token.start)
        print 'ERROR: %s found instead' % (token,)
        sys.exit(1)
    return token
def PeekToken(a_list):
    """Returns the first element of a_list without removing it, or None
    when the list is empty."""
    return a_list[0] if a_list else None
def ParseExpNode(token):
    """Builds an ExpNode from token, rewriting every identifier in the
    meta expression into a self.GetValue("...") call so Env.EvalExp can
    resolve it against the variable stack."""
    python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
    return ExpNode(token, python_exp)
def ParseElseNode(tokens):
    """Parses an optional $else/$elif continuation from tokens; returns
    its AST (CodeNode) or None when no else branch follows."""
    def Pop(token_type=None):
        return PopToken(tokens, token_type)

    next = PeekToken(tokens)
    if not next:
        return None
    if next.token_type == '$else':
        Pop('$else')
        Pop('[[')
        code_node = ParseCodeNode(tokens)
        Pop(']]')
        return code_node
    elif next.token_type == '$elif':
        # Desugar '$elif' into a nested IfNode wrapped in a CodeNode.
        Pop('$elif')
        exp = Pop('code')
        Pop('[[')
        code_node = ParseCodeNode(tokens)
        Pop(']]')
        inner_else_node = ParseElseNode(tokens)
        return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
    elif not next.value.strip():
        # Skip whitespace-only code tokens between branches and retry.
        Pop('code')
        return ParseElseNode(tokens)
    else:
        return None
def ParseAtomicCodeNode(tokens):
    """Parses one atomic construct from tokens and returns its AST node,
    or None (with the lookahead token pushed back) when tokens does not
    start an atomic construct."""
    def Pop(token_type=None):
        return PopToken(tokens, token_type)

    head = PopFront(tokens)
    t = head.token_type
    if t == 'code':
        return RawCodeNode(head)
    elif t == '$var':
        # $var ID = exp   |   $var ID = [[ code ]]
        id_token = Pop('id')
        Pop('=')
        next = PeekToken(tokens)
        if next.token_type == 'exp':
            exp_token = Pop()
            return VarNode(id_token, ParseExpNode(exp_token))
        Pop('[[')
        code_node = ParseCodeNode(tokens)
        Pop(']]')
        return VarNode(id_token, code_node)
    elif t == '$for':
        id_token = Pop('id')
        next_token = PeekToken(tokens)
        if next_token.token_type == 'code':
            # Raw code between the ID and '[[' is the loop separator.
            sep_token = next_token
            Pop('code')
        else:
            sep_token = None
        Pop('[[')
        code_node = ParseCodeNode(tokens)
        Pop(']]')
        return ForNode(id_token, sep_token, code_node)
    elif t == '$if':
        exp_token = Pop('code')
        Pop('[[')
        code_node = ParseCodeNode(tokens)
        Pop(']]')
        else_node = ParseElseNode(tokens)
        return IfNode(ParseExpNode(exp_token), code_node, else_node)
    elif t == '$range':
        id_token = Pop('id')
        exp1_token = Pop('exp')
        Pop('..')
        exp2_token = Pop('exp')
        return RangeNode(id_token, ParseExpNode(exp1_token),
                         ParseExpNode(exp2_token))
    elif t == '$id':
        # '$name': strip the '$' and treat the rest as an expression.
        return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
    elif t == '$($)':
        return LiteralDollarNode(head)
    elif t == '$':
        exp_token = Pop('exp')
        return ParseExpNode(exp_token)
    elif t == '[[':
        code_node = ParseCodeNode(tokens)
        Pop(']]')
        return code_node
    else:
        # Not the start of an atomic construct: undo the pop.
        PushFront(tokens, head)
        return None
def ParseCodeNode(tokens):
    """Greedily consumes atomic constructs from tokens and returns them
    wrapped in a CodeNode."""
    nodes = []
    while tokens:
        node = ParseAtomicCodeNode(tokens)
        if node is None:
            # Lookahead was pushed back; the enclosing construct ends here.
            break
        nodes.append(node)
    return CodeNode(nodes)
def ParseToAST(pump_src_text):
    """Convert the given Pump source text into an AST."""
    return ParseCodeNode(list(Tokenize(pump_src_text)))
class Env:
    """Evaluation environment: stacks of meta-variable and range bindings,
    innermost binding first."""

    def __init__(self):
        # Each entry is (name, value); newest bindings are prepended.
        self.variables = []
        # Each entry is (name, lower, upper) from a $range declaration.
        self.ranges = []

    def Clone(self):
        """Returns a shallow copy so nested scopes can shadow bindings
        without disturbing the parent environment."""
        clone = Env()
        clone.variables = self.variables[:]
        clone.ranges = self.ranges[:]
        return clone

    def PushVariable(self, var, value):
        """Binds var to value in the innermost scope."""
        # If value looks like an int, store it as an int.
        try:
            int_value = int(value)
            if ('%s' % int_value) == value:
                value = int_value
        except Exception:
            pass
        self.variables[:0] = [(var, value)]

    def PopVariable(self):
        """Removes the innermost variable binding."""
        self.variables[:1] = []

    def PushRange(self, var, lower, upper):
        """Binds var to the integer range [lower, upper]."""
        self.ranges[:0] = [(var, lower, upper)]

    def PopRange(self):
        """Removes the innermost range binding."""
        self.ranges[:1] = []

    def GetValue(self, identifier):
        """Returns the innermost value bound to identifier; exits when
        the variable is undefined."""
        for (var, value) in self.variables:
            if identifier == var:
                return value
        print 'ERROR: meta variable %s is undefined.' % (identifier,)
        sys.exit(1)

    def EvalExp(self, exp):
        """Evaluates a translated meta expression (which resolves names
        via self.GetValue); exits on any evaluation error."""
        try:
            result = eval(exp.python_exp)
        except Exception, e:
            print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
            print ('ERROR: failed to evaluate meta expression %s at %s' %
                   (exp.python_exp, exp.token.start))
            sys.exit(1)
        return result

    def GetRange(self, identifier):
        """Returns the (lower, upper) bounds bound to identifier; exits
        when the range is undefined."""
        for (var, lower, upper) in self.ranges:
            if identifier == var:
                return (lower, upper)
        print 'ERROR: range %s is undefined.' % (identifier,)
        sys.exit(1)
class Output:
    """Accumulates generated text in a string buffer."""

    def __init__(self):
        # All text appended so far.
        self.string = ''

    def GetLastLine(self):
        """Returns the text after the final newline.

        NOTE(review): a buffer containing no newline at all yields ''
        rather than the whole buffer — callers appear to rely on this.
        """
        cut = self.string.rfind('\n')
        if cut < 0:
            return ''
        return self.string[cut + 1:]

    def Append(self, s):
        """Appends s to the buffer."""
        self.string += s
def RunAtomicCode(env, node, output):
    """Executes one AST node: mutates env for declarations and appends
    generated text to output; exits on an unknown node type."""
    if isinstance(node, VarNode):
        # Evaluate the definition body in a child scope, then bind the
        # resulting text in the current scope.
        identifier = node.identifier.value.strip()
        result = Output()
        RunAtomicCode(env.Clone(), node.atomic_code, result)
        value = result.string
        env.PushVariable(identifier, value)
    elif isinstance(node, RangeNode):
        identifier = node.identifier.value.strip()
        lower = int(env.EvalExp(node.exp1))
        upper = int(env.EvalExp(node.exp2))
        env.PushRange(identifier, lower, upper)
    elif isinstance(node, ForNode):
        identifier = node.identifier.value.strip()
        if node.sep is None:
            sep = ''
        else:
            sep = node.sep.value
        # Iterate the inclusive range bound earlier by a $range with the
        # same identifier, emitting the separator between iterations.
        (lower, upper) = env.GetRange(identifier)
        for i in range(lower, upper + 1):
            new_env = env.Clone()
            new_env.PushVariable(identifier, i)
            RunCode(new_env, node.code, output)
            if i != upper:
                output.Append(sep)
    elif isinstance(node, RawCodeNode):
        output.Append(node.raw_code.value)
    elif isinstance(node, IfNode):
        cond = env.EvalExp(node.exp)
        if cond:
            RunCode(env.Clone(), node.then_branch, output)
        elif node.else_branch is not None:
            RunCode(env.Clone(), node.else_branch, output)
    elif isinstance(node, ExpNode):
        value = env.EvalExp(node)
        output.Append('%s' % (value,))
    elif isinstance(node, LiteralDollarNode):
        output.Append('$')
    elif isinstance(node, CodeNode):
        RunCode(env.Clone(), node, output)
    else:
        print 'BAD'
        print node
        sys.exit(1)
def RunCode(env, code_node, output):
  """Expands every atomic unit of code_node against env, appending to output."""
  for unit in code_node.atomic_code:
    RunAtomicCode(env, unit, output)
def IsSingleLineComment(cur_line):
  """True if the line contains a '//' comment marker anywhere."""
  return cur_line.find('//') != -1
def IsInPreprocessorDirective(prev_lines, cur_line):
  """True if cur_line starts, or continues, a preprocessor directive.

  A directive starts with '#' (ignoring leading whitespace) and continues
  onto the next line when the previous emitted line ends with a backslash.
  """
  stripped = cur_line.lstrip()
  if stripped.startswith('#'):
    return True
  # A trailing backslash on the previous output line extends the directive.
  return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps a line containing a '//' comment to 80 columns.

  Any code preceding the comment is emitted unchanged on its own line; the
  comment text is then re-flowed at word boundaries under a '// ' prefix
  that preserves the original indentation.
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    # Whole-line comment: keep the comment at its original column.
    indent = loc
  else:
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word-sized segments, keeping trailing punctuation attached.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  # Flush whatever is left on the final line.
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps an over-long code line, appending the pieces to output.

  line_concat is appended to every emitted piece except the last (e.g.
  ' \\' for preprocessor directives).  Continuation lines are indented
  four extra spaces relative to the original line.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      # After the first emitted piece, switch to continuation indent/width.
      prefix = new_prefix
      max_len = new_max_len
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  # Flush the remainder without the continuation suffix.
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps a long preprocessor directive; pieces continue with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps a long ordinary code line; no continuation suffix is needed."""
  WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
  """Returns a match object if line opens a '/* IWYU pragma:' comment."""
  pragma_re = re.compile(r'/\* IWYU pragma: ')
  return pragma_re.search(line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Truthy if line is a header guard, an #include, or an IWYU pragma.

  These lines are exempt from the 80-column limit, and IWYU pragmas must
  never be broken because that confuses iwyu.py.
  """
  checks = (
      (re.match, r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$'),
      (re.match, r'^#include\s'),
      (re.search, r'// IWYU pragma: '),
  )
  for matcher, pattern in checks:
    found = matcher(pattern, line)
    if found:
      return found
  return None
def WrapLongLine(line, output):
  """Appends line to output, wrapping it first when it exceeds 80 columns.

  Comments, preprocessor directives and plain code each use their own
  wrapping strategy; header guards, #includes and IWYU pragmas are exempt
  from wrapping altogether.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide exempts long header guards, includes and IWYU
      # pragmas from the 80-column limit.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # Same exemption as above.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps over-long lines and strips trailing whitespace from every line.

  Returns the cleaned-up text, always ending in a single newline.
  """
  wrapped = []
  for raw_line in string.splitlines():
    WrapLongLine(raw_line, wrapped)
  trimmed = [wrapped_line.rstrip() for wrapped_line in wrapped]
  return '\n'.join(trimmed) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  # Parse the (comment-stripped) source, expand it, then normalize layout.
  ast = ParseToAST(StripMetaComments(src_text))
  output = Output()
  RunCode(Env(), ast, output)
  return BeautifyCode(output.string)
def main(argv):
  """Entry point: converts the .pump file named by the last argument.

  Output goes to the same path minus the '.pump' suffix; if the input
  name does not end in '.pump', the result is printed to stdout instead.
  """
  if len(argv) == 1:
    print __doc__
    sys.exit(1)
  file_path = argv[-1]
  output_str = ConvertFromPumpSource(file(file_path, 'r').read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    # '-' is the sentinel for "write to stdout".
    output_file_path = '-'
  if output_file_path == '-':
    print output_str,
  else:
    output_file = file(output_file_path, 'w')
    # Stamp generated files so readers know not to edit them by hand.
    output_file.write('// This file was GENERATED by command:\n')
    output_file.write('// %s %s\n' %
                      (os.path.basename(__file__), os.path.basename(file_path)))
    output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
    output_file.write(output_str)
    output_file.close()
# Script entry point: the .pump file path is the last command-line argument.
if __name__ == '__main__':
  main(sys.argv)
|
home-assistant/home-assistant
|
refs/heads/dev
|
tests/components/advantage_air/test_sensor.py
|
5
|
"""Test the Advantage Air Sensor Platform."""
from json import loads
from homeassistant.components.advantage_air.const import DOMAIN as ADVANTAGE_AIR_DOMAIN
from homeassistant.components.advantage_air.sensor import (
ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
ADVANTAGE_AIR_SET_COUNTDOWN_VALUE,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import entity_registry as er
from tests.components.advantage_air import (
TEST_SET_RESPONSE,
TEST_SET_URL,
TEST_SYSTEM_DATA,
TEST_SYSTEM_URL,
add_mock_config,
)
async def test_sensor_platform(hass, aioclient_mock):
    """Test sensor platform.

    Exercises the time-to-on/off, vent and signal sensors created from the
    mocked system data, and verifies that each set-time-to service call
    issues a setAircon request followed by a getSystemData refresh.
    """
    aioclient_mock.get(
        TEST_SYSTEM_URL,
        text=TEST_SYSTEM_DATA,
    )
    aioclient_mock.get(
        TEST_SET_URL,
        text=TEST_SET_RESPONSE,
    )
    await add_mock_config(hass)
    registry = er.async_get(hass)
    # Config-entry setup performs exactly one initial getSystemData fetch.
    assert len(aioclient_mock.mock_calls) == 1
    # Test First TimeToOn Sensor
    entity_id = "sensor.ac_one_time_to_on"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 0
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-timetoOn"
    value = 20
    await hass.services.async_call(
        ADVANTAGE_AIR_DOMAIN,
        ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
        {ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
        blocking=True,
    )
    # The service call adds two requests: setAircon then a data refresh.
    assert len(aioclient_mock.mock_calls) == 3
    assert aioclient_mock.mock_calls[-2][0] == "GET"
    assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
    data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
    assert data["ac1"]["info"]["countDownToOn"] == value
    assert aioclient_mock.mock_calls[-1][0] == "GET"
    assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
    # Test First TimeToOff Sensor
    entity_id = "sensor.ac_one_time_to_off"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 10
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-timetoOff"
    value = 0
    await hass.services.async_call(
        ADVANTAGE_AIR_DOMAIN,
        ADVANTAGE_AIR_SERVICE_SET_TIME_TO,
        {ATTR_ENTITY_ID: [entity_id], ADVANTAGE_AIR_SET_COUNTDOWN_VALUE: value},
        blocking=True,
    )
    # Again setAircon plus refresh, bringing the total to five calls.
    assert len(aioclient_mock.mock_calls) == 5
    assert aioclient_mock.mock_calls[-2][0] == "GET"
    assert aioclient_mock.mock_calls[-2][1].path == "/setAircon"
    data = loads(aioclient_mock.mock_calls[-2][1].query["json"])
    assert data["ac1"]["info"]["countDownToOff"] == value
    assert aioclient_mock.mock_calls[-1][0] == "GET"
    assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
    # Test First Zone Vent Sensor
    entity_id = "sensor.zone_open_with_sensor_vent"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 100
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-vent"
    # Test Second Zone Vent Sensor
    entity_id = "sensor.zone_closed_with_sensor_vent"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 0
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z02-vent"
    # Test First Zone Signal Sensor
    entity_id = "sensor.zone_open_with_sensor_signal"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 40
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z01-signal"
    # Test Second Zone Signal Sensor
    entity_id = "sensor.zone_closed_with_sensor_signal"
    state = hass.states.get(entity_id)
    assert state
    assert int(state.state) == 10
    entry = registry.async_get(entity_id)
    assert entry
    assert entry.unique_id == "uniqueid-ac1-z02-signal"
|
d40223223/608
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/result.py
|
727
|
"""Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
    """Decorator for TestResult reporting methods.

    If the result object has its 'failfast' flag set, the test run is
    stopped before the wrapped method records the failure/error.
    """
    @wraps(method)
    def inner(self, *args, **kw):
        # getattr with a default keeps this safe on result objects that
        # predate the failfast attribute.
        if getattr(self, 'failfast', False):
            self.stop()
        return method(self, *args, **kw)
    return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
    """Holder for test result information.
    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.
    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    # Bookkeeping used by TestSuite to manage class/module-level fixtures.
    _previousTestClass = None
    _testRunEntered = False
    _moduleSetUpFailed = False
    def __init__(self, stream=None, descriptions=None, verbosity=None):
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        # When True, stdout/stderr are captured per test and only echoed
        # for failing tests (see _setupStdout/_restoreStdout).
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        # Set when a test fails/errors, so captured output gets mirrored.
        self._mirrorOutput = False
    def printErrors(self):
        "Called by TestRunner after test run"
        #fixme brython
        pass
    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        self._setupStdout()
    def _setupStdout(self):
        # Redirect stdout/stderr into StringIO buffers for the duration
        # of the test; buffers are created lazily on first use.
        if self.buffer:
            if self._stderr_buffer is None:
                self._stderr_buffer = io.StringIO()
                self._stdout_buffer = io.StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer
    def startTestRun(self):
        """Called once before any tests are executed.
        See startTest for a method called before each test.
        """
    def stopTest(self, test):
        """Called when the given test has been run"""
        self._restoreStdout()
        self._mirrorOutput = False
    def _restoreStdout(self):
        if self.buffer:
            # Echo the captured output to the real streams only when the
            # test failed or errored (_mirrorOutput was set).
            if self._mirrorOutput:
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            # Reset (rather than reallocate) the buffers for the next test.
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
    def stopTestRun(self):
        """Called once after all tests are executed.
        See stopTest for a method called after each test.
        """
    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    @failfast
    def addFailure(self, test, err):
        """Called when a failure has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass
    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))
    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))
    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return len(self.failures) == len(self.errors) == 0
    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)
        if self.buffer:
            # Append any captured stdout/stderr to the formatted traceback.
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)
    def _is_relevant_tb_level(self, tb):
        # fixme brython: CPython checks "'__unittest' in tb.tb_frame.f_globals",
        # but frame globals are not available here, so every traceback level
        # is treated as relevant for now.
        return True
    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length
    def __repr__(self):
        return ("<%s run=%i errors=%i failures=%i>" %
               (util.strclass(self.__class__), self.testsRun, len(self.errors),
                len(self.failures)))
|
ohgodscience/Python
|
refs/heads/master
|
mousetrackerproc/mousetracker_processing.py
|
3
|
import pandas as pd
import os
rootdir = 'C:/Users/Bryan/Desktop/MT_corrected'
def self_chips(line):
    """Extract the participant id and answer fields from a fixed-width line.

    The participant id occupies columns 0-6 and the answer columns 64-66.
    Lines shorter than those widths yield correspondingly truncated fields,
    exactly like the original character-by-character scan did.

    Args:
        line: one fixed-width record from a MouseTracker export file.

    Returns:
        A two-element list [pid, answer] of strings.
    """
    # Slicing is equivalent to looping over every character but O(1) per
    # field instead of O(len(line)).
    return [line[:7], line[64:67]]
def all_participant_colors(rootdir):
    """Collect [pid, answer] pairs from every data file under rootdir.

    Only lines 5..74 (0-based index) whose column 23 is 'Y' are kept;
    lines too short to have a column 23 are silently skipped.

    Args:
        rootdir: root of the directory tree containing export files.

    Returns:
        A list of [pid, answer] lists, one per qualifying line.
    """
    participant_colors = []
    for subdir, dirs, files in os.walk(rootdir):
        for fname in files:
            # BUG FIX: join against the directory the file was actually
            # found in (subdir), not rootdir -- files in nested directories
            # used to raise IOError.  'with' guarantees the handle closes.
            with open(os.path.join(subdir, fname)) as data_file:
                for i, line in enumerate(data_file):
                    try:
                        if i > 4 and i < 75 and line[23] == 'Y':
                            participant_colors.append(self_chips(line))
                    except IndexError:
                        # Line shorter than 24 characters: ignore it.
                        pass
    return participant_colors
# Build the answer table from every export file and write it out as a
# headerless CSV.
hopefully_this_works = all_participant_colors(rootdir)
hopefully_this_works = pd.DataFrame(hopefully_this_works)
hopefully_this_works.to_csv("answer.csv", index = False, header = False)
|
tudorvio/nova
|
refs/heads/master
|
nova/api/openstack/compute/plugins/v3/admin_actions.py
|
33
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import reset_server_state
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import vm_states
from nova import exception
ALIAS = "os-admin-actions"
# States usable in resetState action
# NOTE: It is necessary to update the schema of nova/api/openstack/compute/
# schemas/v3/reset_server_state.py, when updating this state_map.
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
authorize = extensions.os_compute_authorizer(ALIAS)
class AdminActionsController(wsgi.Controller):
    """Controller for admin-only server actions (v3 API)."""
    def __init__(self, *args, **kwargs):
        super(AdminActionsController, self).__init__(*args, **kwargs)
        # Policy is enforced by the per-action authorize() calls below,
        # so the compute API itself skips its own policy checks.
        self.compute_api = compute.API(skip_policy_check=True)
    @wsgi.response(202)
    @extensions.expected_errors((404, 409))
    @wsgi.action('resetNetwork')
    def _reset_network(self, req, id, body):
        """Permit admins to reset networking on a server."""
        context = req.environ['nova.context']
        authorize(context, action='reset_network')
        try:
            instance = common.get_instance(self.compute_api, context, id)
            self.compute_api.reset_network(context, instance)
        except exception.InstanceIsLocked as e:
            # Locked instances may not be modified: map to HTTP 409.
            raise exc.HTTPConflict(explanation=e.format_message())
    @wsgi.response(202)
    @extensions.expected_errors((404, 409))
    @wsgi.action('injectNetworkInfo')
    def _inject_network_info(self, req, id, body):
        """Permit admins to inject network info into a server."""
        context = req.environ['nova.context']
        authorize(context, action='inject_network_info')
        try:
            instance = common.get_instance(self.compute_api, context, id)
            self.compute_api.inject_network_info(context, instance)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
    @wsgi.response(202)
    @extensions.expected_errors((400, 404))
    @wsgi.action('os-resetState')
    @validation.schema(reset_server_state.reset_state)
    def _reset_state(self, req, id, body):
        """Permit admins to reset the state of a server."""
        context = req.environ["nova.context"]
        authorize(context, action='reset_state')
        # Identify the desired state from the body
        state = state_map[body["os-resetState"]["state"]]
        instance = common.get_instance(self.compute_api, context, id)
        # Overwrite the vm_state directly and clear any in-flight task;
        # admin_state_reset bypasses the normal state-machine checks.
        instance.vm_state = state
        instance.task_state = None
        instance.save(admin_state_reset=True)
class AdminActions(extensions.V3APIExtensionBase):
    """Enable admin-only server actions
    Actions include: resetNetwork, injectNetworkInfo, os-resetState
    """
    name = "AdminActions"
    alias = ALIAS
    version = 1
    def get_controller_extensions(self):
        # Attach the admin actions to the existing 'servers' resource.
        controller = AdminActionsController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
    def get_resources(self):
        # This extension adds no standalone resources of its own.
        return []
|
ake-koomsin/mapnik_nvpr
|
refs/heads/master
|
tests/visual_tests/test_python.py
|
2
|
#!/usr/bin/env python
import mapnik
import sys
import os.path
from compare import compare, summary
dirname = os.path.dirname(__file__)
class MyText(mapnik.FormattingNode):
    """Formatting node that renders [name], colouring each character in a
    rotating red/green/blue cycle when the feature's [nr] exceeds 5."""
    def __init__(self):
        mapnik.FormattingNode.__init__(self)
        self.expr = mapnik.Expression("[name]")
        self.expr_nr = mapnik.Expression("[nr]")
    def apply(self, properties, feature, output):
        colors = [mapnik.Color('red'),
                  mapnik.Color('green'),
                  mapnik.Color('blue')]
        text = self.expr.evaluate(feature)
        if int(feature['nr']) > 5:
            # Emit one character at a time, cycling through the colours.
            i = 0
            my_properties = mapnik.CharProperties(properties)
            for char in text:
                my_properties.fill = colors[i % len(colors)]
                output.append(my_properties, char)
                i += 1
        else:
            # Low-numbered features keep the inherited properties.
            output.append(properties, text)
    def add_expressions(self, output):
        # Register both expressions so their columns are fetched.
        output.insert(self.expr)
        output.insert(self.expr_nr)
class IfElse(mapnik.FormattingNode):
    """Formatting node that renders one of two children per feature,
    depending on an expression evaluated against that feature."""
    def __init__(self, condition, if_node, else_node):
        mapnik.FormattingNode.__init__(self)
        self.condition = mapnik.Expression(condition)
        self.if_node = if_node
        self.else_node = else_node
    def apply(self, properties, feature, output):
        # Evaluate the condition for this feature and delegate to the
        # matching branch.
        branch = self.if_node if self.condition.evaluate(feature) else self.else_node
        branch.apply(properties, feature, output)
    def add_expressions(self, output):
        # Register our own expression plus those of both children.
        output.insert(self.condition)
        for child in (self.if_node, self.else_node):
            child.add_expressions(output)
# Build a minimal map: one OSM point layer with a text + point symbolizer.
m = mapnik.Map(600, 100)
m.background = mapnik.Color('white')
text = mapnik.TextSymbolizer()
text.placements.defaults.displacement = (0, 5)
text.placements.defaults.format.face_name = 'DejaVu Sans Book'
point = mapnik.PointSymbolizer()
rule = mapnik.Rule()
rule.symbols.append(text)
rule.symbols.append(point)
style = mapnik.Style()
style.rules.append(rule)
m.append_style('Style', style)
layer = mapnik.Layer('Layer')
layer.datasource = mapnik.Osm(file=os.path.join(dirname,"data/points.osm"))
layer.styles.append('Style')
m.layers.append(layer)
bbox = mapnik.Box2d(-0.05, -0.01, 0.95, 0.01)
m.zoom_to_box(bbox)
# A standalone Format node used in the table below.
formatnode = mapnik.FormattingFormat()
formatnode.child = mapnik.FormattingText("[name]")
formatnode.fill = mapnik.Color("green")
# (label, formatting tree) pairs: each tree is rendered and compared
# against a stored reference image named after the label.
format_trees = [
    ('TextNode', mapnik.FormattingText("[name]")),
    ('MyText', MyText()),
    ('IfElse', IfElse("[nr] != '5'",
        mapnik.FormattingText("[name]"),
        mapnik.FormattingText("'SPECIAL!'"))),
    ('Format', formatnode),
    ('List', mapnik.FormattingList([
        mapnik.FormattingText("[name]+'\n'"),
        MyText()
        ])
    )
]
for format_tree in format_trees:
    # Re-render the same map with each formatting tree and diff the result.
    text.placements.defaults.format_tree = format_tree[1]
    mapnik.render_to_file(m, os.path.join(dirname,"images", 'python-%s.png' % format_tree[0]), 'png')
    compare(os.path.join(dirname,"images", 'python-%s.png' % format_tree[0]),
            os.path.join(dirname,"images", 'python-%s-reference.png' % format_tree[0])
            )
summary()
|
stevenbrichards/boto
|
refs/heads/develop
|
tests/integration/cognito/__init__.py
|
112
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from tests.compat import unittest
class CognitoTest(unittest.TestCase):
    """Base class for Cognito integration tests.

    Creates a throwaway identity pool against the live service in setUp
    and deletes it again in tearDown, so subclasses always have a fresh
    pool id in self.identity_pool_id.
    """
    def setUp(self):
        self.cognito_identity = boto.connect_cognito_identity()
        self.cognito_sync = boto.connect_cognito_sync()
        self.identity_pool_name = 'myIdentityPool'
        response = self.cognito_identity.create_identity_pool(
            identity_pool_name=self.identity_pool_name,
            allow_unauthenticated_identities=False
        )
        self.identity_pool_id = response['IdentityPoolId']
    def tearDown(self):
        # Clean up the pool created in setUp so tests leave no residue.
        self.cognito_identity.delete_identity_pool(
            identity_pool_id=self.identity_pool_id
        )
|
mganeva/mantid
|
refs/heads/master
|
qt/python/mantidqt/widgets/embedded_find_replace_dialog/test/__init__.py
|
2
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
|
wylieswanson/agilepyfs
|
refs/heads/master
|
fs/expose/xmlrpc.py
|
3
|
"""
fs.expose.xmlrpc
================
Server to expose an FS via XML-RPC
This module provides the necessary infrastructure to expose an FS object
over XML-RPC. The main class is 'RPCFSServer', a SimpleXMLRPCServer subclass
designed to expose an underlying FS.
If you need to use a more powerful server than SimpleXMLRPCServer, you can
use the RPCFSInterface class to provide an XML-RPC-compatible wrapper around
an FS object, which can then be exposed using whatever server you choose
(e.g. Twisted's XML-RPC server).
"""
import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
from datetime import datetime
import six
from six import PY3, b
class RPCFSInterface(object):
    """Wrapper to expose an FS via a XML-RPC compatible interface.
    The only real trick is using xmlrpclib.Binary objects to transport
    the contents of files.
    """
    def __init__(self, fs):
        super(RPCFSInterface, self).__init__()
        self.fs = fs
    def encode_path(self, path):
        """Encode a filesystem path for sending over the wire.
        Unfortunately XMLRPC only supports ASCII strings, so this method
        must return something that can be represented in ASCII. The default
        is base64-encoded UTF-8.
        """
        if PY3:
            return path
        return path.encode("utf8").encode("base64")
    def decode_path(self, path):
        """Decode paths arriving over the wire."""
        if PY3:
            return path
        return path.decode("base64").decode("utf8")
    def getmeta(self, meta_name):
        """Fetch a named meta value from the wrapped FS."""
        meta = self.fs.getmeta(meta_name)
        return meta
    def getmeta_default(self, meta_name, default):
        """Fetch a named meta value, falling back to default."""
        meta = self.fs.getmeta(meta_name, default)
        return meta
    def hasmeta(self, meta_name):
        return self.fs.hasmeta(meta_name)
    def get_contents(self, path, mode="rb"):
        # File data is wrapped in xmlrpclib.Binary so arbitrary bytes
        # survive XML-RPC transport.
        path = self.decode_path(path)
        data = self.fs.getcontents(path, mode)
        return xmlrpclib.Binary(data)
    def set_contents(self, path, data):
        # 'data' arrives as an xmlrpclib.Binary; its payload is .data.
        path = self.decode_path(path)
        self.fs.setcontents(path,data.data)
    def exists(self, path):
        path = self.decode_path(path)
        return self.fs.exists(path)
    def isdir(self, path):
        path = self.decode_path(path)
        return self.fs.isdir(path)
    def isfile(self, path):
        path = self.decode_path(path)
        return self.fs.isfile(path)
    def listdir(self, path="./", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
        path = self.decode_path(path)
        entries = self.fs.listdir(path,wildcard,full,absolute,dirs_only,files_only)
        # Entries must be re-encoded before crossing the wire.
        return [self.encode_path(e) for e in entries]
    def makedir(self, path, recursive=False, allow_recreate=False):
        path = self.decode_path(path)
        return self.fs.makedir(path, recursive, allow_recreate)
    def remove(self, path):
        path = self.decode_path(path)
        return self.fs.remove(path)
    def removedir(self, path, recursive=False, force=False):
        path = self.decode_path(path)
        return self.fs.removedir(path, recursive, force)
    def rename(self, src, dst):
        src = self.decode_path(src)
        dst = self.decode_path(dst)
        return self.fs.rename(src, dst)
    def settimes(self, path, accessed_time, modified_time):
        path = self.decode_path(path)
        # Timestamps may arrive as xmlrpclib.DateTime; convert them back
        # to datetime objects before handing them to the FS.
        if isinstance(accessed_time, xmlrpclib.DateTime):
            accessed_time = datetime.strptime(accessed_time.value, "%Y%m%dT%H:%M:%S")
        if isinstance(modified_time, xmlrpclib.DateTime):
            modified_time = datetime.strptime(modified_time.value, "%Y%m%dT%H:%M:%S")
        return self.fs.settimes(path, accessed_time, modified_time)
    def getinfo(self, path):
        path = self.decode_path(path)
        info = self.fs.getinfo(path)
        return info
    def desc(self, path):
        path = self.decode_path(path)
        return self.fs.desc(path)
    def getxattr(self, path, attr, default=None):
        # Attribute names use the same wire encoding as paths.
        path = self.decode_path(path)
        attr = self.decode_path(attr)
        return self.fs.getxattr(path, attr, default)
    def setxattr(self, path, attr, value):
        path = self.decode_path(path)
        attr = self.decode_path(attr)
        return self.fs.setxattr(path, attr, value)
    def delxattr(self, path, attr):
        path = self.decode_path(path)
        attr = self.decode_path(attr)
        return self.fs.delxattr(path, attr)
    def listxattrs(self, path):
        path = self.decode_path(path)
        return [self.encode_path(a) for a in self.fs.listxattrs(path)]
    def copy(self, src, dst, overwrite=False, chunk_size=16384):
        src = self.decode_path(src)
        dst = self.decode_path(dst)
        return self.fs.copy(src, dst, overwrite, chunk_size)
    def move(self,src,dst,overwrite=False,chunk_size=16384):
        src = self.decode_path(src)
        dst = self.decode_path(dst)
        return self.fs.move(src, dst, overwrite, chunk_size)
    def movedir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
        src = self.decode_path(src)
        dst = self.decode_path(dst)
        return self.fs.movedir(src, dst, overwrite, ignore_errors, chunk_size)
    def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
        src = self.decode_path(src)
        dst = self.decode_path(dst)
        return self.fs.copydir(src, dst, overwrite, ignore_errors, chunk_size)
class RPCFSServer(SimpleXMLRPCServer):
    """Server to expose an FS object via XML-RPC.
    This class takes as its first argument an FS instance, and as its second
    argument a (hostname,port) tuple on which to listen for XML-RPC requests.
    Example::
        fs = OSFS('/var/srv/myfiles')
        s = RPCFSServer(fs,("",8080))
        s.serve_forever()
    To cleanly shut down the server after calling serve_forever, set the
    attribute "serve_more_requests" to False.
    """
    def __init__(self, fs, addr, requestHandler=None, logRequests=None):
        # allow_none lets methods return None over the wire; handler and
        # logging kwargs are only forwarded when the caller supplied them,
        # so SimpleXMLRPCServer's own defaults apply otherwise.
        kwds = dict(allow_none=True)
        if requestHandler is not None:
            kwds['requestHandler'] = requestHandler
        if logRequests is not None:
            kwds['logRequests'] = logRequests
        self.serve_more_requests = True
        SimpleXMLRPCServer.__init__(self,addr,**kwds)
        self.register_instance(RPCFSInterface(fs))
    def serve_forever(self):
        """Override serve_forever to allow graceful shutdown."""
        # Checked once per request, so shutdown takes effect after the
        # in-flight request completes.
        while self.serve_more_requests:
            self.handle_request()
|
ronbelson/ghost-proxy-hebrew
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/native.py
|
364
|
# -*- coding: utf-8 -*-
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
class NativeStyle(Style):
    """
    Pygments version of the "native" vim theme.
    """
    # Dark theme: light text on a near-black canvas.
    background_color = '#202020'
    highlight_color = '#404040'
    # Token-type -> style-string map consumed by the Pygments formatter.
    styles = {
        Token:              '#d0d0d0',
        Whitespace:         '#666666',
        Comment:            'italic #999999',
        Comment.Preproc:    'noitalic bold #cd2828',
        Comment.Special:    'noitalic bold #e50808 bg:#520000',
        Keyword:            'bold #6ab825',
        Keyword.Pseudo:     'nobold',
        Operator.Word:      'bold #6ab825',
        String:             '#ed9d13',
        String.Other:       '#ffa500',
        Number:             '#3677a9',
        Name.Builtin:       '#24909d',
        Name.Variable:      '#40ffff',
        Name.Constant:      '#40ffff',
        Name.Class:         'underline #447fcf',
        Name.Function:      '#447fcf',
        Name.Namespace:     'underline #447fcf',
        Name.Exception:     '#bbbbbb',
        Name.Tag:           'bold #6ab825',
        Name.Attribute:     '#bbbbbb',
        Name.Decorator:     '#ffa500',
        Generic.Heading:    'bold #ffffff',
        Generic.Subheading: 'underline #ffffff',
        Generic.Deleted:    '#d22323',
        Generic.Inserted:   '#589819',
        Generic.Error:      '#d22323',
        Generic.Emph:       'italic',
        Generic.Strong:     'bold',
        Generic.Prompt:     '#aaaaaa',
        Generic.Output:     '#cccccc',
        Generic.Traceback:  '#d22323',
        Error:              'bg:#e3d2d2 #a61717'
    }
|
ariff/robotframework-selenium2library
|
refs/heads/master
|
setup.py
|
11
|
#!/usr/bin/env python
import sys
from os.path import join, dirname
# Make the in-tree package importable before setup runs.
sys.path.append(join(dirname(__file__), 'src'))
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
# Defines VERSION in this module's namespace (Python 2 idiom).
execfile(join(dirname(__file__), 'src', 'Selenium2Library', 'version.py'))
DESCRIPTION = """
Selenium2Library is a web testing library for Robot Framework
that leverages the Selenium 2 (WebDriver) libraries.
"""[1:-1]
setup(name = 'robotframework-selenium2library',
      version = VERSION,
      description = 'Web testing library for Robot Framework',
      long_description = DESCRIPTION,
      author = 'Ryan Tomac , Ed Manlove , Jeremy Johnson',
      author_email = '<ryan@tomacfamily.com> , <devPyPlTw@verizon.net> , <jeremy@softworks.com.my>',
      url = 'https://github.com/rtomac/robotframework-selenium2library',
      license = 'Apache License 2.0',
      keywords = 'robotframework testing testautomation selenium selenium2 webdriver web',
      platforms = 'any',
      classifiers = [
          "Development Status :: 5 - Production/Stable",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Software Development :: Testing"
      ],
      install_requires = [
          'decorator >= 3.3.2',
          'selenium >= 2.8.1',
          'robotframework >= 2.6.0',
          'docutils >= 0.8.1'
      ],
      py_modules=['ez_setup'],
      package_dir = {'' : 'src'},
      packages = ['Selenium2Library','Selenium2Library.keywords','Selenium2Library.locators',
                  'Selenium2Library.utils'],
      include_package_data = True,
      )
|
ivan-fedorov/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/namespacePackageUsedInMovedFunction/after/src/b.py
|
79
|
import nspkg
def func():
    # Prints the namespace-package module object imported at file top.
    print(nspkg)
|
hatwar/buyback-frappe
|
refs/heads/master
|
frappe/patches/v5_0/fix_email_alert.py
|
61
|
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: split "Date Change" Email Alerts into the newer
    "Days Before" / "Days After" events.

    A negative ``days_in_advance`` meant "after the date"; it becomes a
    positive "Days After" alert, everything else becomes "Days Before".
    """
    frappe.reload_doctype("Email Alert")
    for row in frappe.get_all("Email Alert"):
        alert = frappe.get_doc("Email Alert", row.name)
        if alert.event != "Date Change":
            continue
        if alert.days_in_advance < 0:
            alert.event = "Days After"
            alert.days_in_advance = -alert.days_in_advance
        else:
            alert.event = "Days Before"
        alert.save()
|
tiagofrepereira2012/tensorflow
|
refs/heads/master
|
tensorflow/contrib/labeled_tensor/__init__.py
|
144
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Labels for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.labeled_tensor.python.ops import core as _core
from tensorflow.contrib.labeled_tensor.python.ops import io_ops as _io_ops
from tensorflow.contrib.labeled_tensor.python.ops import nn
from tensorflow.contrib.labeled_tensor.python.ops import ops as _ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar as _sugar
# pylint: disable=invalid-name
# Core types.
Axis = _core.Axis
Axes = _core.Axes
LabeledTensor = _core.LabeledTensor
# Conversion helpers.
as_axis = _core.as_axis
convert_to_labeled_tensor = _core.convert_to_labeled_tensor
# Basic tensor transformations.
identity = _core.identity
slice = _core.slice_function # pylint: disable=redefined-builtin
transpose = _core.transpose
expand_dims = _core.expand_dims
align = _core.align
# Axis-order bookkeeping.
axis_order_scope = _core.axis_order_scope
check_axis_order = _core.check_axis_order
impose_axis_order = _core.impose_axis_order
AxisOrderError = _core.AxisOrderError
# Factories for wrapping new ops with labeled-tensor semantics.
define_unary_op = _core.define_unary_op
define_binary_op = _core.define_binary_op
define_reduce_op = _ops.define_reduce_op
# Elementwise unary math ops.
abs = _core.abs_function # pylint: disable=redefined-builtin
neg = _core.neg
sign = _core.sign
reciprocal = _core.reciprocal
square = _core.square
round = _core.round_function # pylint: disable=redefined-builtin
sqrt = _core.sqrt
rsqrt = _core.rsqrt
exp = _core.exp
log = _core.log
ceil = _core.ceil
floor = _core.floor
cos = _core.cos
sin = _core.sin
tan = _core.tan
acos = _core.acos
asin = _core.asin
atan = _core.atan
lgamma = _core.lgamma
digamma = _core.digamma
erf = _core.erf
erfc = _core.erfc
logical_not = _core.logical_not
tanh = _core.tanh
sigmoid = _core.sigmoid
# Elementwise binary and comparison ops.
add = _core.add
sub = _core.sub
mul = _core.mul
div = _core.div
mod = _core.mod
pow = _core.pow_function # pylint: disable=redefined-builtin
equal = _core.equal
greater = _core.greater
greater_equal = _core.greater_equal
not_equal = _core.not_equal
less = _core.less
less_equal = _core.less_equal
logical_and = _core.logical_and
logical_or = _core.logical_or
logical_xor = _core.logical_xor
maximum = _core.maximum
minimum = _core.minimum
squared_difference = _core.squared_difference
igamma = _core.igamma
igammac = _core.igammac
zeta = _core.zeta
polygamma = _core.polygamma
# Array manipulation and structural ops.
select = _ops.select
concat = _ops.concat
pack = _ops.pack
unpack = _ops.unpack
reshape = _ops.reshape
rename_axis = _ops.rename_axis
random_crop = _ops.random_crop
map_fn = _ops.map_fn
foldl = _ops.foldl
squeeze = _ops.squeeze
matmul = _ops.matmul
tile = _ops.tile
pad = _ops.pad
constant = _ops.constant
zeros_like = _ops.zeros_like
ones_like = _ops.ones_like
cast = _ops.cast
verify_tensor_all_finite = _ops.verify_tensor_all_finite
boolean_mask = _ops.boolean_mask
where = _ops.where
# Reductions over labeled axes.
reduce_all = _ops.reduce_all
reduce_any = _ops.reduce_any
reduce_logsumexp = _ops.reduce_logsumexp
reduce_max = _ops.reduce_max
reduce_mean = _ops.reduce_mean
reduce_min = _ops.reduce_min
reduce_prod = _ops.reduce_prod
reduce_sum = _ops.reduce_sum
# Input pipeline: batching and Example parsing.
batch = _ops.batch
shuffle_batch = _ops.shuffle_batch
FixedLenFeature = _io_ops.FixedLenFeature
parse_example = _io_ops.parse_example
parse_single_example = _io_ops.parse_single_example
placeholder = _io_ops.placeholder
# Syntactic sugar.
ReshapeCoder = _sugar.ReshapeCoder
|
ammarkhann/FinalSeniorCode
|
refs/heads/master
|
lib/python2.7/site-packages/IPython/core/completerlib.py
|
7
|
# encoding: utf-8
"""Implementations for various useful completers.
These are all loaded by default by IPython.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import glob
import inspect
import os
import re
import sys
try:
# Python >= 3.3
from importlib.machinery import all_suffixes
_suffixes = all_suffixes()
except ImportError:
from imp import get_suffixes
_suffixes = [ s[0] for s in get_suffixes() ]
# Third-party imports
from time import time
from zipimport import zipimporter
# Our own imports
from IPython.core.completer import expand_user, compress_user
from IPython.core.error import TryNext
from IPython.utils._process_common import arg_split
from IPython.utils.py3compat import string_types
# FIXME: this should be pulled in with the right call via the component system
from IPython import get_ipython
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Time in seconds after which the rootmodules will be stored permanently in the
# ipython ip.db database (kept in the user's .ipython dir).
TIMEOUT_STORAGE = 2
# Time in seconds after which we give up
TIMEOUT_GIVEUP = 20
# Regular expression for the python import statement
# (the %s interpolates the escaped, version-specific import suffixes,
# e.g. .py, .pyc, .so, joined as alternatives).
import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
                       r'(?P<package>[/\\]__init__)?'
                       r'(?P<suffix>%s)$' %
                       r'|'.join(re.escape(s) for s in _suffixes))
# RE for the ipython %run command (python + ipython scripts)
magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def module_list(path):
    """
    Return the list containing the names of the modules available in the given
    folder.

    ``path`` may be a directory on disk or an importable zip archive; in
    either case only names matching the module-file regex (``import_re``)
    are returned, de-duplicated.
    """
    # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
    if path == '':
        path = '.'
    # A few local constants to be used in loops below
    pjoin = os.path.join
    if os.path.isdir(path):
        # Build a list of all files in the directory and all files
        # in its subdirectories. For performance reasons, do not
        # recurse more than one level into subdirectories.
        files = []
        for root, dirs, nondirs in os.walk(path, followlinks=True):
            subdir = root[len(path)+1:]
            if subdir:
                files.extend(pjoin(subdir, f) for f in nondirs)
                dirs[:] = [] # Do not recurse into additional subdirectories.
            else:
                files.extend(nondirs)
    else:
        try:
            # Assume the path is a zip archive; this reads the private
            # zipimporter._files attribute -- TODO confirm it is stable
            # across the interpreter versions this must support.
            files = list(zipimporter(path)._files.keys())
        except Exception:
            # Narrowed from a bare ``except:`` so that SystemExit and
            # KeyboardInterrupt are no longer swallowed here.
            files = []
    # Build a list of modules which match the import_re regex.
    modules = []
    for f in files:
        m = import_re.match(f)
        if m:
            modules.append(m.group('name'))
    return list(set(modules))
def get_root_modules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.
    ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
    """
    ip = get_ipython()
    if ip is None:
        # No global shell instance to store cached list of modules.
        # Don't try to scan for modules every time.
        return list(sys.builtin_module_names)
    rootmodules_cache = ip.db.get('rootmodules_cache', {})
    rootmodules = list(sys.builtin_module_names)
    start_time = time()
    # store becomes True once scanning proved slow enough (TIMEOUT_STORAGE)
    # to be worth persisting the cache back into ip.db.
    store = False
    for path in sys.path:
        try:
            # Cache hit: reuse the previously scanned module list.
            modules = rootmodules_cache[path]
        except KeyError:
            modules = module_list(path)
            try:
                modules.remove('__init__')
            except ValueError:
                pass
            if path not in ('', '.'): # cwd modules should not be cached
                rootmodules_cache[path] = modules
            if time() - start_time > TIMEOUT_STORAGE and not store:
                store = True
                print("\nCaching the list of root modules, please wait!")
                print("(This will only be done once - type '%rehashx' to "
                      "reset cache!)\n")
                sys.stdout.flush()
            if time() - start_time > TIMEOUT_GIVEUP:
                # Scanning is taking far too long; return an empty result
                # rather than blocking the user's tab-completion.
                print("This is taking too long, we give up.\n")
                return []
        rootmodules.extend(modules)
    if store:
        ip.db['rootmodules_cache'] = rootmodules_cache
    rootmodules = list(set(rootmodules))
    return rootmodules
def is_importable(module, attr, only_modules):
    """Decide whether *attr* of *module* should be offered as a completion.

    With ``only_modules`` set, only attributes that are themselves modules
    qualify; otherwise everything except dunder names does.
    """
    if only_modules:
        return inspect.ismodule(getattr(module, attr))
    return not (attr.startswith('__') and attr.endswith('__'))
def try_import(mod, only_modules=False):
    """Import *mod* and return the names it offers for completion.

    Returns a list of attribute/submodule name strings, or an empty list
    when the module cannot be imported.  With ``only_modules`` set, only
    submodules are considered (used for ``import a.b.<tab>``).
    """
    mod = mod.rstrip('.')
    try:
        m = __import__(mod)
    except Exception:
        # Importing arbitrary user input can fail in countless ways
        # (ImportError, SyntaxError, exceptions raised at import time...).
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate.
        return []
    mods = mod.split('.')
    # __import__ returns the top-level package; walk down to the leaf.
    for module in mods[1:]:
        m = getattr(m, module)
    m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
    completions = []
    if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
        completions.extend([attr for attr in dir(m) if
                            is_importable(m, attr, only_modules)])
    completions.extend(getattr(m, '__all__', []))
    if m_is_init:
        # Packages also complete to the modules in their directory.
        completions.extend(module_list(os.path.dirname(m.__file__)))
    completions = {c for c in completions if isinstance(c, string_types)}
    completions.discard('__init__')
    return list(completions)
#-----------------------------------------------------------------------------
# Completion-related functions.
#-----------------------------------------------------------------------------
def quick_completer(cmd, completions):
    """Register a trivial, fixed-vocabulary completer for *cmd*.

    *completions* is either a list of strings, or a single string that is
    split on whitespace.  Afterwards, tab-completing arguments of *cmd*
    offers exactly those words, e.g.::

        quick_completer('foo', ['bar', 'baz'])
        # 'foo b<TAB>' now offers: bar baz
    """
    if isinstance(completions, string_types):
        completions = completions.split()

    def _complete(self, event):
        return completions

    get_ipython().set_hook('complete_command', _complete, str_key=cmd)
def module_completion(line):
    """Return the completion possibilities for an import line.

    *line* looks like ``'import xml.d'`` or ``'from xml.dom import'``;
    returns a list of candidate strings (or None when the line is not a
    recognised import form).
    """
    words = line.split(' ')
    nwords = len(words)

    # 'from whatever <tab>' -> suggest the 'import ' keyword itself.
    if nwords == 3 and words[0] == 'from':
        return ['import ']

    # 'from xy<tab>' or 'import xy<tab>' -> complete module paths.
    if nwords < 3 and words[0] in {'%aimport', 'import', 'from'}:
        if nwords == 1:
            return get_root_modules()
        dotted = words[1].split('.')
        if len(dotted) < 2:
            return get_root_modules()
        parent = '.'.join(dotted[:-1])
        return ['.'.join(dotted[:-1] + [child])
                for child in try_import(parent, True)]

    # 'from xyz import abc<tab>' -> complete names inside the module.
    if nwords >= 3 and words[0] == 'from':
        return try_import(words[1])
#-----------------------------------------------------------------------------
# Completers
#-----------------------------------------------------------------------------
# These all have the func(self, event) signature to be used as custom
# completers
def module_completer(self,event):
    """Give completions after user has typed 'import ...' or 'from ...'"""
    # This works in all versions of python. While 2.5 has
    # pkgutil.walk_packages(), that particular routine is fairly dangerous,
    # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
    # of possibly problematic side effects.
    # This searches the folders in sys.path for available modules instead.
    return module_completion(event.line)
# FIXME: there's a lot of logic common to the run, cd and builtin file
# completers, that is currently reimplemented in each.
def magic_run_completer(self, event):
    """Complete files that end in .py or .ipy or .ipynb for the %run command.
    """
    comps = arg_split(event.line, strict=False)
    # relpath should be the current token that we need to complete.
    if (len(comps) > 1) and (not event.line.endswith(' ')):
        # Strip surrounding quotes so quoted paths complete too.
        relpath = comps[-1].strip("'\"")
    else:
        relpath = ''
    #print("\nev=", event) # dbg
    #print("rp=", relpath) # dbg
    #print('comps=', comps) # dbg
    lglob = glob.glob
    isdir = os.path.isdir
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    # Find if the user has already typed the first filename, after which we
    # should complete on all files, since after the first one other files may
    # be arguments to the input script.
    if any(magic_run_re.match(c) for c in comps):
        matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
                   for f in lglob(relpath+'*')]
    else:
        # First token: offer directories plus runnable script extensions only.
        dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
        pys = [f.replace('\\','/')
               for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
               lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
        matches = dirs + pys
        #print('run comp:', dirs+pys) # dbg
    return [compress_user(p, tilde_expand, tilde_val) for p in matches]
def cd_completer(self, event):
    """Completer function for cd, which only returns directories."""
    ip = get_ipython()
    relpath = event.symbol
    #print(event) # dbg
    # 'cd -b <tab>': complete from the bookmark database only.
    if event.line.endswith('-b') or ' -b ' in event.line:
        # return only bookmark completions
        bkms = self.db.get('bookmarks', None)
        if bkms:
            return bkms.keys()
        else:
            return []
    # 'cd -<tab>': offer the numbered directory history entries.
    if event.symbol == '-':
        # Width needed to zero-pad the history index.
        width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
        # jump in directory history by number
        fmt = '-%0' + width_dh +'d [%s]'
        ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
        if len(ents) > 1:
            return ents
        return []
    # 'cd --<tab>': complete on the basenames of history directories.
    if event.symbol.startswith('--'):
        return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
    # Expand ~ in path and normalize directory separators.
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    relpath = relpath.replace('\\','/')
    found = []
    for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
              if os.path.isdir(f)]:
        if ' ' in d:
            # we don't want to deal with any of that, complex code
            # for this is elsewhere
            raise TryNext
        found.append(d)
    if not found:
        if os.path.isdir(relpath):
            return [compress_user(relpath, tilde_expand, tilde_val)]
        # if no completions so far, try bookmarks
        bks = self.db.get('bookmarks',{})
        bkmatches = [s for s in bks if s.startswith(event.symbol)]
        if bkmatches:
            return bkmatches
        raise TryNext
    return [compress_user(p, tilde_expand, tilde_val) for p in found]
def reset_completer(self, event):
    """Completer for the %reset magic: its flags and argument keywords."""
    return ['-f', '-s', 'in', 'out', 'array', 'dhist']
|
aronparsons/spacewalk
|
refs/heads/master
|
backend/server/repomd/domain.py
|
10
|
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Domain Classes for generating repository metadata from RHN info.
#
class Channel:
    """ A pure data object representing an RHN Channel. """
    def __init__(self, channel_id):
        self.id = channel_id
        # Identification / metadata (filled in by callers).
        self.label = None
        self.name = None
        self.checksum_type = None
        # Channel contents.
        self.num_packages = 0
        self.packages = []
        self.errata = []
        # Optional repository metadata attachments.
        self.updateinfo = None
        self.comps = None
class Package:
    """ A pure data object representing an RHN Package. """
    def __init__(self, package_id):
        self.id = package_id
        # NEVRA identification.
        self.name = None
        self.version = None
        self.release = None
        self.epoch = 0
        self.arch = None
        # Checksum of the package file.
        self.checksum = None
        self.checksum_type = None
        # Descriptive metadata.
        self.summary = None
        self.description = None
        self.vendor = None
        self.build_time = None
        # Size-related fields.
        self.package_size = None
        self.payload_size = None
        self.installed_size = None
        # Offsets of the RPM header within the package file.
        self.header_start = None
        self.header_end = None
        self.package_group = None
        self.build_host = None
        self.copyright = None
        self.filename = None
        self.source_rpm = None
        # Dependency and content lists (populated elsewhere).
        self.files = []
        self.provides = []
        self.requires = []
        self.conflicts = []
        self.obsoletes = []
        self.supplements = []
        self.enhances = []
        self.suggests = []
        self.recommends = []
        self.changelog = []
class Erratum:
    """ An object representing a single update to a channel. """
    def __init__(self, erratum_id):
        self.id = erratum_id
        self.readable_id = None
        self.title = None
        self.advisory_type = None
        self.version = None
        self.issued = None
        self.updated = None
        self.synopsis = None
        self.description = None
        # Bugzilla and CVE references for this advisory.
        self.bz_references = []
        self.cve_references = []
        # We don't want to pickle a single package multiple times,
        # so here's a list to store the ids and we can swap out the
        # actual objects when it's time to pickle. This should be replaced
        # with something that keeps the concepts separate.
        self.package_ids = []
        self.packages = []
class Comps:
    """ A pure data object referencing a channel's comps file.

    NOTE(review): presumably the repodata comps (package groups) XML --
    confirm against callers.
    """
    def __init__(self, comps_id, filename):
        self.id = comps_id
        self.filename = filename
|
mikepea/fullerite
|
refs/heads/master
|
src/diamond/collectors/sqs/sqs.py
|
56
|
# coding=utf-8
"""
The SQS collector collects metrics for one or more Amazon AWS SQS queues
#### Configuration
Below is an example configuration for the SQSCollector.
You can specify an arbitrary amount of regions
```
enabled = True
interval = 60
[regions]
[[region-code]]
access_key_id = '...'
secret_access_key = '''
queues = queue_name[,queue_name2[,..]]
```
Note: If you modify the SQSCollector configuration, you will need to
restart diamond.
#### Dependencies
* boto
"""
import diamond.collector
try:
from boto import sqs
except ImportError:
sqs = False
class SqsCollector(diamond.collector.Collector):
    """Collect queue attributes for configured Amazon SQS queues.

    Configuration provides per-region credentials and a comma-separated
    list of queue names (see the module docstring).
    """

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(SqsCollector, self).get_default_config()
        config.update({
            'path': 'sqs',
        })
        return config

    def collect(self):
        """Publish every tracked attribute of every configured queue.

        Metric names have the form ``<region>.<queue>.<attribute>``.
        """
        attribs = ['ApproximateNumberOfMessages',
                   'ApproximateNumberOfMessagesNotVisible',
                   'ApproximateNumberOfMessagesDelayed',
                   'CreatedTimestamp',
                   'DelaySeconds',
                   'LastModifiedTimestamp',
                   'MaximumMessageSize',
                   'MessageRetentionPeriod',
                   'ReceiveMessageWaitTimeSeconds',
                   'VisibilityTimeout']
        if not sqs:
            self.log.error("boto module not found!")
            return
        for (region, region_cfg) in self.config['regions'].items():
            # NOTE: asserts are stripped under ``python -O``; kept for
            # backward compatibility with the existing config contract.
            assert 'access_key_id' in region_cfg
            assert 'secret_access_key' in region_cfg
            assert 'queues' in region_cfg
            # One connection per region suffices; previously a fresh
            # connection was opened for every single queue.
            conn = sqs.connect_to_region(
                region,
                aws_access_key_id=region_cfg['access_key_id'],
                aws_secret_access_key=region_cfg['secret_access_key'],
            )
            queues = region_cfg['queues'].split(',')
            for queue_name in queues:
                queue = conn.get_queue(queue_name)
                for attrib in attribs:
                    d = queue.get_attributes(attrib)
                    self.publish(
                        '%s.%s.%s' % (region, queue_name, attrib),
                        d[attrib]
                    )
|
berkeley-stat159/project-gamma
|
refs/heads/master
|
code/utils/kmeans.py
|
2
|
"""
Wrapper for k-means clustering that takes cares of reshaping and generating labels.
"""
from __future__ import division
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import project_config
import sklearn.cluster
def perform_kMeans_clustering_analysis(feature_data, n_clusters):
    """
    Cluster voxel time courses into ``n_clusters`` groups with k-means on
    Euclidean distance. Every entry along the last axis of ``feature_data``
    is treated as a separate feature of a voxel, i.e. in total
    ``feature_data.shape[-1]`` features.

    Parameters
    ----------
    feature_data : array whose leading dimensions index voxels (e.g. a 3-d
        volume) and whose last dimension holds that voxel's feature values.
    n_clusters : number of clusters to segregate the time courses into.

    Returns
    -------
    labels : array shaped like ``feature_data`` minus its last dimension;
        each element is the cluster label assigned to that voxel.
    """
    model = sklearn.cluster.KMeans(n_clusters)
    samples = feature_data.reshape((-1, feature_data.shape[-1]))
    voxel_labels = model.fit_predict(samples)
    return voxel_labels.reshape(feature_data.shape[:-1])
|
aewhatley/scikit-learn
|
refs/heads/master
|
sklearn/manifold/tests/test_locally_linear.py
|
232
|
from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
    """Barycenter weights: rows sum to one and reconstruct X reasonably."""
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    graph = barycenter_kneighbors_graph(X, 1)
    expected = [[0., 1., 0.],
                [1., 0., 0.],
                [0., 1., 0.]]
    assert_array_almost_equal(graph.toarray(), expected)

    graph = barycenter_kneighbors_graph(X, 2)
    # check that columns sum to one
    assert_array_almost_equal(np.sum(graph.toarray(), 1), np.ones(3))
    reconstruction = np.dot(graph.toarray(), X)
    assert_less(linalg.norm(reconstruction - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
    # note: ARPACK is numerically unstable, so this test will fail for
    # some random seeds. We choose 2 because the tests pass.
    rng = np.random.RandomState(2)
    tol = 0.1
    # grid of equidistant points in 2D, n_components = n_dim
    X = np.array(list(product(range(5), repeat=2)))
    # Tiny jitter so the neighbourhood matrices are not exactly singular.
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2
    clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
                                          n_components=n_components,
                                          random_state=rng)
    tol = 0.1
    # Reconstruction from barycenter weights must be near-exact on a grid.
    N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
    reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
    assert_less(reconstruction_error, tol)
    for solver in eigen_solvers:
        clf.set_params(eigen_solver=solver)
        clf.fit(X)
        assert_true(clf.embedding_.shape[1] == n_components)
        reconstruction_error = linalg.norm(
            np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
        assert_less(reconstruction_error, tol)
        # The estimator's reported error must match the recomputed one.
        assert_almost_equal(clf.reconstruction_error_,
                            reconstruction_error, decimal=1)
    # re-embed a noisy version of X using the transform method
    noise = rng.randn(*X.shape) / 100
    X_reembedded = clf.transform(X + noise)
    assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
    rng = np.random.RandomState(0)
    # similar test on a slightly more complex manifold
    X = np.array(list(product(np.arange(18), repeat=2)))
    # Lift the grid onto a gently curved surface (parabolic third coord).
    X = np.c_[X, X[:, 0] ** 2 / 18]
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2
    for method in ["standard", "hessian", "modified", "ltsa"]:
        clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
                                              n_components=n_components,
                                              method=method, random_state=0)
        # "standard" converges tighter; the variants get a looser tolerance.
        tol = 1.5 if method == "standard" else 3
        N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
        reconstruction_error = linalg.norm(np.dot(N, X) - X)
        assert_less(reconstruction_error, tol)
        for solver in eigen_solvers:
            clf.set_params(eigen_solver=solver)
            clf.fit(X)
            assert_true(clf.embedding_.shape[1] == n_components)
            reconstruction_error = linalg.norm(
                np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
            details = ("solver: %s, method: %s" % (solver, method))
            assert_less(reconstruction_error, tol, msg=details)
            # Reported error must agree with the recomputed one (relative).
            assert_less(np.abs(clf.reconstruction_error_ -
                               reconstruction_error),
                        tol * reconstruction_error, msg=details)
def test_pipeline():
    """LocallyLinearEmbedding works as a Pipeline step without error."""
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    from sklearn import pipeline, datasets
    X, y = datasets.make_blobs(random_state=0)
    model = pipeline.Pipeline([
        ('filter', manifold.LocallyLinearEmbedding(random_state=0)),
        ('clf', neighbors.KNeighborsClassifier()),
    ])
    model.fit(X, y)
    assert_less(.9, model.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
    """A rank-deficient weight matrix must raise ValueError."""
    from nose.tools import assert_raises
    M = np.ones((10, 3))
    wrapped = ignore_warnings(manifold.locally_linear_embedding)
    assert_raises(ValueError, wrapped,
                  M, 2, 1, method='standard', eigen_solver='arpack')
|
YOTOV-LIMITED/kuma
|
refs/heads/master
|
vendor/packages/translate/storage/omegat.py
|
25
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Manage the OmegaT glossary format
OmegaT glossary format is used by the
`OmegaT <http://www.omegat.org/en/omegat.html>`_ computer aided
translation tool.
It is a bilingual base class derived format with :class:`OmegaTFile`
and :class:`OmegaTUnit` providing file and unit level access.
Format Implementation
The OmegaT glossary format is a simple Tab Separated Value (TSV) file
with the columns: source, target, comment.
The dialect of the TSV files is specified by :class:`OmegaTDialect`.
Encoding
The files are either UTF-8 or encoded using the system default. UTF-8
encoded files use the .utf8 extension while system encoded files use
the .tab extension.
"""
import csv
import locale
from translate.storage import base
OMEGAT_FIELDNAMES = ["source", "target", "comment"]
"""Field names for an OmegaT glossary unit"""
class OmegaTDialect(csv.Dialect):
    """Describe the properties of an OmegaT generated TAB-delimited glossary
    file."""
    delimiter = "\t"
    lineterminator = "\r\n"
    # No quoting at all: OmegaT glossaries are plain tab-separated text.
    quoting = csv.QUOTE_NONE
csv.register_dialect("omegat", OmegaTDialect)
class OmegaTUnit(base.TranslationUnit):
    """An OmegaT glossary unit"""
    def __init__(self, source=None):
        # _dict mirrors one glossary line: source/target/comment fields
        # stored as UTF-8 encoded byte strings.
        self._dict = {}
        if source:
            self.source = source
        super(OmegaTUnit, self).__init__(source)
    def getdict(self):
        """Get the dictionary of values for a OmegaT line"""
        return self._dict
    def setdict(self, newdict):
        """Set the dictionary of values for a OmegaT line
        :param newdict: a new dictionary with OmegaT line elements
        :type newdict: Dict
        """
        # TODO First check that the values are OK
        self._dict = newdict
    dict = property(getdict, setdict)
    def _get_field(self, key):
        # Decode the stored UTF-8 bytes back to unicode; a missing key
        # yields None, an empty/None stored value yields "".
        if key not in self._dict:
            return None
        elif self._dict[key]:
            return self._dict[key].decode('utf-8')
        else:
            return ""
    def _set_field(self, key, newvalue):
        # NOTE(review): when newvalue is None this falls through instead of
        # returning early; harmless today (None == stored None skips the
        # final assignment) but fragile -- confirm before relying on it.
        if newvalue is None:
            self._dict[key] = None
        if isinstance(newvalue, unicode):
            newvalue = newvalue.encode('utf-8')
        if not key in self._dict or newvalue != self._dict[key]:
            self._dict[key] = newvalue
    def getnotes(self, origin=None):
        return self._get_field('comment')
    def addnote(self, text, origin=None, position="append"):
        # Append to an existing non-empty comment with a newline separator,
        # otherwise replace the comment outright.
        currentnote = self._get_field('comment')
        if position == "append" and currentnote is not None and currentnote != u'':
            self._set_field('comment', currentnote + '\n' + text)
        else:
            self._set_field('comment', text)
    def removenotes(self):
        self._set_field('comment', u'')
    def getsource(self):
        return self._get_field('source')
    def setsource(self, newsource):
        self._rich_source = None
        return self._set_field('source', newsource)
    source = property(getsource, setsource)
    def gettarget(self):
        return self._get_field('target')
    def settarget(self, newtarget):
        self._rich_target = None
        return self._set_field('target', newtarget)
    target = property(gettarget, settarget)
    def settargetlang(self, newlang):
        self._dict['target-lang'] = newlang
    targetlang = property(None, settargetlang)
    def __str__(self):
        return str(self._dict)
    def istranslated(self):
        # A unit counts as translated when the target field is non-empty.
        return bool(self._dict.get('target', None))
class OmegaTFile(base.TranslationStore):
    """An OmegaT glossary file"""
    Name = "OmegaT Glossary"
    Mimetypes = ["application/x-omegat-glossary"]
    Extensions = ["utf8"]

    def __init__(self, inputfile=None, unitclass=OmegaTUnit):
        """Construct an OmegaT glossary, optionally reading in from
        inputfile."""
        self.UnitClass = unitclass
        base.TranslationStore.__init__(self, unitclass=unitclass)
        self.filename = ''
        self.extension = ''
        self._encoding = self._get_encoding()
        if inputfile is not None:
            self.parse(inputfile)

    def _get_encoding(self):
        """On-disk encoding; overridden by OmegaTFileTab."""
        return 'utf-8'

    def parse(self, input):
        """Parse the given file or file source string."""
        if hasattr(input, 'name'):
            self.filename = input.name
        elif not getattr(self, 'filename', ''):
            self.filename = ''
        if hasattr(input, "read"):
            tmsrc = input.read()
            input.close()
            input = tmsrc
        try:
            # Normalise to UTF-8 internally, whatever the on-disk encoding.
            input = input.decode(self._encoding).encode('utf-8')
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are not converted into ValueError.
            raise ValueError("OmegaT files are either UTF-8 encoded or use the default system encoding")
        lines = csv.DictReader(input.split("\n"), fieldnames=OMEGAT_FIELDNAMES,
                               dialect="omegat")
        for line in lines:
            newunit = OmegaTUnit()
            newunit.dict = line
            self.addunit(newunit)

    def __str__(self):
        """Serialize the translated units as TSV in this store's encoding."""
        # The previous code used ``csv.StringIO``, which only works because
        # the csv module happens to import StringIO internally -- an
        # undocumented implementation detail.  Import it explicitly instead.
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        output = StringIO()
        writer = csv.DictWriter(output, fieldnames=OMEGAT_FIELDNAMES,
                                dialect="omegat")
        unit_count = 0
        for unit in self.units:
            if unit.istranslated():
                unit_count += 1
                writer.writerow(unit.dict)
        if unit_count == 0:
            return ""
        # seek(0) replaces cStringIO's non-portable reset(): both StringIO
        # flavours support it.
        output.seek(0)
        decoded = "".join(output.readlines()).decode('utf-8')
        try:
            return decoded.encode(self._encoding)
        except UnicodeEncodeError:
            # Fall back to UTF-8 when the system encoding cannot represent
            # the glossary contents.
            return decoded.encode('utf-8')
class OmegaTFileTab(OmegaTFile):
    """An OmegaT glossary file in the default system encoding"""
    Name = "OmegaT Glossary"
    Mimetypes = ["application/x-omegat-glossary"]
    Extensions = ["tab"]
    def _get_encoding(self):
        # .tab files use the system default encoding rather than UTF-8.
        # NOTE(review): getdefaultlocale() may return (None, None); a None
        # encoding would break parse()/__str__ -- confirm upstream handling.
        return locale.getdefaultlocale()[1]
|
samthor/intellij-community
|
refs/heads/master
|
python/testData/quickdoc/PropOldSetter.py
|
83
|
# IDE quickdoc fixture: a property whose *setter* is a private method that
# carries the docstring; note the method is (mis)named like a getter.
class A(object):
    def __getX(self, x):
        "Doc of getter"
        self.__x = x
    x = property(fset=__getX)
A().<the_ref>x = 1
|
shitolepriya/Saloon_erp
|
refs/heads/master
|
erpnext/manufacturing/doctype/bom/test_bom.py
|
62
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import cstr
test_records = frappe.get_test_records('BOM')
class TestBOM(unittest.TestCase):
    """Tests for BOM item expansion and default-BOM bookkeeping."""

    def test_get_items(self):
        # Un-exploded fetch returns only the BOM's direct items.
        from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
        items_dict = get_bom_items_as_dict(bom=get_default_bom(), company="_Test Company", qty=1, fetch_exploded=0)
        self.assertTrue(test_records[2]["items"][0]["item_code"] in items_dict)
        self.assertTrue(test_records[2]["items"][1]["item_code"] in items_dict)
        self.assertEquals(len(items_dict.values()), 2)

    def test_get_items_exploded(self):
        # Exploded fetch replaces sub-assembly items with their components.
        from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
        items_dict = get_bom_items_as_dict(bom=get_default_bom(), company="_Test Company", qty=1, fetch_exploded=1)
        self.assertTrue(test_records[2]["items"][0]["item_code"] in items_dict)
        self.assertFalse(test_records[2]["items"][1]["item_code"] in items_dict)
        self.assertTrue(test_records[0]["items"][0]["item_code"] in items_dict)
        self.assertTrue(test_records[0]["items"][1]["item_code"] in items_dict)
        self.assertEquals(len(items_dict.values()), 3)

    def test_get_items_list(self):
        from erpnext.manufacturing.doctype.bom.bom import get_bom_items
        self.assertEquals(len(get_bom_items(bom=get_default_bom(), company="_Test Company")), 3)

    def test_default_bom(self):
        def _get_default_bom_in_item():
            return cstr(frappe.db.get_value("Item", "_Test FG Item 2", "default_bom"))
        bom = frappe.get_doc("BOM", {"item":"_Test FG Item 2", "is_default": 1})
        self.assertEqual(_get_default_bom_in_item(), bom.name)
        bom.is_active = 0
        bom.save()
        self.assertEqual(_get_default_bom_in_item(), "")
        bom.is_active = 1
        bom.is_default=1
        bom.save()
        # BUG FIX: was assertTrue(x, bom.name), which treats bom.name as the
        # failure *message* and passes for any truthy x; the intent (as in
        # the checks above) is an equality assertion.
        self.assertEqual(_get_default_bom_in_item(), bom.name)
def get_default_bom(item_code="_Test FG Item 2"):
    """Return the name of the active, default BOM for `item_code` (or None)."""
    return frappe.db.get_value("BOM", {"item": item_code, "is_active": 1, "is_default": 1})
|
chriskmanx/qmole
|
refs/heads/master
|
QMOLEDEV/node/tools/scons/scons-local-1.2.0/SCons/Memoize.py
|
12
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Memoize.py 3842 2008/12/20 22:59:52 scons"

__doc__ = """Memoizer

A metaclass implementation to count hits and misses of the computed
values that various methods cache in memory.

Use of this modules assumes that wrapped methods be coded to cache their
values in a consistent way.  Here is an example of wrapping a method
that returns a computed value, with no input parameters:

    memoizer_counters = []                                  # Memoization

    memoizer_counters.append(SCons.Memoize.CountValue('foo'))       # Memoization

    def foo(self):

        try:                                                # Memoization
            return self._memo['foo']                        # Memoization
        except KeyError:                                    # Memoization
            pass                                            # Memoization

        result = self.compute_foo_value()

        self._memo['foo'] = result                          # Memoization

        return result

Here is an example of wrapping a method that will return different values
based on one or more input arguments:

    def _bar_key(self, argument):                           # Memoization
        return argument                                     # Memoization

    memoizer_counters.append(SCons.Memoize.CountDict('bar', _bar_key)) # Memoization

    def bar(self, argument):

        memo_key = argument                                 # Memoization
        try:                                                # Memoization
            memo_dict = self._memo['bar']                   # Memoization
        except KeyError:                                    # Memoization
            memo_dict = {}                                  # Memoization
            self._memo['bar'] = memo_dict                   # Memoization
        else:                                               # Memoization
            try:                                            # Memoization
                return memo_dict[memo_key]                  # Memoization
            except KeyError:                                # Memoization
                pass                                        # Memoization

        result = self.compute_bar_value(argument)

        memo_dict[memo_key] = result                        # Memoization

        return result

At one point we avoided replicating this sort of logic in all the methods
by putting it right into this module, but we've moved away from that at
present (see the "Historical Note," below.).

Deciding what to cache is tricky, because different configurations
can have radically different performance tradeoffs, and because the
tradeoffs involved are often so non-obvious.  Consequently, deciding
whether or not to cache a given method will likely be more of an art than
a science, but should still be based on available data from this module.
Here are some VERY GENERAL guidelines about deciding whether or not to
cache return values from a method that's being called a lot:

    --  The first question to ask is, "Can we change the calling code
        so this method isn't called so often?"  Sometimes this can be
        done by changing the algorithm.  Sometimes the *caller* should
        be memoized, not the method you're looking at.

    --  The memoized function should be timed with multiple configurations
        to make sure it doesn't inadvertently slow down some other
        configuration.

    --  When memoizing values based on a dictionary key composed of
        input arguments, you don't need to use all of the arguments
        if some of them don't affect the return values.

Historical Note:  The initial Memoizer implementation actually handled
the caching of values for the wrapped methods, based on a set of generic
algorithms for computing hashable values based on the method's arguments.
This collected caching logic nicely, but had two drawbacks:

    Running arguments through a generic key-conversion mechanism is slower
    (and less flexible) than just coding these things directly.  Since the
    methods that need memoized values are generally performance-critical,
    slowing them down in order to collect the logic isn't the right
    tradeoff.

    Use of the memoizer really obscured what was being called, because
    all the memoized methods were wrapped with re-used generic methods.
    This made it more difficult, for example, to use the Python profiler
    to figure out how to optimize the underlying methods.
"""
# BUG FIX (docstring example): the `bar` example originally stored the new
# memo dictionary under the key 'dict' (self._memo['dict'] = memo_dict),
# which contradicts the lookup self._memo['bar'] two lines above and would
# recompute on every call; corrected to 'bar'.

import new

# A flag controlling whether or not we actually use memoization.
use_memoizer = None

# Global registry of Counter instances, populated by Counter.__init__ and
# reported by Dump().
CounterList = []
class Counter:
    """
    Base class for counting memoization hits and misses.

    We expect that the metaclass initialization will have filled in
    the .name attribute that represents the name of the function
    being counted.
    """
    def __init__(self, method_name):
        """
        Remember the wrapped method's name and register this counter in
        the global CounterList so Dump() can report on it later.
        """
        self.method_name = method_name
        self.hit = 0   # calls satisfied from the memo cache
        self.miss = 0  # calls that had to compute the value
        CounterList.append(self)
    def display(self):
        # Print one summary line for this counter (Python 2 print statement).
        fmt = " %7d hits %7d misses %s()"
        print fmt % (self.hit, self.miss, self.name)
    def __cmp__(self, other):
        # Order counters by .name for sorted Dump() output; counters whose
        # .name has not been filled in yet compare as equal.
        try:
            return cmp(self.name, other.name)
        except AttributeError:
            return 0
class CountValue(Counter):
    """
    A counter class for simple, atomic memoized values.

    A CountValue object should be instantiated in a class for each of
    the class's methods that memoizes its return value by simply storing
    the return value in its _memo dictionary.

    We expect that the metaclass initialization will fill in the
    .underlying_method attribute with the method that we're wrapping.
    We then call the underlying_method method after counting whether
    its memoized value has already been set (a hit) or not (a miss).
    """
    def __call__(self, *args, **kw):
        # args[0] is the instance the wrapped method was invoked on.
        obj = args[0]
        # Presence of the key means the wrapped method will return the
        # cached value (a hit); the call itself is always delegated.
        if obj._memo.has_key(self.method_name):
            self.hit = self.hit + 1
        else:
            self.miss = self.miss + 1
        return apply(self.underlying_method, args, kw)
class CountDict(Counter):
    """
    A counter class for memoized values stored in a dictionary, with
    keys based on the method's input arguments.

    A CountDict object is instantiated in a class for each of the
    class's methods that memoizes its return value in a dictionary,
    indexed by some key that can be computed from one or more of
    its input arguments.

    We expect that the metaclass initialization will fill in the
    .underlying_method attribute with the method that we're wrapping.
    We then call the underlying_method method after counting whether the
    computed key value is already present in the memoization dictionary
    (a hit) or not (a miss).
    """
    def __init__(self, method_name, keymaker):
        """
        Remember the keymaker function used to derive the memo-dict key
        from the wrapped method's arguments.
        """
        Counter.__init__(self, method_name)
        self.keymaker = keymaker
    def __call__(self, *args, **kw):
        obj = args[0]
        try:
            memo_dict = obj._memo[self.method_name]
        except KeyError:
            # No per-method dict yet: this call must compute its value.
            self.miss = self.miss + 1
        else:
            # Re-derive the key the same way the wrapped method does.
            key = apply(self.keymaker, args, kw)
            if memo_dict.has_key(key):
                self.hit = self.hit + 1
            else:
                self.miss = self.miss + 1
        return apply(self.underlying_method, args, kw)
class Memoizer:
    """Object which performs caching of method calls for its 'primary'
    instance."""
    # Historical placeholder: the caching itself now lives in the wrapped
    # methods (see the module docstring's "Historical Note").
    def __init__(self):
        pass
# Find out if we support metaclasses (Python 2.2 and later).
class M:
    # Probe metaclass: records that metaclass initialization ran, and
    # exercises new.instancemethod() to detect a known bug in some builds.
    def __init__(cls, name, bases, cls_dict):
        cls.use_metaclass = 1
        def fake_method(self):
            pass
        new.instancemethod(fake_method, None, cls)
try:
    class A:
        __metaclass__ = M

    # If the metaclass ran, A gained this attribute; otherwise AttributeError.
    use_metaclass = A.use_metaclass
except AttributeError:
    use_metaclass = None
    reason = 'no metaclasses'
except TypeError:
    use_metaclass = None
    reason = 'new.instancemethod() bug'
else:
    del A
del M
if not use_metaclass:
    # Fallback definitions for Python versions without working metaclasses:
    # Dump() is a no-op and EnableMemoization() raises a warning.
    def Dump(title):
        pass

    try:
        class Memoized_Metaclass(type):
            # Just a place-holder so pre-metaclass Python versions don't
            # have to have special code for the Memoized classes.
            pass
    except TypeError:
        class Memoized_Metaclass:
            # A place-holder so pre-metaclass Python versions don't
            # have to have special code for the Memoized classes.
            pass

    def EnableMemoization():
        import SCons.Warnings
        msg = 'memoization is not supported in this version of Python (%s)'
        raise SCons.Warnings.NoMetaclassSupportWarning, msg % reason

else:
    def Dump(title=None):
        # Print hit/miss statistics for every registered counter, sorted
        # by counter name.
        if title:
            print title
        CounterList.sort()
        for counter in CounterList:
            counter.display()

    class Memoized_Metaclass(type):
        def __init__(cls, name, bases, cls_dict):
            super(Memoized_Metaclass, cls).__init__(name, bases, cls_dict)
            # Replace each counted method with its Counter instance bound as
            # an instance method; the Counter's __call__ counts the hit/miss
            # and then delegates to the original (underlying) method.
            for counter in cls_dict.get('memoizer_counters', []):
                method_name = counter.method_name
                counter.name = cls.__name__ + '.' + method_name
                counter.underlying_method = cls_dict[method_name]
                replacement_method = new.instancemethod(counter, None, cls)
                setattr(cls, method_name, replacement_method)

    def EnableMemoization():
        global use_memoizer
        use_memoizer = 1
|
pinkra/neisse
|
refs/heads/master
|
src/agent/test/testComm.py
|
1
|
import sys, os
import re
__dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(__dir + '/../model')
import unittest
from Comm import Comm
class TestComm(unittest.TestCase):
    """Smoke test for the Comm wrapper."""
    def test_comm(self):
        # Only verifies that constructing a Comm and sending a message does
        # not raise; the final assert is intentionally unconditional.
        comm = Comm('localhost')
        comm.send('test')
        self.assertTrue(True)

if __name__ == '__main__':
    unittest.main()
|
zycdragonball/tensorflow
|
refs/heads/master
|
tensorflow/contrib/opt/python/training/delay_compensated_gradient_descent.py
|
23
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DelayCompensatedGradientDescentOptimizer for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class _RefVariableAsynchronousProcessor(optimizer._RefVariableProcessor):
    """Processor for Variable.

    Extends the base ref-variable processor with an asynchronous update
    that forwards the worker index to the optimizer's apply methods.
    """
    def update_op_asynchronous(self, optimizer, g, index):
        # Dense gradients arrive as Tensors; sparse ones as IndexedSlices.
        if isinstance(g, ops.Tensor):
            return optimizer._apply_dense(g, self._v, index)
        else:
            assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
                                                      "tensor nor IndexedSlices.")
            # pylint: disable=protected-access
            return optimizer._apply_sparse_duplicate_indices(g, self._v, index)
class _DenseResourceVariableAsynchronousProcessor(optimizer._DenseResourceVariableProcessor):
    """Processor for dense ResourceVariables.

    Extends the base resource-variable processor with an asynchronous update
    that forwards the worker index to the optimizer's apply methods.
    """
    def update_op_asynchronous(self, optimizer, g, index):
        # pylint: disable=protected-access
        if isinstance(g, ops.IndexedSlices):
            return optimizer._resource_apply_sparse_duplicate_indices(
                g.values, self._v, g.indices, index)
        return optimizer._resource_apply_dense(g, self._v, index)
def _get_processor(v):
    """Return the asynchronous update processor matching the type of `v`.

    Args:
        v: A `Variable` (ref variable) or a resource variable (whose op is a
            `VarHandleOp`).

    Returns:
        A processor exposing `update_op` / `update_op_asynchronous`.

    Raises:
        NotImplementedError: If `v` is not a supported variable type.
    """
    if v.op.type == "VarHandleOp":
        # Resource variables are accessed through a VarHandleOp.
        return _DenseResourceVariableAsynchronousProcessor(v)
    if isinstance(v, variables.Variable):
        return _RefVariableAsynchronousProcessor(v)
    # Bug fix: the original passed two arguments to NotImplementedError,
    # which renders the message as a tuple; format a single message instead.
    raise NotImplementedError("Trying to optimize unsupported type %s" % (v,))
class DelayCompensatedGradientDescentOptimizer(optimizer.Optimizer):
    """Optimizer that implements gradient descent with delay compensation.

    See [Zheng, Shuxin, et al., 2016](https://arxiv.org/abs/1609.08326)
    ([pdf](https://arxiv.org/pdf/1609.08326.pdf)).
    """

    def __init__(self, learning_rate, variance_parameter, num_workers=1,
                 use_locking=False, name="DelayCompensatedGradientDescent"):
        """Construct a new gradient descent optimizer with delay compensation.

        Args:
            learning_rate: A Tensor or a floating point value. The learning
                rate to use.
            variance_parameter: A Tensor or a floating point value. The lambda
                value to use.
            num_workers: A value to indicate number of workers computing
                gradients asynchronously.
            use_locking: If True use locks for update operations.
            name: Optional name prefix for the operations created when applying
                gradients. Defaults to "DelayCompensatedGradientDescent".

        Raises:
            ValueError: If `num_workers` is not positive.
        """
        if num_workers <= 0:
            raise ValueError("num_workers must be positive: %s" % num_workers)
        super(DelayCompensatedGradientDescentOptimizer, self).__init__(
            use_locking, name)
        self._learning_rate = learning_rate
        self._lambda = variance_parameter
        self._num_workers = num_workers

    def minimize(self, loss, global_step=None, var_list=None,
                 gate_gradients=optimizer.Optimizer.GATE_OP,
                 aggregation_method=None,
                 colocate_gradients_with_ops=False, name=None,
                 grad_loss=None, worker_index=None):
        """Add operations to minimize `loss` by updating `var_list`.

        This method simply combines calls `compute_gradients()` and
        `apply_gradients()`. If you want to process the gradient before
        applying them call `compute_gradients()` and `apply_gradients()`
        explicitly instead of using this function.

        Args:
            loss: A `Tensor` containing the value to minimize.
            global_step: Optional `Variable` to increment by one after the
                variables have been updated.
            var_list: Optional list or tuple of `Variable` objects to update to
                minimize `loss`. Defaults to the list of variables collected in
                the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
            gate_gradients: How to gate the computation of gradients. Can be
                `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
            aggregation_method: Specifies the method used to combine gradient
                terms. Valid values are defined in the class
                `AggregationMethod`.
            colocate_gradients_with_ops: If True, try colocating gradients with
                the corresponding op.
            name: Optional name for the returned operation.
            grad_loss: Optional. A `Tensor` holding the gradient computed for
                `loss`.
            worker_index: Optional. A value to indicate the instance of worker
                minimizing if computing asynchronously.

        Returns:
            An Operation that updates the variables in `var_list`. If
            `global_step` was not `None`, that operation also increments
            `global_step`.

        Raises:
            ValueError: If some of the variables are not `Variable` objects,
                or if `worker_index` is outside `[0, num_workers)`.
        """
        # Bug fix: the original condition
        #   (worker_index < 0 and worker_index is not None) or
        #   worker_index >= self._num_workers
        # compared worker_index with integers BEFORE checking for None, which
        # raises TypeError on Python 3 when worker_index is None. Check for
        # None first, then validate the range.
        if worker_index is not None and not 0 <= worker_index < self._num_workers:
            raise ValueError("worker index must be in the range [0, num_workers): %s" %
                             worker_index)
        grads_and_vars = self.compute_gradients(
            loss, var_list=var_list, gate_gradients=gate_gradients,
            aggregation_method=aggregation_method,
            colocate_gradients_with_ops=colocate_gradients_with_ops,
            grad_loss=grad_loss)
        vars_with_grad = [v for g, v in grads_and_vars if g is not None]
        if not vars_with_grad:
            raise ValueError(
                "No gradients provided for any variable, check your graph for ops"
                " that do not support gradients, between variables %s and loss %s." %
                ([str(v) for _, v in grads_and_vars], loss))
        return self.apply_gradients(grads_and_vars, global_step=global_step,
                                    name=name, worker_index=worker_index)

    def apply_gradients(self,
                        grads_and_vars,
                        global_step=None,
                        name=None,
                        worker_index=None):
        """Apply gradients to variables.

        This is the second part of `minimize()`. It returns an `Operation`
        that applies gradients.

        Args:
            grads_and_vars: List of (gradient, variable) pairs as returned by
                `compute_gradients()`.
            global_step: Optional `Variable` to increment by one after the
                variables have been updated.
            name: Optional name for the returned operation. Default to the
                name passed to the `Optimizer` constructor.
            worker_index: Optional value to indicate the instance of worker
                minimizing if computing asynchronously.

        Returns:
            An `Operation` that applies the specified gradients. If
            `global_step` was not None, that operation also increments
            `global_step`.

        Raises:
            TypeError: If `grads_and_vars` is malformed.
            ValueError: If none of the variables have gradients.
        """
        # This is a default implementation of apply_gradients() that can be
        # shared by most optimizers. It relies on the subclass implementing
        # the following methods: _create_slots(), _prepare(), _apply_dense(),
        # and _apply_sparse().
        grads_and_vars = tuple(grads_and_vars)  # Make sure repeat iteration works.
        if not grads_and_vars:
            raise ValueError("No variables provided.")
        converted_grads_and_vars = []
        for g, v in grads_and_vars:
            if g is not None:
                try:
                    # Convert the grad to Tensor or IndexedSlices if necessary.
                    g = ops.convert_to_tensor_or_indexed_slices(g)
                except TypeError:
                    raise TypeError(
                        "Gradient must be convertible to a Tensor"
                        " or IndexedSlices, or None: %s" % g)
                if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
                    raise TypeError(
                        "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
            p = _get_processor(v)
            converted_grads_and_vars.append((g, v, p))

        converted_grads_and_vars = tuple(converted_grads_and_vars)
        var_list = [v for g, v, _ in converted_grads_and_vars if g is not None]
        if not var_list:
            raise ValueError("No gradients provided for any variable: %s." %
                             ([str(v) for _, _, v in converted_grads_and_vars],))
        with ops.control_dependencies(None):
            self._create_slots([optimizer._get_variable_for(v) for v in var_list])
        update_ops = []
        with ops.name_scope(name, self._name) as name:
            self._prepare()
            for grad, var, processor in converted_grads_and_vars:
                if grad is None:
                    continue
                # We colocate all ops created in _apply_dense or _apply_sparse
                # on the same device as the variable.
                with ops.name_scope("update_" + var.op.name), ops.colocate_with(var):
                    if worker_index is None:
                        # Synchronous path: fall back to the base processor.
                        update_ops.append(processor.update_op(self, grad))
                    else:
                        update_ops.append(processor.update_op_asynchronous(self, grad,
                                                                           worker_index))
            if global_step is None:
                apply_updates = self._finish(update_ops, name)
            else:
                with ops.control_dependencies([self._finish(update_ops, "update")]):
                    with ops.colocate_with(global_step):
                        apply_updates = state_ops.assign_add(global_step, 1, name=name).op
            train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
            if apply_updates not in train_op:
                train_op.append(apply_updates)
            return apply_updates

    def _create_slots(self, var_list):
        """Initialize slots for all the vars of each worker to store
        the previous values of it
        """
        # One shadow copy per (worker, variable) pair, initialized to the
        # variable's initial value.
        for index in range(self._num_workers):
            for v in var_list:
                var2 = array_ops.identity(v.initialized_value())
                self._get_or_make_slot(v, var2, "shadow_{0}".format(index),
                                       self._name)

    def _resource_apply_dense(self, grad, var, worker_index=0):
        # Get previous value of the variable from the slot; the kernel uses
        # it to compensate for the gradient delay and refreshes it afterward.
        shadow = self.get_slot(var, "shadow_{0}".format(worker_index))
        return training_ops.apply_delay_compensated_gradient_descent(
            var.handle,
            math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
            grad,
            math_ops.cast(self._lambda_tensor, grad.dtype.base_dtype),
            shadow.handle,
            use_locking=self._use_locking)

    def _prepare(self):
        # Convert the Python-number hyperparameters to tensors once per apply.
        self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                           name="learning_rate")
        self._lambda_tensor = ops.convert_to_tensor(self._lambda,
                                                    name="lambda")
|
mrkn/cuda-convnet2
|
refs/heads/master
|
python_util/data.py
|
180
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
from numpy.random import randn, rand, random_integers
import os
from threading import Thread
from util import *
BATCH_META_FILE = "batches.meta"
class DataLoaderThread(Thread):
    """Thread that unpickles one batch file and appends it to a target list."""
    def __init__(self, path, tgt):
        Thread.__init__(self)
        self.path = path  # path of the pickled batch file to load
        self.tgt = tgt    # list shared with the caller; receives the result
    def run(self):
        # Append (rather than assign) so the caller can pass a per-thread
        # empty list and collect tgt[0] after join().
        self.tgt += [unpickle(self.path)]
class DataProvider:
    """Base class for serving training/test data batches from a directory.

    Batches live in files named data_batch_<n> (optionally split into
    sub-batch files inside a directory of that name); metadata is read
    from the batches.meta file.
    """
    # Matches batch file names; group(1) is the batch number.
    # NOTE(review): `re` is not imported here — presumably provided by
    # `from util import *`; confirm.
    BATCH_REGEX = re.compile('^data_batch_(\d+)(\.\d+)?$')
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        if batch_range == None:
            batch_range = DataProvider.get_batch_nums(data_dir)
        if init_batchnum is None or init_batchnum not in batch_range:
            init_batchnum = batch_range[0]

        self.data_dir = data_dir
        self.batch_range = batch_range
        self.curr_epoch = init_epoch
        self.curr_batchnum = init_batchnum
        self.dp_params = dp_params
        self.batch_meta = self.get_batch_meta(data_dir)
        self.data_dic = None
        self.test = test
        self.batch_idx = batch_range.index(init_batchnum)

    def get_next_batch(self):
        """Return (epoch, batchnum, data) for the current batch and advance."""
        # With a single batch in range, the cached dict is reused forever.
        if self.data_dic is None or len(self.batch_range) > 1:
            self.data_dic = self.get_batch(self.curr_batchnum)
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()

        return epoch, batchnum, self.data_dic

    def get_batch(self, batch_num):
        """Load batch `batch_num`; a directory is loaded as parallel sub-batches."""
        fname = self.get_data_file_name(batch_num)
        if os.path.isdir(fname): # batch in sub-batches
            sub_batches = sorted(os.listdir(fname), key=alphanum_key)
            #print sub_batches
            num_sub_batches = len(sub_batches)
            tgts = [[] for i in xrange(num_sub_batches)]
            threads = [DataLoaderThread(os.path.join(fname, s), tgt) for (s, tgt) in zip(sub_batches, tgts)]
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()

            return [t[0] for t in tgts]
        return unpickle(self.get_data_file_name(batch_num))

    def get_data_dims(self,idx=0):
        # Index 0 is the data matrix; any other index is a 1-dim label row.
        return self.batch_meta['num_vis'] if idx == 0 else 1

    def advance_batch(self):
        self.batch_idx = self.get_next_batch_idx()
        self.curr_batchnum = self.batch_range[self.batch_idx]
        if self.batch_idx == 0: # we wrapped
            self.curr_epoch += 1

    def get_next_batch_idx(self):
        return (self.batch_idx + 1) % len(self.batch_range)

    def get_next_batch_num(self):
        return self.batch_range[self.get_next_batch_idx()]

    # get filename of current batch
    def get_data_file_name(self, batchnum=None):
        if batchnum is None:
            batchnum = self.curr_batchnum
        return os.path.join(self.data_dir, 'data_batch_%d' % batchnum)

    @classmethod
    def get_instance(cls, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False):
        """Factory: instantiate the registered provider class named by `type`."""
        # NOTE: DataProvider cannot be referenced in dp_classes at class
        # definition time, hence this registry lives at module level.
        #cls.dp_classes['default'] = DataProvider

        type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider
        if type.startswith("dummy-"):
            # Dummy types encode their dimensionality as a suffix: dummy-...-<dims>
            name = "-".join(type.split('-')[:-1]) + "-n"
            if name not in dp_types:
                raise DataProviderException("No such data provider: %s" % type)
            _class = dp_classes[name]
            dims = int(type.split('-')[-1])
            return _class(dims)
        elif type in dp_types:
            _class = dp_classes[type]
            return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)

        raise DataProviderException("No such data provider: %s" % type)

    @classmethod
    def register_data_provider(cls, name, desc, _class):
        """Register a new provider class under `name` in the module registry."""
        if name in dp_types:
            raise DataProviderException("Data provider %s already registered" % name)
        dp_types[name] = desc
        dp_classes[name] = _class

    @staticmethod
    def get_batch_meta(data_dir):
        return unpickle(os.path.join(data_dir, BATCH_META_FILE))

    @staticmethod
    def get_batch_filenames(srcdir):
        return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key)

    @staticmethod
    def get_batch_nums(srcdir):
        names = DataProvider.get_batch_filenames(srcdir)
        return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names)))

    @staticmethod
    def get_num_batches(srcdir):
        return len(DataProvider.get_batch_nums(srcdir))
class DummyDataProvider(DataProvider):
    """Provider that fabricates random data of a fixed dimension (for testing)."""
    def __init__(self, data_dim):
        # Deliberately skips DataProvider.__init__: there is no batch
        # directory on disk, so the state is filled in by hand.
        #self.data_dim = data_dim
        self.batch_range = [1]
        self.batch_meta = {'num_vis': data_dim, 'data_in_rows':True}
        self.curr_epoch = 1
        self.curr_batchnum = 1
        self.batch_idx = 0

    def get_next_batch(self):
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        data = rand(512, self.get_data_dims()).astype(n.single)
        # NOTE(review): the pre-advance (epoch, batchnum) captured above are
        # unused; the post-advance values are returned, unlike the base
        # class's get_next_batch — confirm whether that is intentional.
        return self.curr_epoch, self.curr_batchnum, {'data':data}
class LabeledDataProvider(DataProvider):
    """DataProvider for batches that carry class labels in their metadata."""
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)

    def get_num_classes(self):
        # Class count comes from the label_names list in batches.meta.
        return len(self.batch_meta['label_names'])
class LabeledDummyDataProvider(DummyDataProvider):
    """Dummy provider that also fabricates random integer labels."""
    def __init__(self, data_dim, num_classes=10, num_cases=7):
        # Skips both parent __init__s; no batch directory exists.
        #self.data_dim = data_dim
        self.batch_range = [1]
        self.batch_meta = {'num_vis': data_dim,
                           'label_names': [str(x) for x in range(num_classes)],
                           'data_in_rows':True}
        self.num_cases = num_cases
        self.num_classes = num_classes
        self.curr_epoch = 1
        self.curr_batchnum = 1
        self.batch_idx=0
        self.data = None

    def get_num_classes(self):
        return self.num_classes

    def get_next_batch(self):
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        # Data and labels are generated once and cached for later epochs.
        if self.data is None:
            data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand
            labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single)
            self.data, self.labels = data, labels
        else:
            data, labels = self.data, self.labels
#        print data.shape, labels.shape
        # NOTE(review): as in DummyDataProvider, the pre-advance epoch and
        # batchnum are unused and post-advance values are returned — confirm.
        return self.curr_epoch, self.curr_batchnum, [data.T, labels.T ]
# Registry of built-in provider types: name -> description / implementing
# class. Extended at runtime via DataProvider.register_data_provider.
dp_types = {"dummy-n": "Dummy data provider for n-dimensional data",
            "dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"}
dp_classes = {"dummy-n": DummyDataProvider,
              "dummy-labeled-n": LabeledDummyDataProvider}
class DataProviderException(Exception):
    """Raised for data-provider registration and lookup errors."""
|
jgardner1/JGRPGTools
|
refs/heads/master
|
jgrpg/CreateItemPrototypeWidget.py
|
1
|
from PyQt5.uic import loadUiType
ui, base = loadUiType('ui/CreateItemPrototypeWidget.ui')
from PyQt5.QtCore import pyqtSignal, Qt
from jgrpg.model import ItemPrototypes
class CreateItemPrototypeWidget(base, ui):
    """Create/Edit form for an ItemPrototype.

    When constructed with `obj`, the widget edits that prototype in place;
    otherwise Apply/Accept creates a new prototype via ItemPrototypes.add.
    """
    def __init__(self, *, obj=None):
        """Create and populate a Create / Edit Item window.

        If `obj` is specified, it is an edit window."""
        super(CreateItemPrototypeWidget, self).__init__()

        self.obj = obj  # the ItemPrototype being edited, or None when creating

        # Setup the UI elements
        self.setupUi(self)

        self.reset()

    def clicked(self, button):
        """Dispatch a button-box click by role: apply, reset, accept, reject."""
        role = self.buttonBox.buttonRole(button)
        if role == self.buttonBox.ApplyRole:
            self.apply()
        elif role == self.buttonBox.ResetRole:
            self.reset()
        elif role == self.buttonBox.AcceptRole:
            self.apply()
            self.parent().close()
        elif role == self.buttonBox.RejectRole:
            self.parent().close()
        else:
            print("Unknown role")

    def reset(self):
        """Repopulate the form from self.obj, or load defaults when creating."""
        if self.obj:
            # Reset to the original item
            item = self.obj

            # Set the window title
            self.setWindowTitle("Edit {} (Item Prototype)".format(item.name))

            # Set the name line edit
            self.nameLineEdit.setText(item.name)

            # Set the type combo box; fall back to free text for unknown types
            type_index = self.typeComboBox.findText(item.type)
            if type_index == -1:
                self.typeComboBox.setEditText(item.type)
            else:
                self.typeComboBox.setCurrentIndex(type_index)

            # Set the weight
            self.weightSpinBox.setValue(item.weight)

            # Set the size
            self.sizeSpinBox.setValue(item.size)

            # Set the value
            self.valueSpinBox.setValue(item.value)
        else:
            # Set the defaults
            self.setWindowTitle("Create Item Prototype")
            self.nameLineEdit.setText("")
            self.typeComboBox.setCurrentIndex(-1)
            self.weightSpinBox.setValue(0.5)
            self.sizeSpinBox.setValue(6.0)
            self.valueSpinBox.setValue(1.0)

    def apply(self):
        """Persist the form: update self.obj in place, or create a new prototype."""
        # Create or update the item.
        data = {
            "name":self.nameLineEdit.text().strip(),
            "type":self.typeComboBox.currentText().strip(),
            "weight": self.weightSpinBox.value(),
            "size": self.sizeSpinBox.value(),
            "value": self.valueSpinBox.value(),
        }

        if self.obj:
            # Update the item because we are editing it
            self.obj.update(**data)
        else:
            # Create the item because we are creating a new one
            self.obj = ItemPrototypes.add(**data)
        self.reset()
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
refs/heads/master
|
Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py2/jsonschema/_version.py
|
18
|
# This file is automatically generated by setup.py.
__version__ = '2.6.0'
__sha__ = 'gd16713a'
__revision__ = 'gd16713a'
|
sergeimoiseev/othodi_code
|
refs/heads/master
|
old2/test_tsp.py
|
6
|
import tsp
from math import sqrt
def test_rand_seq():
    """tsp.rand_seq(n) yields a permutation of range(n)."""
    expected=range(100)
    seq=list(tsp.rand_seq(100))
    assert len(expected) == len(seq)
    for n in expected:
        assert n in seq
def test_all_pairs():
    '''make sure we actually generate all pairs'''
    # All ordered index pairs over range(3), in any order.
    expected=[(0,0),(0,1),(1,0),(1,1),(0,2),(2,0),(1,2),(2,1),(2,2)]
    pairs=list(tsp.all_pairs(3))
    assert len(expected) == len(pairs)
    for pair in expected:
        assert pair in pairs
def test_reversed_sections():
    """Every tour reachable by reversing one contiguous section is produced."""
    expected=[[2,1,3],[3,1,2],[3,2,1],[1,3,2],[2,3,1]]
    rev=list(tsp.reversed_sections([1,2,3]))
    assert len(expected) == len(rev)
    for tour in expected:
        assert tour in rev
def test_swapped_cities():
    """Every tour reachable by swapping one pair of cities is produced."""
    expected=[[2,1,3],[3,2,1],[1,3,2]]
    swapped=list(tsp.swapped_cities([1,2,3]))
    assert len(expected) == len(swapped)
    for tour in expected:
        assert tour in swapped
def test_cartesian_matrix():
    """Distance matrix over the unit square's corners: 0 diagonal, 1 edges, sqrt(2) diagonals."""
    coords=[(0,0),(0,1),(1,0),(1,1)]
    matrix=tsp.cartesian_matrix(coords)
    # 4 points -> 16 (i, j) entries.
    assert 16 == len(matrix)
    assert matrix[(0,0)] == 0
    assert matrix[(1,1)] == 0
    assert matrix[(2,2)] == 0
    assert matrix[(3,3)] == 0
    assert matrix[(0,1)] == 1
    assert matrix[(1,0)] == 1
    assert matrix[(0,2)] == 1
    assert matrix[(2,0)] == 1
    assert matrix[(0,3)] == sqrt(2)
    assert matrix[(3,0)] == sqrt(2)
    assert matrix[(1,2)] == sqrt(2)
    assert matrix[(2,1)] == sqrt(2)
    assert matrix[(1,3)] == 1
    assert matrix[(3,1)] == 1
    assert matrix[(2,3)] == 1
    assert matrix[(3,2)] == 1
def test_read_coords():
    """read_coords parses one 'x,y' pair per line into float tuples."""
    # .split() turns the multi-line literal into a list of lines.
    coord_file="""0.0,0.0\n0,1\n1,0.0\n1.0,1.0""".split()
    coords=tsp.read_coords(coord_file)
    assert 4 == len(coords)
    assert [(0.0,0.0),(0.0,1.0),(1.0,0.0),(1.0,1.0)] == coords
def test_tour_length():
    """tour_length sums pairwise distances including the return leg to the start."""
    coords=[(0,0),(0,1),(1,0),(1,1)]
    matrix=tsp.cartesian_matrix(coords)
    # Two-city tours: out and back along the same edge.
    assert 2 == tsp.tour_length(matrix,[0,1])
    assert 2 == tsp.tour_length(matrix,[0,2])
    assert (1+sqrt(2)+1) == tsp.tour_length(matrix,[0,1,2])
    assert (1+sqrt(2)+1+sqrt(2)) == tsp.tour_length(matrix,[0,1,2,3])
|
jank3/django
|
refs/heads/master
|
tests/migrations/test_migrations_backwards_deps_1/0002_second.py
|
416
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Empty migration used by the migration-loader tests; only declares
    a dependency on 0001_initial."""

    dependencies = [('migrations', '0001_initial')]

    operations = []
|
WSDC-NITWarangal/django
|
refs/heads/master
|
django/contrib/gis/geos/coordseq.py
|
374
|
"""
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import byref, c_double, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class GEOSCoordSeq(GEOSBase):
"The internal representation of a list of coordinates inside a Geometry."
ptr_type = CS_PTR
    def __init__(self, ptr, z=False):
        "Initializes from a GEOS pointer."
        # `ptr` is an opaque GEOS coordinate-sequence handle; `z` flags
        # whether the owning geometry is three-dimensional.
        if not isinstance(ptr, CS_PTR):
            raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
        self._ptr = ptr
        self._z = z
    def __iter__(self):
        "Iterates over each point in the coordinate sequence."
        # Delegates to __getitem__, which yields (x, y[, z]) tuples.
        for i in range(self.size):
            yield self[i]
def __len__(self):
"Returns the number of points in the coordinate sequence."
return int(self.size)
def __str__(self):
"Returns the string representation of the coordinate sequence."
return str(self.tuple)
def __getitem__(self, index):
"Returns the coordinate sequence value at the given index."
coords = [self.getX(index), self.getY(index)]
if self.dims == 3 and self._z:
coords.append(self.getZ(index))
return tuple(coords)
def __setitem__(self, index, value):
"Sets the coordinate sequence value at the given index."
# Checking the input value
if isinstance(value, (list, tuple)):
pass
elif numpy and isinstance(value, numpy.ndarray):
pass
else:
raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
# Checking the dims of the input
if self.dims == 3 and self._z:
n_args = 3
set_3d = True
else:
n_args = 2
set_3d = False
if len(value) != n_args:
raise TypeError('Dimension of value does not match.')
# Setting the X, Y, Z
self.setX(index, value[0])
self.setY(index, value[1])
if set_3d:
self.setZ(index, value[2])
# #### Internal Routines ####
def _checkindex(self, index):
"Checks the given index."
sz = self.size
if (sz < 1) or (index < 0) or (index >= sz):
raise IndexError('invalid GEOS Geometry index: %s' % str(index))
def _checkdim(self, dim):
"Checks the given dimension."
if dim < 0 or dim > 2:
raise GEOSException('invalid ordinate dimension "%d"' % dim)
# #### Ordinate getting and setting routines ####
def getOrdinate(self, dimension, index):
"Returns the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))
def setOrdinate(self, dimension, index, value):
"Sets the value for the given dimension and index."
self._checkindex(index)
self._checkdim(dimension)
capi.cs_setordinate(self.ptr, index, dimension, value)
def getX(self, index):
"Get the X value at the index."
return self.getOrdinate(0, index)
def setX(self, index, value):
"Set X with the value at the given index."
self.setOrdinate(0, index, value)
def getY(self, index):
"Get the Y value at the given index."
return self.getOrdinate(1, index)
def setY(self, index, value):
"Set Y with the value at the given index."
self.setOrdinate(1, index, value)
def getZ(self, index):
"Get Z with the value at the given index."
return self.getOrdinate(2, index)
def setZ(self, index, value):
"Set Z with the value at the given index."
self.setOrdinate(2, index, value)
# ### Dimensions ###
@property
def size(self):
"Returns the size of this coordinate sequence."
return capi.cs_getsize(self.ptr, byref(c_uint()))
@property
def dims(self):
"Returns the dimensions of this coordinate sequence."
return capi.cs_getdims(self.ptr, byref(c_uint()))
@property
def hasz(self):
"""
Returns whether this coordinate sequence is 3D. This property value is
inherited from the parent Geometry.
"""
return self._z
# ### Other Methods ###
def clone(self):
"Clones this coordinate sequence."
return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)
@property
def kml(self):
"Returns the KML representation for the coordinates."
# Getting the substitution string depending on whether the coordinates have
# a Z dimension.
if self.hasz:
substr = '%s,%s,%s '
else:
substr = '%s,%s,0 '
return '<coordinates>%s</coordinates>' % \
''.join(substr % self[i] for i in range(len(self))).strip()
@property
def tuple(self):
"Returns a tuple version of this coordinate sequence."
n = self.size
if n == 1:
return self[0]
else:
return tuple(self[i] for i in range(n))
|
RydrDojo/Ridr_app
|
refs/heads/master
|
pylotVenv/lib/python2.7/site-packages/werkzeug/datastructures.py
|
148
|
# -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import codecs
import mimetypes
from copy import deepcopy
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper, \
to_native
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
    """Raise :exc:`TypeError` naming *self*'s class as immutable.

    Shared helper invoked by the mutating methods of the immutable
    mixins below.
    """
    raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
    """Iterates over the items of a mapping yielding keys and values
    without dropping any from more complex structures.
    """
    if isinstance(mapping, MultiDict):
        # A MultiDict already knows how to expand repeated keys.
        for pair in iteritems(mapping, multi=True):
            yield pair
    elif isinstance(mapping, dict):
        for key, packed in iteritems(mapping):
            if isinstance(packed, (tuple, list)):
                # Sequence values represent multiple entries for the key.
                for single in packed:
                    yield key, single
            else:
                yield key, packed
    else:
        # Otherwise assume an iterable of (key, value) pairs.
        for pair in mapping:
            yield pair
def native_itermethods(names):
    # Class decorator: a no-op on Python 3.  On Python 2 it renames each
    # listed generator method ``name`` to ``itername`` and installs a
    # list-returning wrapper under the original ``name``, matching the
    # classic dict API (keys() -> list, iterkeys() -> iterator).
    if not PY2:
        return lambda x: x
    def setmethod(cls, name):
        # Separate helper (rather than inlining in the loop below) so each
        # wrapper closes over its own ``itermethod``/``name`` binding and
        # avoids the late-binding-closure pitfall.
        itermethod = getattr(cls, name)
        setattr(cls, 'iter%s' % name, itermethod)
        listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
        listmethod.__doc__ = \
            'Like :py:meth:`iter%s`, but returns a list.' % name
        setattr(cls, name, listmethod)
    def wrap(cls):
        for name in names:
            setmethod(cls, name)
        return cls
    return wrap
class ImmutableListMixin(object):
    """Makes a :class:`list` immutable.
    .. versionadded:: 0.5
    :private:
    """

    # Lazily computed hash of the tuple of elements.
    _hash_cache = None

    def __hash__(self):
        cached = self._hash_cache
        if cached is not None:
            return cached
        self._hash_cache = rv = hash(tuple(self))
        return rv

    def __reduce_ex__(self, protocol):
        # Pickle via a plain (mutable) list of the contents.
        return type(self), (list(self),)

    def __delitem__(self, key):
        is_immutable(self)

    def __delslice__(self, i, j):
        is_immutable(self)

    def __iadd__(self, other):
        is_immutable(self)
    __imul__ = __iadd__

    def __setitem__(self, key, value):
        is_immutable(self)

    def __setslice__(self, i, j, value):
        is_immutable(self)

    def append(self, item):
        is_immutable(self)
    remove = append

    def extend(self, iterable):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def reverse(self):
        is_immutable(self)

    def sort(self, cmp=None, key=None, reverse=None):
        is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
    """An immutable :class:`list`.
    .. versionadded:: 0.5
    :private:
    """

    def __repr__(self):
        # Render as ClassName([...]) so subclasses display their own name.
        return '%s(%s)' % (self.__class__.__name__, list.__repr__(self))
class ImmutableDictMixin(object):
    """Makes a :class:`dict` immutable.
    .. versionadded:: 0.5
    :private:
    """

    # Lazily computed hash of the frozen item set.
    _hash_cache = None

    @classmethod
    def fromkeys(cls, keys, value=None):
        # Go through __new__ + the inherited (dict) initializer so the
        # immutable subclass can still be populated once at creation.
        instance = super(cls, cls).__new__(cls)
        instance.__init__(zip(keys, repeat(value)))
        return instance

    def __reduce_ex__(self, protocol):
        return type(self), (dict(self),)

    def _iter_hashitems(self):
        # Hook so multi-dict subclasses can hash over all values.
        return iteritems(self)

    def __hash__(self):
        cached = self._hash_cache
        if cached is None:
            cached = hash(frozenset(self._iter_hashitems()))
            self._hash_cache = cached
        return cached

    def setdefault(self, key, default=None):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def pop(self, key, default=None):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def __delitem__(self, key):
        is_immutable(self)

    def clear(self):
        is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
    """Makes a :class:`MultiDict` immutable.
    .. versionadded:: 0.5
    :private:
    """

    def __reduce_ex__(self, protocol):
        # Pickle via the full (key, value) pair list so duplicate keys
        # survive the round trip.
        return type(self), (list(iteritems(self, multi=True)),)

    def _iter_hashitems(self):
        # Hash over every value of every key, not just the first.
        return iteritems(self, multi=True)

    def add(self, key, value):
        is_immutable(self)

    def popitemlist(self):
        is_immutable(self)

    def poplist(self, key):
        is_immutable(self)

    def setlist(self, key, new_list):
        is_immutable(self)

    def setlistdefault(self, key, default_list=None):
        is_immutable(self)
class UpdateDictMixin(object):
    """Makes dicts call `self.on_update` on modifications.
    .. versionadded:: 0.5
    :private:
    """
    # Callback invoked with ``self`` after every mutating operation; stays
    # ``None`` (disabled) unless the subclass or owner assigns one.
    on_update = None
    def calls_update(name):
        # Factory run at class-definition time: builds a wrapper that
        # forwards ``name`` to the superclass implementation and then fires
        # ``on_update``.  It is deleted from the class namespace below so it
        # never becomes a method.
        def oncall(self, *args, **kw):
            rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
            if self.on_update is not None:
                self.on_update(self)
            return rv
        oncall.__name__ = name
        return oncall
    def setdefault(self, key, default=None):
        # Hand-written (not via calls_update) so the callback only fires
        # when the key was actually inserted.
        modified = key not in self
        rv = super(UpdateDictMixin, self).setdefault(key, default)
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv
    def pop(self, key, default=_missing):
        # Fires the callback only if the key existed.  A missing key with no
        # default raises KeyError from the superclass before the callback.
        modified = key in self
        if default is _missing:
            rv = super(UpdateDictMixin, self).pop(key)
        else:
            rv = super(UpdateDictMixin, self).pop(key, default)
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv
    __setitem__ = calls_update('__setitem__')
    __delitem__ = calls_update('__delitem__')
    clear = calls_update('clear')
    popitem = calls_update('popitem')
    update = calls_update('update')
    del calls_update
class TypeConversionDict(dict):
    """Works like a regular dict but the :meth:`get` method can perform
    type conversions.  :class:`MultiDict` and :class:`CombinedMultiDict`
    are subclasses of this class and provide the same feature.
    .. versionadded:: 0.5
    """

    def get(self, key, default=None, type=None):
        """Return the default value if the requested data doesn't exist.
        If `type` is provided and is a callable it should convert the value,
        return it or raise a :exc:`ValueError` if that is not possible.  In
        this case the function will return the default as if the value was not
        found:
        >>> d = TypeConversionDict(foo='42', bar='blub')
        >>> d.get('foo', type=int)
        42
        >>> d.get('bar', -1, type=int)
        -1
        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up.  If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
                     by this callable the default value is returned.
        """
        try:
            value = self[key]
            if type is not None:
                value = type(value)
        except (KeyError, ValueError):
            return default
        return value
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
    """Works like a :class:`TypeConversionDict` but does not support
    modifications.
    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        # Copies are deliberately mutable again.
        return TypeConversionDict(self)

    def __copy__(self):
        # Immutable, so copy.copy() may safely return self unchanged.
        return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
    """A :class:`MultiDict` is a dictionary subclass customized to deal with
    multiple values for the same key which is for example used by the parsing
    functions in the wrappers. This is necessary because some HTML form
    elements pass multiple values for the same key.
    :class:`MultiDict` implements all standard dictionary methods.
    Internally, it saves all values for a key as a list, but the standard dict
    access methods will only return the first value for a key. If you want to
    gain access to the other values, too, you have to use the `list` methods as
    explained below.
    Basic Usage:
    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
    >>> d
    MultiDict([('a', 'b'), ('a', 'c')])
    >>> d['a']
    'b'
    >>> d.getlist('a')
    ['b', 'c']
    >>> 'a' in d
    True
    It behaves like a normal dict thus all dict functions will only return the
    first value when multiple values for one key are found.
    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    A :class:`MultiDict` can be constructed from an iterable of
    ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
    onwards some keyword parameters.
    :param mapping: the initial value for the :class:`MultiDict`.  Either a
                    regular dict, an iterable of ``(key, value)`` tuples
                    or `None`.
    """
    # Invariant: the backing dict maps every key to a *list* of values.
    # Plain-dict behavior is reached throughout via unbound calls like
    # ``dict.__getitem__(self, ...)`` and the compat-helper trick
    # ``iteritems(dict, self)`` to bypass the overridden methods below.
    def __init__(self, mapping=None):
        if isinstance(mapping, MultiDict):
            # Copy each per-key value list so the dicts don't share them.
            dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
        elif isinstance(mapping, dict):
            tmp = {}
            for key, value in iteritems(mapping):
                if isinstance(value, (tuple, list)):
                    value = list(value)
                else:
                    value = [value]
                tmp[key] = value
            dict.__init__(self, tmp)
        else:
            # Assume an iterable of (key, value) pairs (or None).
            tmp = {}
            for key, value in mapping or ():
                tmp.setdefault(key, []).append(value)
            dict.__init__(self, tmp)
    def __getstate__(self):
        # Pickle state: {key: [values]} mapping.
        return dict(self.lists())
    def __setstate__(self, value):
        dict.clear(self)
        dict.update(self, value)
    def __getitem__(self, key):
        """Return the first data value for this key;
        raises KeyError if not found.
        :param key: The key to be looked up.
        :raise KeyError: if the key does not exist.
        """
        if key in self:
            return dict.__getitem__(self, key)[0]
        # NOTE(review): ``exceptions`` is not imported in this part of the
        # module; presumably it is imported elsewhere (e.g. at module
        # bottom to avoid a circular import) -- confirm.
        raise exceptions.BadRequestKeyError(key)
    def __setitem__(self, key, value):
        """Like :meth:`add` but removes an existing key first.
        :param key: the key for the value.
        :param value: the value to set.
        """
        dict.__setitem__(self, key, [value])
    def add(self, key, value):
        """Adds a new value for the key.
        .. versionadded:: 0.6
        :param key: the key for the value.
        :param value: the value to add.
        """
        dict.setdefault(self, key, []).append(value)
    def getlist(self, key, type=None):
        """Return the list of items for a given key. If that key is not in the
        `MultiDict`, the return value will be an empty list.  Just as `get`
        `getlist` accepts a `type` parameter.  All items will be converted
        with the callable defined there.
        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.
        """
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            # Copy so callers cannot mutate the internal list.
            return list(rv)
        result = []
        for item in rv:
            try:
                result.append(type(item))
            except ValueError:
                # Unconvertible values are silently dropped by contract.
                pass
        return result
    def setlist(self, key, new_list):
        """Remove the old values for a key and add new ones.  Note that the list
        you pass the values in will be shallow-copied before it is inserted in
        the dictionary.
        >>> d = MultiDict()
        >>> d.setlist('foo', ['1', '2'])
        >>> d['foo']
        '1'
        >>> d.getlist('foo')
        ['1', '2']
        :param key: The key for which the values are set.
        :param new_list: An iterable with the new values for the key.  Old values
                         are removed first.
        """
        dict.__setitem__(self, key, list(new_list))
    def setdefault(self, key, default=None):
        """Returns the value for the key if it is in the dict, otherwise it
        returns `default` and sets that value for `key`.
        :param key: The key to be looked up.
        :param default: The default value to be returned if the key is not
                        in the dict.  If not further specified it's `None`.
        """
        if key not in self:
            self[key] = default
        else:
            default = self[key]
        return default
    def setlistdefault(self, key, default_list=None):
        """Like `setdefault` but sets multiple values.  The list returned
        is not a copy, but the list that is actually used internally.  This
        means that you can put new values into the dict by appending items
        to the list:
        >>> d = MultiDict({"foo": 1})
        >>> d.setlistdefault("foo").extend([2, 3])
        >>> d.getlist("foo")
        [1, 2, 3]
        :param key: The key to be looked up.
        :param default: An iterable of default values.  It is either copied
                        (in case it was a list) or converted into a list
                        before returned.
        :return: a :class:`list`
        """
        if key not in self:
            default_list = list(default_list or ())
            dict.__setitem__(self, key, default_list)
        else:
            default_list = dict.__getitem__(self, key)
        return default_list
    def items(self, multi=False):
        """Return an iterator of ``(key, value)`` pairs.
        :param multi: If set to `True` the iterator returned will have a pair
                      for each value of each key.  Otherwise it will only
                      contain pairs for the first value of each key.
        """
        # ``iteritems(dict, self)`` calls the *plain dict* items method on
        # self, yielding (key, value-list) pairs.
        for key, values in iteritems(dict, self):
            if multi:
                for value in values:
                    yield key, value
            else:
                yield key, values[0]
    def lists(self):
        """Return a list of ``(key, values)`` pairs, where values is the list
        of all values associated with the key."""
        for key, values in iteritems(dict, self):
            # Copy each list so callers cannot mutate internal state.
            yield key, list(values)
    def keys(self):
        return iterkeys(dict, self)
    __iter__ = keys
    def values(self):
        """Returns an iterator of the first value on every key's value list."""
        for values in itervalues(dict, self):
            yield values[0]
    def listvalues(self):
        """Return an iterator of all values associated with a key.  Zipping
        :meth:`keys` and this is the same as calling :meth:`lists`:
        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> zip(d.keys(), d.listvalues()) == d.lists()
        True
        """
        return itervalues(dict, self)
    def copy(self):
        """Return a shallow copy of this object."""
        return self.__class__(self)
    def deepcopy(self, memo=None):
        """Return a deep copy of this object."""
        return self.__class__(deepcopy(self.to_dict(flat=False), memo))
    def to_dict(self, flat=True):
        """Return the contents as regular dict.  If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.
        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it.  Otherwise it will only
                     contain the first value for each key.
        :return: a :class:`dict`
        """
        if flat:
            return dict(iteritems(self))
        return dict(self.lists())
    def update(self, other_dict):
        """update() extends rather than replaces existing key lists."""
        for key, value in iter_multi_items(other_dict):
            MultiDict.add(self, key, value)
    def pop(self, key, default=_missing):
        """Pop the first item for a list on the dict.  Afterwards the
        key is removed from the dict, so additional values are discarded:
        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> d.pop("foo")
        1
        >>> "foo" in d
        False
        :param key: the key to pop.
        :param default: if provided the value to return if the key was
                        not in the dictionary.
        """
        try:
            return dict.pop(self, key)[0]
        except KeyError as e:
            if default is not _missing:
                return default
            raise exceptions.BadRequestKeyError(str(e))
    def popitem(self):
        """Pop an item from the dict."""
        try:
            item = dict.popitem(self)
            # Return only the first value of the popped key's list.
            return (item[0], item[1][0])
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
    def poplist(self, key):
        """Pop the list for a key from the dict.  If the key is not in the dict
        an empty list is returned.
        .. versionchanged:: 0.5
           If the key does no longer exist a list is returned instead of
           raising an error.
        """
        return dict.pop(self, key, [])
    def popitemlist(self):
        """Pop a ``(key, list)`` tuple from the dict."""
        try:
            return dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
    def __copy__(self):
        return self.copy()
    def __deepcopy__(self, memo):
        return self.deepcopy(memo=memo)
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
    """Works like a regular :class:`MultiDict` but preserves the
    order of the fields.  To convert the ordered multi dict into a
    list you can use the :meth:`items` method and pass it ``multi=True``.
    In general an :class:`OrderedMultiDict` is an order of magnitude
    slower than a :class:`MultiDict`.
    .. admonition:: note
       Due to a limitation in Python you cannot convert an ordered
       multi dict into a regular dict by using ``dict(multidict)``.
       Instead you have to use the :meth:`to_dict` method, otherwise
       the internal bucket objects are exposed.
    """
    # Invariant: the backing dict maps key -> list of _omd_bucket objects;
    # the buckets additionally form a doubly linked list from
    # _first_bucket to _last_bucket recording global insertion order.
    def __init__(self, mapping=None):
        dict.__init__(self)
        self._first_bucket = self._last_bucket = None
        if mapping is not None:
            OrderedMultiDict.update(self, mapping)
    def __eq__(self, other):
        # Ordered vs ordered compares the full ordered item streams;
        # ordered vs plain MultiDict compares per-key value lists only.
        if not isinstance(other, MultiDict):
            return NotImplemented
        if isinstance(other, OrderedMultiDict):
            iter1 = iteritems(self, multi=True)
            iter2 = iteritems(other, multi=True)
            try:
                for k1, v1 in iter1:
                    k2, v2 = next(iter2)
                    if k1 != k2 or v1 != v2:
                        return False
            except StopIteration:
                # other ran out of items first.
                return False
            try:
                next(iter2)
            except StopIteration:
                return True
            # other still has items left.
            return False
        if len(self) != len(other):
            return False
        for key, values in iterlists(self):
            if other.getlist(key) != values:
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def __reduce_ex__(self, protocol):
        # Pickle via the ordered (key, value) pair list.
        return type(self), (list(iteritems(self, multi=True)),)
    def __getstate__(self):
        return list(iteritems(self, multi=True))
    def __setstate__(self, values):
        dict.clear(self)
        for key, value in values:
            self.add(key, value)
    def __getitem__(self, key):
        # Buckets wrap the stored values; unwrap the first one.
        if key in self:
            return dict.__getitem__(self, key)[0].value
        raise exceptions.BadRequestKeyError(key)
    def __setitem__(self, key, value):
        self.poplist(key)
        self.add(key, value)
    def __delitem__(self, key):
        self.pop(key)
    def keys(self):
        return (key for key, value in iteritems(self))
    __iter__ = keys
    def values(self):
        return (value for key, value in iteritems(self))
    def items(self, multi=False):
        # Walk the bucket linked list so insertion order is preserved.
        ptr = self._first_bucket
        if multi:
            while ptr is not None:
                yield ptr.key, ptr.value
                ptr = ptr.next
        else:
            # Only the first occurrence of each key is yielded.
            returned_keys = set()
            while ptr is not None:
                if ptr.key not in returned_keys:
                    returned_keys.add(ptr.key)
                    yield ptr.key, ptr.value
                ptr = ptr.next
    def lists(self):
        returned_keys = set()
        ptr = self._first_bucket
        while ptr is not None:
            if ptr.key not in returned_keys:
                yield ptr.key, self.getlist(ptr.key)
                returned_keys.add(ptr.key)
            ptr = ptr.next
    def listvalues(self):
        for key, values in iterlists(self):
            yield values
    def add(self, key, value):
        # The bucket links itself into the ordered list on construction.
        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
    def getlist(self, key, type=None):
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            return [x.value for x in rv]
        result = []
        for item in rv:
            try:
                result.append(type(item.value))
            except ValueError:
                # Unconvertible values are silently dropped by contract.
                pass
        return result
    def setlist(self, key, new_list):
        self.poplist(key)
        for value in new_list:
            self.add(key, value)
    def setlistdefault(self, key, default_list=None):
        # Unsupported: the internal list holds buckets, so handing it out
        # for in-place mutation (as MultiDict does) cannot work here.
        raise TypeError('setlistdefault is unsupported for '
                        'ordered multi dicts')
    def update(self, mapping):
        for key, value in iter_multi_items(mapping):
            OrderedMultiDict.add(self, key, value)
    def poplist(self, key):
        buckets = dict.pop(self, key, ())
        for bucket in buckets:
            bucket.unlink(self)
        return [x.value for x in buckets]
    def pop(self, key, default=_missing):
        try:
            buckets = dict.pop(self, key)
        except KeyError as e:
            if default is not _missing:
                return default
            raise exceptions.BadRequestKeyError(str(e))
        # Unlink every bucket for the key; only the first value is returned.
        for bucket in buckets:
            bucket.unlink(self)
        return buckets[0].value
    def popitem(self):
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return key, buckets[0].value
    def popitemlist(self):
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
    # Render ``value`` plus keyword parameters as an HTTP options header,
    # mapping pythonic underscores in parameter names back to dashes.
    options = dict((k.replace('_', '-'), v) for k, v in kw.items())
    return dump_options_header(value, options)
def _unicodify_header_value(value):
    """Coerce a header value to text; raw bytes are assumed latin-1."""
    if isinstance(value, bytes):
        value = value.decode('latin-1')
    if not isinstance(value, text_type):
        # Anything else (ints etc.) goes through the text constructor.
        value = text_type(value)
    return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
    def __getitem__(self, key, _get_mode=False):
        # Integers and slices index the raw tuple list; strings do a
        # case-insensitive header-name lookup.  ``_get_mode`` is internal:
        # callers like :meth:`get` set it so a miss raises a plain KeyError
        # instead of the HTTP-aware BadRequestKeyError.
        if not _get_mode:
            if isinstance(key, integer_types):
                return self._list[key]
            elif isinstance(key, slice):
                return self.__class__(self._list[key])
        if not isinstance(key, string_types):
            raise exceptions.BadRequestKeyError(key)
        ikey = key.lower()
        for k, v in self._list:
            if k.lower() == ikey:
                return v
        # micro optimization: if we are in get mode we will catch that
        # exception one stack level down so we can raise a standard
        # key error instead of our special one.
        if _get_mode:
            raise KeyError()
        raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
    def __ne__(self, other):
        # Explicit for Python 2, which does not derive ``!=`` from ``==``.
        return not self.__eq__(other)
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of unicode strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode('latin1')
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
:meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of unicode strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
    def get_all(self, name):
        """Return a list of all the values for the named field.
        This method is compatible with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.get_all` method.
        """
        # Thin alias over getlist for wsgiref API compatibility.
        return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
    def remove(self, key):
        """Remove a key.
        :param key: The key to be removed.
        """
        # _index_operation=False forces name-based removal even for ints.
        return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
    def popitem(self):
        """Removes a key or index and returns a (key, value) item."""
        # Delegates to pop() with no key, i.e. removes the last entry.
        return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
    def __iter__(self):
        """Yield ``(key, value)`` tuples."""
        # Iterates the backing list directly, preserving insertion order
        # and duplicate keys.
        return iter(self._list)
    def __len__(self):
        # Number of header entries; duplicate keys are counted individually.
        return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
    def _validate_value(self, value):
        # Headers must be text, and embedded CR/LF would allow header
        # injection (response splitting), so both are rejected outright.
        if not isinstance(value, text_type):
            raise TypeError('Value should be unicode.')
        if u'\n' in value or u'\r' in value:
            raise ValueError('Detected newline in header value. This is '
                             'a potential security problem')
    def add_header(self, _key, _value, **_kw):
        """Add a new header tuple to the list.
        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.add_header` method.
        """
        self.add(_key, _value, **_kw)
    def clear(self):
        """Clears all headers."""
        # Slice-delete so existing references to the list stay valid.
        del self._list[:]
def set(self, _key, _value, **kw):
    """Remove all header tuples for `key` and add a new one.  The newly
    added key either appears at the end of the list if there was no
    entry or replaces the first one.

    Keyword arguments can specify additional parameters for the header
    value, with underscores converted to dashes.  See :meth:`add` for
    more information.

    .. versionchanged:: 0.6.1
       :meth:`set` now accepts the same arguments as :meth:`add`.

    :param key: The key to be inserted.
    :param value: The value to be inserted.
    """
    if kw:
        _value = _options_header_vkw(_value, kw)
    _value = _unicodify_header_value(_value)
    self._validate_value(_value)
    if not self._list:
        self._list.append((_key, _value))
        return
    listiter = iter(self._list)
    ikey = _key.lower()
    for idx, (old_key, old_value) in enumerate(listiter):
        if old_key.lower() == ikey:
            # replace first occurrence
            self._list[idx] = (_key, _value)
            break
    else:
        # key not present at all: append and keep the list intact
        self._list.append((_key, _value))
        return
    # drop every later duplicate of the key; listiter resumes right
    # after the entry that was replaced above
    self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
def setdefault(self, key, value):
    """Returns the value for the key if it is in the dict, otherwise it
    stores `value` for `key` and returns it.

    :param key: The key to be looked up.
    :param value: The value stored and returned if the key is not
                  already present.
    """
    if key in self:
        return self[key]
    self.set(key, value)
    return value
def __setitem__(self, key, value):
    """Like :meth:`set` but also supports index/slice based setting.

    For an integer index `value` is a single ``(key, value)`` tuple;
    for a slice it is an iterable of such tuples.  String keys are
    delegated to :meth:`set`.
    """
    if isinstance(key, (slice, integer_types)):
        if isinstance(key, integer_types):
            # normalize the single-item case to a list so the
            # conversion/validation below is uniform
            value = [value]
        value = [(k, _unicodify_header_value(v)) for (k, v) in value]
        # validate with a plain loop; the original used a throwaway
        # list comprehension purely for its side effect
        for (_, v) in value:
            self._validate_value(v)
        if isinstance(key, integer_types):
            self._list[key] = value[0]
        else:
            self._list[key] = value
    else:
        self.set(key, value)
def to_list(self, charset='iso-8859-1'):
    """Convert the headers into a list suitable for WSGI.

    .. deprecated:: use :meth:`to_wsgi_list` instead; the `charset`
       argument is ignored.
    """
    from warnings import warn
    warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
         stacklevel=2)
    return self.to_wsgi_list()
def to_wsgi_list(self):
    """Convert the headers into a list suitable for WSGI.

    The values are byte strings in Python 2 converted to latin1 and unicode
    strings in Python 3 for the WSGI server to encode.

    :return: list
    """
    if PY2:
        # PEP 3333: on Python 2, header values must be native byte
        # strings encoded as latin1
        return [(to_native(k), v.encode('latin1')) for k, v in self]
    return list(self)
def copy(self):
    """Return a shallow copy of these headers."""
    return self.__class__(self._list)
def __copy__(self):
    # support for copy.copy()
    return self.copy()
def __str__(self):
    """Returns formatted headers suitable for HTTP transmission."""
    lines = ['%s: %s' % (key, value)
             for key, value in self.to_wsgi_list()]
    # the trailing element yields the blank line that terminates an
    # HTTP header block once joined with CRLF
    lines.append('\r\n')
    return '\r\n'.join(lines)
def __repr__(self):
    # repr shows the raw (key, value) tuple list
    return '%s(%r)' % (
        self.__class__.__name__,
        list(self)
    )
class ImmutableHeadersMixin(object):
    """Makes a :class:`Headers` immutable.  We do not mark them as
    hashable though since the only usecase for this datastructure
    in Werkzeug is a view on a mutable structure.

    .. versionadded:: 0.5

    :private:
    """

    # Every mutating method is overridden to call is_immutable(self)
    # (defined elsewhere in this module), which is expected to raise;
    # read-only methods are inherited unchanged.

    def __delitem__(self, key):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)
    set = __setitem__

    def add(self, item):
        is_immutable(self)
    remove = add_header = add

    def extend(self, iterable):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def setdefault(self, key, default):
        is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Read only version of the headers from a WSGI environment.  This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """

    def __init__(self, environ):
        # the environ is referenced, not copied -- this is a live view
        self.environ = environ

    def __eq__(self, other):
        # compares identity of the underlying environ, not contents
        return self.environ is other.environ

    def __getitem__(self, key, _get_mode=False):
        # _get_mode is a no-op for this class as there is no index but
        # used because get() calls it.
        key = key.upper().replace('-', '_')
        # CONTENT_TYPE and CONTENT_LENGTH live in the environ without
        # the HTTP_ prefix (CGI/WSGI convention)
        if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            return _unicodify_header_value(self.environ[key])
        return _unicodify_header_value(self.environ['HTTP_' + key])

    def __len__(self):
        # the iter is necessary because otherwise list calls our
        # len which would call list again and so forth.
        return len(list(iter(self)))

    def __iter__(self):
        for key, value in iteritems(self.environ):
            if key.startswith('HTTP_') and key not in \
               ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                # HTTP_FOO_BAR -> Foo-Bar
                yield (key[5:].replace('_', '-').title(),
                       _unicodify_header_value(value))
            elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
                yield (key.replace('_', '-').title(),
                       _unicodify_header_value(value))

    def copy(self):
        # the view cannot be detached from its environ, so copying is
        # deliberately unsupported
        raise TypeError('cannot create %r copies' % self.__class__.__name__)
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
    instances as sequence and it will combine the return values of all wrapped
    dicts:

    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
    >>> post = MultiDict([('foo', 'bar')])
    >>> get = MultiDict([('blub', 'blah')])
    >>> combined = CombinedMultiDict([get, post])
    >>> combined['foo']
    'bar'
    >>> combined['blub']
    'blah'

    This works for all read operations and will raise a `TypeError` for
    methods that usually change data which isn't possible.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    """

    def __reduce_ex__(self, protocol):
        # pickling support: rebuild from the list of wrapped dicts
        return type(self), (self.dicts,)

    def __init__(self, dicts=None):
        self.dicts = dicts or []

    @classmethod
    def fromkeys(cls):
        raise TypeError('cannot create %r instances by fromkeys' %
                        cls.__name__)

    def __getitem__(self, key):
        # first wrapped dict that has the key wins
        for d in self.dicts:
            if key in d:
                return d[key]
        raise exceptions.BadRequestKeyError(key)

    def get(self, key, default=None, type=None):
        for d in self.dicts:
            if key in d:
                if type is not None:
                    try:
                        return type(d[key])
                    except ValueError:
                        # conversion failed: fall through to later dicts
                        continue
                return d[key]
        return default

    def getlist(self, key, type=None):
        # concatenation of the per-dict lists, in dict order
        rv = []
        for d in self.dicts:
            rv.extend(d.getlist(key, type))
        return rv

    def _keys_impl(self):
        """This function exists so __len__ can be implemented more efficiently,
        saving one list creation from an iterator.

        Using this for Python 2's ``dict.keys`` behavior would be useless since
        `dict.keys` in Python 2 returns a list, while we have a set here.
        """
        rv = set()
        for d in self.dicts:
            rv.update(iterkeys(d))
        return rv

    def keys(self):
        return iter(self._keys_impl())

    __iter__ = keys

    def items(self, multi=False):
        # with multi=False only the first occurrence of each key is
        # yielded, mirroring __getitem__'s first-dict-wins rule
        found = set()
        for d in self.dicts:
            for key, value in iteritems(d, multi):
                if multi:
                    yield key, value
                elif key not in found:
                    found.add(key)
                    yield key, value

    def values(self):
        for key, value in iteritems(self):
            yield value

    def lists(self):
        # merge the value lists of all wrapped dicts per key
        rv = {}
        for d in self.dicts:
            for key, values in iterlists(d):
                rv.setdefault(key, []).extend(values)
        return iteritems(rv)

    def listvalues(self):
        return (x[1] for x in self.lists())

    def copy(self):
        """Return a shallow copy of this object."""
        return self.__class__(self.dicts[:])

    def to_dict(self, flat=True):
        """Return the contents as regular dict.  If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.

        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it.  Otherwise it will only
                     contain the first item for each key.
        :return: a :class:`dict`
        """
        rv = {}
        # iterate in reverse so that earlier dicts overwrite later ones,
        # preserving the first-dict-wins lookup semantics
        for d in reversed(self.dicts):
            rv.update(d.to_dict(flat))
        return rv

    def __len__(self):
        return len(self._keys_impl())

    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False

    has_key = __contains__

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
    """A special :class:`MultiDict` that has convenience methods to add
    files to it.  This is used for :class:`EnvironBuilder` and generally
    useful for unittesting.

    .. versionadded:: 0.5
    """

    def add_file(self, name, file, filename=None, content_type=None):
        """Adds a new file to the dict.  `file` can be a file name or
        a :class:`file`-like or a :class:`FileStorage` object.

        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        if isinstance(file, FileStorage):
            value = file
        else:
            if isinstance(file, string_types):
                if filename is None:
                    filename = file
                # NOTE(review): the file object opened here is handed to
                # FileStorage and never closed by this dict -- the
                # consumer is responsible for closing it
                file = open(file, 'rb')
            if filename and content_type is None:
                content_type = mimetypes.guess_type(filename)[0] or \
                    'application/octet-stream'
            value = FileStorage(file, filename, name, content_type)
        self.add(name, value)
class ImmutableDict(ImmutableDictMixin, dict):
    """An immutable :class:`dict`.

    .. versionadded:: 0.5
    """

    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            dict.__repr__(self),
        )

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return dict(self)

    def __copy__(self):
        # immutable objects can safely return themselves from copy.copy()
        return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
    """An immutable :class:`MultiDict`.

    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return MultiDict(self)

    def __copy__(self):
        # immutable objects can safely return themselves from copy.copy()
        return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
    """An immutable :class:`OrderedMultiDict`.

    .. versionadded:: 0.6
    """

    def _iter_hashitems(self):
        # include the position so that equal items in different order
        # hash differently (order matters for ordered dicts)
        return enumerate(iteritems(self, multi=True))

    def copy(self):
        """Return a shallow mutable copy of this object.  Keep in mind that
        the standard library's :func:`copy` function is a no-op for this class
        like for any other python immutable type (eg: :class:`tuple`).
        """
        return OrderedMultiDict(self)

    def __copy__(self):
        return self
@native_itermethods(['values'])
class Accept(ImmutableList):
    """An :class:`Accept` object is just a list subclass for lists of
    ``(value, quality)`` tuples.  It is automatically sorted by quality.

    All :class:`Accept` objects work similar to a list but provide extra
    functionality for working with the data.  Containment checks are
    normalized to the rules of that header:

    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
    >>> a.best
    'ISO-8859-1'
    >>> 'iso-8859-1' in a
    True
    >>> 'UTF8' in a
    True
    >>> 'utf7' in a
    False

    To get the quality for an item you can use normal item lookup:

    >>> print a['utf-8']
    0.7
    >>> a['utf7']
    0

    .. versionchanged:: 0.5
       :class:`Accept` objects are forced immutable now.
    """

    def __init__(self, values=()):
        if values is None:
            list.__init__(self)
            # `provided` records whether the client actually sent a header
            self.provided = False
        elif isinstance(values, Accept):
            self.provided = values.provided
            list.__init__(self, values)
        else:
            self.provided = True
            # sort on (quality, value) and reverse so the highest
            # quality entries come first
            values = [(a, b) for b, a in values]
            values.sort()
            values.reverse()
            list.__init__(self, [(a, b) for b, a in values])

    def _value_matches(self, value, item):
        """Check if a value matches a given accept item."""
        # subclasses override this to add type-specific normalization
        return item == '*' or item.lower() == value.lower()

    def __getitem__(self, key):
        """Besides index lookup (getting item n) you can also pass it a string
        to get the quality for the item.  If the item is not in the list, the
        returned quality is ``0``.
        """
        if isinstance(key, string_types):
            return self.quality(key)
        return list.__getitem__(self, key)

    def quality(self, key):
        """Returns the quality of the key.

        .. versionadded:: 0.6
           In previous versions you had to use the item-lookup syntax
           (eg: ``obj[key]`` instead of ``obj.quality(key)``)
        """
        for item, quality in self:
            if self._value_matches(key, item):
                return quality
        return 0

    def __contains__(self, value):
        for item, quality in self:
            if self._value_matches(value, item):
                return True
        return False

    def __repr__(self):
        return '%s([%s])' % (
            self.__class__.__name__,
            ', '.join('(%r, %s)' % (x, y) for x, y in self)
        )

    def index(self, key):
        """Get the position of an entry or raise :exc:`ValueError`.

        :param key: The key to be looked up.

        .. versionchanged:: 0.5
           This used to raise :exc:`IndexError`, which was inconsistent
           with the list API.
        """
        if isinstance(key, string_types):
            for idx, (item, quality) in enumerate(self):
                if self._value_matches(key, item):
                    return idx
            raise ValueError(key)
        return list.index(self, key)

    def find(self, key):
        """Get the position of an entry or return -1.

        :param key: The key to be looked up.
        """
        try:
            return self.index(key)
        except ValueError:
            return -1

    def values(self):
        """Iterate over all values."""
        for item in self:
            yield item[0]

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        result = []
        for value, quality in self:
            if quality != 1:
                # quality 1 is the default and is omitted from the header
                value = '%s;q=%s' % (value, quality)
            result.append(value)
        return ','.join(result)

    def __str__(self):
        return self.to_header()

    def best_match(self, matches, default=None):
        """Returns the best match from a list of possible matches based
        on the quality of the client.  If two items have the same quality,
        the one is returned that comes first.

        :param matches: a list of matches to check for
        :param default: the value that is returned if none match
        """
        best_quality = -1
        result = default
        for server_item in matches:
            for client_item, quality in self:
                if quality <= best_quality:
                    # entries are sorted by quality descending, so once
                    # quality drops to the current best nothing further
                    # in this inner loop can win
                    break
                if (self._value_matches(server_item, client_item)
                        and quality > 0):
                    best_quality = quality
                    result = server_item
        return result

    @property
    def best(self):
        """The best match as value."""
        # implicitly returns None if the list is empty
        if self:
            return self[0][0]
class MIMEAccept(Accept):
    """Like :class:`Accept` but with special methods and behavior for
    mimetypes.
    """

    def _value_matches(self, value, item):
        """Match `value` (from the application) against `item` (from the
        client) case-insensitively, honouring ``*`` wildcards in either
        the type or the subtype position.
        """
        def _normalize(x):
            x = x.lower()
            # a bare '*' stands for the full wildcard pair; otherwise
            # split into (type, subtype).  The original used the fragile
            # ``cond and a or b`` idiom here.
            return ('*', '*') if x == '*' else x.split('/', 1)

        # this is from the application which is trusted.  to avoid developer
        # frustration we actually check these for valid values
        if '/' not in value:
            raise ValueError('invalid mimetype %r' % value)
        value_type, value_subtype = _normalize(value)
        if value_type == '*' and value_subtype != '*':
            raise ValueError('invalid mimetype %r' % value)

        # the item comes from the client and is untrusted: malformed
        # entries simply don't match
        if '/' not in item:
            return False
        item_type, item_subtype = _normalize(item)
        if item_type == '*' and item_subtype != '*':
            return False
        return (
            (item_type == item_subtype == '*' or
             value_type == value_subtype == '*') or
            (item_type == value_type and (item_subtype == '*' or
                                          value_subtype == '*' or
                                          item_subtype == value_subtype))
        )

    @property
    def accept_html(self):
        """True if this object accepts HTML."""
        return (
            'text/html' in self or
            'application/xhtml+xml' in self or
            self.accept_xhtml
        )

    @property
    def accept_xhtml(self):
        """True if this object accepts XHTML."""
        return (
            'application/xhtml+xml' in self or
            'application/xml' in self
        )

    @property
    def accept_json(self):
        """True if this object accepts JSON."""
        return 'application/json' in self
class LanguageAccept(Accept):
    """Like :class:`Accept` but with normalization for languages."""

    def _value_matches(self, value, item):
        def _normalize(language):
            # split on locale delimiters (see _locale_delim_re) so e.g.
            # differently-delimited forms of the same tag compare equal
            return _locale_delim_re.split(language.lower())
        return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
    """Like :class:`Accept` but with normalization for charsets."""

    def _value_matches(self, value, item):
        def _normalize(name):
            try:
                # canonical codec name, so aliases like 'utf8'/'UTF-8'
                # compare equal
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()
        return item == '*' or _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
    """Return a new property object for a cache header.  Useful if you
    want to add support for a cache extension in a subclass."""
    def fget(x):
        return x._get_cache_value(key, empty, type)

    def fset(x, v):
        x._set_cache_value(key, v, type)

    def fdel(x):
        x._del_cache_value(key)

    return property(fget, fset, fdel, 'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Cache-Control header.  It
    has accessors for all the cache-control directives specified in RFC 2616.
    The class does not differentiate between request and response directives.

    Because the cache-control directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`CacheControl` object again you can convert
    the object into a string or call the :meth:`to_header` method.  If you plan
    to subclass it and add your own items have a look at the sourcecode for
    that class.

    .. versionchanged:: 0.4

       Setting `no_cache` or `private` to boolean `True` will set the implicit
       none-value which is ``*``:

       >>> cc = ResponseCacheControl()
       >>> cc.no_cache = True
       >>> cc
       <ResponseCacheControl 'no-cache'>
       >>> cc.no_cache
       '*'
       >>> cc.no_cache = None
       >>> cc
       <ResponseCacheControl ''>

       In versions before 0.5 the behavior documented here affected the now
       no longer existing `CacheControl` class.
    """

    # directives valid for both requests and responses
    no_cache = cache_property('no-cache', '*', None)
    no_store = cache_property('no-store', None, bool)
    max_age = cache_property('max-age', -1, int)
    no_transform = cache_property('no-transform', None, None)

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # `provided` distinguishes an absent header (values=None) from
        # a present-but-empty one
        self.provided = values is not None

    def _get_cache_value(self, key, empty, type):
        """Used internally by the accessor properties."""
        if type is bool:
            # boolean directives are valueless: presence means True
            return key in self
        if key in self:
            value = self[key]
            if value is None:
                # directive present without a value -> implicit default
                return empty
            elif type is not None:
                try:
                    value = type(value)
                except ValueError:
                    # keep the raw string if conversion fails
                    pass
            return value

    def _set_cache_value(self, key, value, type):
        """Used internally by the accessor properties."""
        if type is bool:
            if value:
                self[key] = None
            else:
                self.pop(key, None)
        else:
            if value is None:
                # clearing an unset directive must be a no-op; the bare
                # ``self.pop(key)`` used previously raised KeyError here
                self.pop(key, None)
            elif value is True:
                self[key] = None
            else:
                self[key] = value

    def _del_cache_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a cache control header."""
        return dump_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            " ".join(
                "%s=%r" % (k, v) for k, v in sorted(self.items())
            ),
        )
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
    """A cache control for requests.  This is immutable and gives access
    to all the request-relevant cache control headers.

    To get a header of the :class:`RequestCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    # request-only directives (shared ones are inherited)
    max_stale = cache_property('max-stale', '*', int)
    min_fresh = cache_property('min-fresh', '*', int)
    no_transform = cache_property('no-transform', None, None)
    only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
    """A cache control for responses.  Unlike :class:`RequestCacheControl`
    this is mutable and gives access to response-relevant cache control
    headers.

    To get a header of the :class:`ResponseCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """

    # response-only directives (shared ones are inherited)
    public = cache_property('public', None, bool)
    private = cache_property('private', '*', None)
    must_revalidate = cache_property('must-revalidate', None, bool)
    proxy_revalidate = cache_property('proxy-revalidate', None, bool)
    s_maxage = cache_property('s-maxage', None, None)
# Attach cache_property to _CacheControl as a staticmethod so that
# subclasses defined in other modules can reuse it to declare extra
# cache-control directives.
_CacheControl.cache_property = staticmethod(cache_property)
class CallbackDict(UpdateDictMixin, dict):
    """A dict that calls a function passed every time something is changed.
    The function is passed the dict instance.
    """

    def __init__(self, initial=None, on_update=None):
        dict.__init__(self, initial or ())
        # the mutation hooks come from UpdateDictMixin, which invokes
        # on_update(self) -- see that mixin for details
        self.on_update = on_update

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            dict.__repr__(self)
        )
class HeaderSet(object):
    """Similar to the :class:`ETags` class this implements a set-like structure.
    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
    content-language headers.

    If not constructed using the :func:`parse_set_header` function the
    instantiation works like this:

    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
    >>> hs
    HeaderSet(['foo', 'bar', 'baz'])
    """

    def __init__(self, headers=None, on_update=None):
        # _headers keeps the original casing and order; _set is the
        # lowercased view used for fast case-insensitive membership.
        self._headers = list(headers or ())
        self._set = set([x.lower() for x in self._headers])
        self.on_update = on_update

    def add(self, header):
        """Add a new header to the set."""
        self.update((header,))

    def remove(self, header):
        """Remove a header from the set.  This raises an :exc:`KeyError` if the
        header is not in the set.

        .. versionchanged:: 0.5
            In older versions a :exc:`IndexError` was raised instead of a
            :exc:`KeyError` if the object was missing.

        :param header: the header to be removed.
        """
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        for idx, item in enumerate(self._headers):
            # Compare lowercased against lowercased.  The previous code
            # compared ``item.lower() == header`` (the original-case
            # argument), so removing with a mixed-case argument left a
            # stale entry in _headers while _set was already updated.
            if item.lower() == key:
                del self._headers[idx]
                break
        if self.on_update is not None:
            self.on_update(self)

    def update(self, iterable):
        """Add all the headers from the iterable to the set.

        :param iterable: updates the set with the items from the iterable.
        """
        inserted_any = False
        for header in iterable:
            key = header.lower()
            if key not in self._set:
                self._headers.append(header)
                self._set.add(key)
                inserted_any = True
        # notify only once per update, and only if something changed
        if inserted_any and self.on_update is not None:
            self.on_update(self)

    def discard(self, header):
        """Like :meth:`remove` but ignores errors.

        :param header: the header to be discarded.
        """
        try:
            return self.remove(header)
        except KeyError:
            pass

    def find(self, header):
        """Return the index of the header in the set or return -1 if not found.

        :param header: the header to be looked up.
        """
        header = header.lower()
        for idx, item in enumerate(self._headers):
            if item.lower() == header:
                return idx
        return -1

    def index(self, header):
        """Return the index of the header in the set or raise an
        :exc:`IndexError`.

        :param header: the header to be looked up.
        """
        rv = self.find(header)
        if rv < 0:
            raise IndexError(header)
        return rv

    def clear(self):
        """Clear the set."""
        self._set.clear()
        del self._headers[:]
        if self.on_update is not None:
            self.on_update(self)

    def as_set(self, preserve_casing=False):
        """Return the set as real python set type.  When calling this, all
        the items are converted to lowercase and the ordering is lost.

        :param preserve_casing: if set to `True` the items in the set returned
                                will have the original case like in the
                                :class:`HeaderSet`, otherwise they will
                                be lowercase.
        """
        if preserve_casing:
            return set(self._headers)
        return set(self._set)

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        return ', '.join(map(quote_header_value, self._headers))

    def __getitem__(self, idx):
        return self._headers[idx]

    def __delitem__(self, idx):
        rv = self._headers.pop(idx)
        self._set.remove(rv.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __setitem__(self, idx, value):
        old = self._headers[idx]
        self._set.remove(old.lower())
        self._headers[idx] = value
        self._set.add(value.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __contains__(self, header):
        return header.lower() in self._set

    def __len__(self):
        return len(self._set)

    def __iter__(self):
        return iter(self._headers)

    def __nonzero__(self):
        return bool(self._set)
    # Python 3 truthiness; mirrors ContentRange which aliases the same way
    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '%s(%r)' % (
            self.__class__.__name__,
            self._headers
        )
class ETags(object):
    """A set that can be used to check if one etag is present in a collection
    of etags.
    """

    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
        # with star_tag the strong set is ignored: '*' matches everything
        self._strong = frozenset(() if star_tag else strong_etags or ())
        self._weak = frozenset(weak_etags or ())
        self.star_tag = star_tag

    def as_set(self, include_weak=False):
        """Convert the `ETags` object into a python set.  Per default all the
        weak etags are not part of this set."""
        rv = set(self._strong)
        if include_weak:
            rv.update(self._weak)
        return rv

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        return self.is_weak(etag) or self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        if self.star_tag:
            return True
        return etag in self._strong

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set.  If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        etag, weak = unquote_etag(etag)
        if weak:
            return self.contains_weak(etag)
        return self.contains(etag)

    def to_header(self):
        """Convert the etags set into a HTTP header string."""
        if self.star_tag:
            return '*'
        return ', '.join(
            ['"%s"' % x for x in self._strong] +
            ['w/"%s"' % x for x in self._weak]
        )

    def __call__(self, etag=None, data=None, include_weak=False):
        # exactly one of etag/data must be given
        if [etag, data].count(None) != 1:
            raise TypeError('either tag or data required, but at least one')
        if etag is None:
            etag = generate_etag(data)
        if include_weak:
            if etag in self._weak:
                return True
        return etag in self._strong

    def __nonzero__(self):
        return bool(self.star_tag or self._strong or self._weak)
    # Python 3 truthiness; previously missing, so an empty ETags object
    # was always truthy on Python 3 (ContentRange already aliases this)
    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class IfRange(object):
    """Very simple object that represents the `If-Range` header in parsed
    form.  It will either have neither a etag or date or one of either but
    never both.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: The etag parsed and unquoted.  Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self):
        """Converts the object back into an HTTP header."""
        # an empty object serializes to the empty string
        if self.date is None and self.etag is None:
            return ''
        # the date takes precedence when both happen to be set
        if self.date is not None:
            return http_date(self.date)
        return quote_etag(self.etag)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
    """Represents a range header.  All the methods are only supporting bytes
    as unit.  It does store multiple ranges but :meth:`range_for_length` will
    only work if only one range is provided.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range.  Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges

    def range_for_length(self, length):
        """If the range is for bytes, the length is not None and there is
        exactly one range and it is satisfiable it returns a ``(start, stop)``
        tuple, otherwise `None`.
        """
        if self.units != 'bytes' or length is None or len(self.ranges) != 1:
            return None
        start, end = self.ranges[0]
        if end is None:
            # open-ended range runs to the end of the resource
            end = length
        if start < 0:
            # negative start is a suffix range, counted from the end
            start += length
        if is_byte_range_valid(start, end, length):
            return start, min(end, length)
        # explicit for clarity: unsatisfiable ranges yield None
        return None

    def make_content_range(self, length):
        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
        from the current range and given content length.
        """
        rng = self.range_for_length(length)
        if rng is not None:
            return ContentRange(self.units, rng[0], rng[1], length)

    def to_header(self):
        """Converts the object back into an HTTP header."""
        ranges = []
        for begin, end in self.ranges:
            if end is None:
                # "n-" for an open-ended range, "-n" for a suffix range;
                # the original used the fragile ``cond and a or b`` idiom
                ranges.append('%s-' % begin if begin >= 0 else str(begin))
            else:
                # stored ranges are non-inclusive; HTTP ranges are inclusive
                ranges.append('%s-%s' % (begin, end - 1))
        return '%s=%s' % (self.units, ','.join(ranges))

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class ContentRange(object):
    """Represents the content range header.

    .. versionadded:: 0.7
    """

    def __init__(self, units, start, stop, length=None, on_update=None):
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        self.on_update = on_update
        self.set(start, stop, length, units)

    def _callback_property(name):
        # descriptor factory: reads/writes the underscored attribute and
        # fires on_update after every assignment.  Defined in the class
        # body and used only to build the properties below.
        def fget(self):
            return getattr(self, name)

        def fset(self, value):
            setattr(self, name, value)
            if self.on_update is not None:
                self.on_update(self)
        return property(fget, fset)

    #: The units to use, usually "bytes"
    units = _callback_property('_units')
    #: The start point of the range or `None`.
    start = _callback_property('_start')
    #: The stop point of the range (non-inclusive) or `None`.  Can only be
    #: `None` if also start is `None`.
    stop = _callback_property('_stop')
    #: The length of the range or `None`.
    length = _callback_property('_length')

    def set(self, start, stop, length=None, units='bytes'):
        """Simple method to update the ranges."""
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        # assign the underscored attributes directly so on_update fires
        # only once at the end instead of once per property
        self._units = units
        self._start = start
        self._stop = stop
        self._length = length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self):
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self):
        if self.units is None:
            return ''
        if self.length is None:
            # unknown total length is rendered as '*'
            length = '*'
        else:
            length = self.length
        if self.start is None:
            # unsatisfied range: "bytes */<length>"
            return '%s */%s' % (self.units, length)
        # stored stop is non-inclusive; the header uses inclusive bounds
        return '%s %s-%s/%s' % (
            self.units,
            self.start,
            self.stop - 1,
            length
        )

    def __nonzero__(self):
        return self.units is not None

    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
    """Indicates what "quality of protection" the client has applied to
    the message for HTTP digest auth."""
    def on_update(header_set):
        # Write any change to the set back into this mapping; an emptied
        # set removes the key entirely.
        if header_set:
            self['qop'] = header_set.to_header()
        elif 'qop' in self:
            del self['qop']
    return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
    """Provides simple access to `WWW-Authenticate` headers."""

    #: list of keys that require quoting in the generated header
    _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm', 'qop'])

    def __init__(self, auth_type=None, values=None, on_update=None):
        # Challenge parameters live in the dict base class; the scheme is
        # stored under a reserved key so it travels with the mapping.
        dict.__init__(self, values or ())
        if auth_type:
            self['__auth_type__'] = auth_type
        self.on_update = on_update

    def set_basic(self, realm='authentication required'):
        """Clear the auth info and enable basic auth."""
        # Use dict.clear/dict.update directly (bypassing UpdateDictMixin)
        # so on_update fires once at the end instead of per key.
        dict.clear(self)
        dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
        if self.on_update:
            self.on_update(self)

    def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
                   algorithm=None, stale=False):
        """Clear the auth info and enable digest auth."""
        d = {
            '__auth_type__': 'digest',
            'realm': realm,
            'nonce': nonce,
            'qop': dump_header(qop)
        }
        # Optional digest parameters are only emitted when provided.
        if stale:
            d['stale'] = 'TRUE'
        if opaque is not None:
            d['opaque'] = opaque
        if algorithm is not None:
            d['algorithm'] = algorithm
        dict.clear(self)
        dict.update(self, d)
        if self.on_update:
            self.on_update(self)

    def to_header(self):
        """Convert the stored values into a WWW-Authenticate header."""
        d = dict(self)
        # The scheme key is not a real challenge parameter; pop it out and
        # default to basic when unset.
        auth_type = d.pop('__auth_type__', None) or 'basic'
        return '%s %s' % (auth_type.title(), ', '.join([
            '%s=%s' % (key, quote_header_value(value,
                       allow_token=key not in self._require_quoting))
            for key, value in iteritems(d)
        ]))

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.to_header()
        )

    def auth_property(name, doc=None):
        """A static helper function for subclasses to add extra authentication
        system properties onto a class::

            class FooAuthenticate(WWWAuthenticate):
                special_realm = auth_property('special_realm')

        For more information have a look at the sourcecode to see how the
        regular properties (:attr:`realm` etc.) are implemented.
        """
        def _set_value(self, value):
            # Assigning None removes the parameter; everything else is
            # coerced to str before being stored.
            if value is None:
                self.pop(name, None)
            else:
                self[name] = str(value)
        return property(lambda x: x.get(name), _set_value, doc=doc)

    def _set_property(name, doc=None):
        # Like auth_property but for set-valued parameters (domain, qop):
        # the getter returns a parsed header set that writes itself back
        # into this mapping whenever it is modified.
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self:
                    del self[name]
                elif header_set:
                    self[name] = header_set.to_header()
            return parse_set_header(self.get(name), on_update)
        return property(fget, doc=doc)

    type = auth_property('__auth_type__', doc='''
        The type of the auth mechanism. HTTP currently specifies
        `Basic` and `Digest`.''')
    realm = auth_property('realm', doc='''
        A string to be displayed to users so they know which username and
        password to use. This string should contain at least the name of
        the host performing the authentication and might additionally
        indicate the collection of users who might have access.''')
    domain = _set_property('domain', doc='''
        A list of URIs that define the protection space. If a URI is an
        absolute path, it is relative to the canonical root URL of the
        server being accessed.''')
    nonce = auth_property('nonce', doc='''
        A server-specified data string which should be uniquely generated
        each time a 401 response is made. It is recommended that this
        string be base64 or hexadecimal data.''')
    opaque = auth_property('opaque', doc='''
        A string of data, specified by the server, which should be returned
        by the client unchanged in the Authorization header of subsequent
        requests with URIs in the same protection space. It is recommended
        that this string be base64 or hexadecimal data.''')
    algorithm = auth_property('algorithm', doc='''
        A string indicating a pair of algorithms used to produce the digest
        and a checksum. If this is not present it is assumed to be "MD5".
        If the algorithm is not understood, the challenge should be ignored
        (and a different one used, if there is more than one).''')
    qop = _set_property('qop', doc='''
        A set of quality-of-privacy directives such as auth and auth-int.''')

    def _get_stale(self):
        # Returns None when the flag is absent, True/False otherwise.
        val = self.get('stale')
        if val is not None:
            return val.lower() == 'true'

    def _set_stale(self, value):
        # None removes the flag; otherwise store the RFC 2617 literals.
        if value is None:
            self.pop('stale', None)
        else:
            self['stale'] = value and 'TRUE' or 'FALSE'

    stale = property(_get_stale, _set_stale, doc='''
        A flag, indicating that the previous request from the client was
        rejected because the nonce value was stale.''')
    del _get_stale, _set_stale

    # make auth_property a staticmethod so that subclasses of
    # `WWWAuthenticate` can use it for new properties.
    auth_property = staticmethod(auth_property)
    del _set_property
class FileStorage(object):
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files.  All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(self, stream=None, filename=None, name=None,
                 content_type=None, content_length=None,
                 headers=None):
        # ``name`` is the name of the form field the file was posted under.
        self.name = name
        # Fall back to a shared empty stream so attribute proxying always
        # has a valid target.
        self.stream = stream or _empty_stream

        # if no filename is provided we can attempt to get the filename
        # from the stream object passed. There we have to be careful to
        # skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angular brackets.
        if filename is None:
            filename = getattr(stream, 'name', None)
            s = make_literal_wrapper(filename)
            if filename and filename[0] == s('<') and filename[-1] == s('>'):
                filename = None

            # On Python 3 we want to make sure the filename is always unicode.
            # This might not be if the name attribute is bytes due to the
            # file being opened from the bytes API.
            if not PY2 and isinstance(filename, bytes):
                filename = filename.decode(sys.getfilesystemencoding(),
                                           'replace')

        self.filename = filename
        if headers is None:
            headers = Headers()
        self.headers = headers
        if content_type is not None:
            headers['Content-Type'] = content_type
        if content_length is not None:
            headers['Content-Length'] = str(content_length)

    def _parse_content_type(self):
        # Parse lazily and cache the result; the header does not change
        # after the upload has been received.
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.content_type)

    @property
    def content_type(self):
        """The content-type sent in the header. Usually not available"""
        return self.headers.get('content-type')

    @property
    def content_length(self):
        """The content-length sent in the header. Usually not available"""
        # Falls back to 0 when the header is missing or empty.
        return int(self.headers.get('content-length') or 0)

    @property
    def mimetype(self):
        """Like :attr:`content_type` but without parameters (eg, without
        charset, type etc.). For example if the content
        type is ``text/html; charset=utf-8`` the mimetype would be
        ``'text/html'``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0]

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(self, dst, buffer_size=16384):
        """Save the file to a destination path or file object. If the
        destination is a file object you have to close it yourself after the
        call. The buffer size is the number of bytes held in memory during
        the copy process. It defaults to 16KB.

        For secure file saving also have a look at :func:`secure_filename`.

        :param dst: a filename or open file object the uploaded file
                    is saved to.
        :param buffer_size: the size of the buffer. This works the same as
                            the `length` parameter of
                            :func:`shutil.copyfileobj`.
        """
        from shutil import copyfileobj
        close_dst = False
        # A string destination means we opened the file and must close it.
        if isinstance(dst, string_types):
            dst = open(dst, 'wb')
            close_dst = True
        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self):
        """Close the underlying file if possible."""
        try:
            self.stream.close()
        except Exception:
            # Best effort -- some stream-like objects cannot be closed.
            pass

    def __nonzero__(self):
        # Truthiness mirrors "was a file actually uploaded".
        return bool(self.filename)
    __bool__ = __nonzero__

    def __getattr__(self, name):
        # Proxy every unknown attribute to the wrapped stream.
        return getattr(self.stream, name)

    def __iter__(self):
        return iter(self.readline, '')

    def __repr__(self):
        return '<%s: %r (%r)>' % (
            self.__class__.__name__,
            self.filename,
            self.content_type
        )
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
|
denys-duchier/kivy
|
refs/heads/master
|
examples/gestures/my_gestures.py
|
29
|
#!/usr/bin/env python
from kivy.gesture import GestureDatabase

# Shared gesture database; the serialized gesture strings defined below
# are deserialized into it via str_to_gesture.
gdb = GestureDatabase()
cross = gdb.str_to_gesture(
'eNq1l9tu3DYQhu/1It6bLjicE+cFtrcF/ACBYwv2Iqkt7G7a5u1DDqlT0lZ7I2Mxsj5JP2f4k'
'xR1OH85//X9+Npfb98uffd7Ow6hO7wM0D0+vD/92T90Q8z/5gN218eH6+3y8aW/5lPqDl8H7g'
'7/KvLot3WDFCnNzw8f5/dbeSyVx+w/Hvuj3NUNUDMoKXzPj0DsTuGIGiRJGCOVbP4pV7E7/Ra'
'ORJZMwhS1u35++v9WyFvh7rU2ENEMJI1RuLu+NnEEgyhpjEG2xb1y0H3Ek4vbKA6kEmyKEWdx'
'WPaJsaVN8eidH2Ef8ejiOIqHdbeQLvolaLTFLxlty7ulsVlaNBKChCnmEpp+uRSSIozRBLbl3'
'dSoe8m7rdF2kkc3FmGSB1FVphYT6qSejY2sOsXtyYRuLOIkHgEtGrZIKJP4Spk13ZG524qzrX'
'FWLlHmfok/9UvcFndTUe8Rz8OVA7RIYXtAoluKdke3hJU42j0dQ24pwV7ybiptm1oGE+Rz4il'
'u9w25q8R3qQtnZ2WKd+TutpLeox4pZmt1jHlh3VR3X8nuUceFdIm4qc5uKy9mapCY339jXKb+'
'08tjewlmN5VxH3H3lBcLcASZfzGv4osFPiVk5SnmCb6p766y7qbvvvL0Zg2GKtHGCDjPJ2FKi'
'cfI2wuNuKsCu2i7qYLzdiP3BXGLYIs1DEAo0hh1eyaJeyq8i7b7KdM26ddNXpPO7SCSTnHbSn'
'ErxXaQVndSJyeJGQOMMe+RJm1aLrk5bku7kYp7SLuPOvlI6/FHs4+87hH2Fats/p8vff8+beV'
'Vyl5etTucMA/a7kSE+XAbNHVPmcGK2ZKBsxSciTPUyqAwUmdRKouFCToDqgwLU3MWQmVUmDnD'
'1O7jwsCfOuXFt0JxGKNDaS1rhVwhV5gqpBW0CnEJLaw0G4QKwwrGmlJaQaxw1bp5QRBsBb2iZ'
'MvczQtSWjGvx09m5uXwmnk1tNKDEGYbZli94TX0aqg1nRrE5Z3WoFdDtWyFBr0aqR2URljLqa'
'1bbNDrsToywth69QfqGIrcaDVoPSoBqkNtbDE1Wi3iqsAtV6gesSdL0vKCalLNdqa5rjpB3vr'
'z69utfJTmz8qTFclM/z6/3N4cSteSyvT28bW/PL0/935Ffdsd1n9Q7muT+dNw+Xj59lzFU+6W'
'Y5L8ug5qwTR/PFH5bDz+AM/6Dqo=')
circle = gdb.str_to_gesture(
'eNq1WNtyGzcMfd8fiV+iIS4EyB9QXzuTD+g4icbxpLU1ttI2f18QoHa5jpy12qlGAyUw9hA4B'
'yQh3dx/vf/z++7u8Hz69nSYfumfxzTdfD7C9OHdw+0fh3fTEe2f9kHT84d3z6enx6+HZ/svTz'
'e/H/N0cxHkg4dNR2lQas8fH+8fTu2x0h6rrzz2a4uajhAZtBS+2yOA0z7tCDMkqjk1y0W1pfN'
'3+zNN+/dpl0pGqmdLJcv0/PH25+uwr5Onu74EMxErVLeGMT3fNXQDT4C1kopbLVBhG92LB91G'
'h6RJinRb5A2ZF8euM/aP5JyxhVVShbBiJW9ho7OPcMZG1YTzWzTP2LgAm8XyBmx0bJqxsfSkm'
'8UqMzaRluVduW5ju5o4qwkKmFWruM28cGLNcW3eriXOWi5kv8zbNK7GcwmbabtP0LXEWcuB7M'
'Y3pSVxVuRMiGFJaBOcXEzqYrZtwglLEsluBdOSen7R5LiN7nISzehAwsQZwC3yfwJ3PSnP4FZ'
'31fnNrZX/PS8uKOkCvpBiFkfSF2Sz3PpoC9wVpTqDExXKmkvYmoc274S8nRZ2RXlR1B5u5xGG'
'1bKcK7a5rtz77ILyIihVSVzL2bb8zuB4LefsgnJ+AzjQtTuUXVBeBCVRTKSMbq1HA/u9b7DMo'
'hpWSLbPFnZFuf4/6NklzYOki57N1rzAQ8YV/HY3Ztc0L5piXR0BMqAj1GItLmELb/OeXdQ87F'
'KB8QhIOKDXOXGzOb2BGVc1D9sUaNxJNiPM6LQkbpbS9lbKrmpeVAXluRtTTloXdDs2B95z3UY'
'XV1UWVe2aPydutqAO6HIt7+KqCm0f7A2+Fk1JkNyiTQ+b8C6rzJdpyrkoQUlhUx6IZ61XEi8u'
'qyzXKSODTSphAcsAjnBlz4irKvN9CjVxsXEwu2UZ9iopXcmLuqgKWwOMb2JJNoh2y284B9Q11'
'WU6En2NFuRrFVVXVJdhN71QdDhjBIe3a/3xtg3/n54Oh4d5lFdps7zNmjd7Rt2laY9ad3V88X'
'Q6apluW0TxiAL2Yc7qTkqjs6RwSjgxnOBOhpUTw6krJ7kz06up2D3iETUiNB7L7pT+WLZhf3h'
'Ji5CIiLyU4zENZ5SlPYWoVSNZTeGMWv25vWnqzhq1amBKYNaotcTjuUdGrSXy8+rMGbWW7E6u'
'66SpRUStJRbgvGZDW0QUXqICjgqqjE6q4dSVM19Yrawi4MJqwUIJyTBqg5TGOpC6F1be9ON6t'
'tdGVkB+XNA2ZYREjwFfCgmSNJKC8/JBjMbyAN0bzLjS5k3dG9RoLJJK95axY9K52KBAohGShB'
'eCAgncdIFcABj79nJI8JHzT0L6/kg/CQk+mFd5Q/BBtKoRZNzXZz4g+MC0YhSCD+hqXWhGgCA'
'n1dcFxWAqdV0uND1gMJWCTIyeBnRy7CtZeLl7KbxRLPaykMObXu1k+1rpIb2VL20Hm9siJFKl'
'TiTqylu7t4SXx31o35ZW3p4zpZVXLyxNsAq5xBIFH/1syH1FCj76Hs69PSn4KHU8k2w+D5aCZ'
'+lbhKLqGvVJF5+i6hp0yRk3WiLFWSedfepdQOMBCtyFl/GsBe5a9xO4r8axESCNh7UNo+GFVw'
'95YB57VHuaHL0Pebwy7DYPb79IevLcex/W3n7/0dobhWI+X1buzVEoytprhcYN/OVwf/fl1H7'
'0sql+ry83CbbfxP66/3z64iF2m9sJZxjmPT3+fni6ffh08L9w/IixfkGL68PDb8enx8/fPsVS'
'edrnnc05diDYoIA2CpbUvu7t/gFuoPx3')
check = gdb.str_to_gesture(
'eNq1l0tuI0cMhvd9EXsTofgmL6BsA/gAgcYWbGMmtmBpksztwyY1kgZI0rNpbdr+u+pjkX+9+'
'v718+uf3zbP++Pp68d++vX8PIzp/ukA08Pd2+6P/d10wPwzHzQdH+6Op4/3z/tj/svT/ZeDTP'
'f/CnmoZtNBZ5Rl/8P769tp7uZzt/iPbr/NraYD9AjmIXzLLoDTdmxgBLsSMMIw5OHzcP6eX9O'
'0/WVsCMFGmCuaGgjBdPy0+/8wXGFkeu4Ig7LzgISbMw/j6fh8hmMMBDIkdBYPimV4pQ7W8EQM'
'UA4LMhMzCyW5watKgJoOHhxsuIz3wscVb8ExhpIxDoIgvcEzhoaiZ1geA20Rj+UAwlp4LDxd8'
'OkqO4KHYXLI4oYOwEai4oPRmHiZXr6iXOiY9mXBFUNYacAVDm4uIOCiaXws24plK9oq7PIUr5'
'6iOqsyg+acyOrezJicpTCI3YYy5HtapFNZSldLCcRgCCk4hqniDZ2dHZjCMf1EXh47laNEN2M'
'fEjkzJFgtK3BLHwEAGTcnlSu7LNPLUbo6+n2m5GbAuVzUr/ShYQzoSBSWi5iXJwyVqWRr4ctX'
'+om1mq+GR7ibAA8dJuHLfC5nGVbjl7dMS/tk1g3tB3OXZyWXsyyrsMtWtlXY5SnHGmwpPwW+s'
'3M15h5CkVvM8FyWcWH3Uce5io00A/8Eu7wUWoVdXoqswi4v5eIlseTWl6e9Cqh6+IUt6OoUwA'
'JD88KwjC4rJVZAazmpsAa6jFRaA10+qqyBLht1DRu1bNQ1bLSy0S425mWFOc9khtxcnRwuaEW'
'6PZnH8sFp5aPRKuwy0i5GMnsmHJE32dz73a5oG3nJgLyemqDgXKtPu/m2//ix379d7u6m8+Xd'
'bLrf5jg2Y9picD5OB/NpN4vQIrYYLWKJHiX6aJFblBahRSvRvEUsETuQjRapxe6uHSjPsBK7u'
'0CL0mKUyLKJ25/NLbRaUI+PzwGsRWrxzOr0qKNy5+ydHnVUzsftj7JFdK7cAUSrW3SugrdpRe'
'eqdluV6Fxdu6hdv6hcCeYabQl7KFG5ElWpiM6itqgtWovWorXYlU6/d234y/71+eWUVud1dRv'
'z2xT/en06vcwfWiP7QFU51dP7l/3H7u1xX2+gP9F+/MHc7jw1fz98vD99fTxV65yN6YfP81jE'
'8nDMa8j81bD5B2R9zCo=')
square = gdb.str_to_gesture(
'eNq1mEluIzcYRvd1EXsT4Z+HC6i3AXyAwG0LttEdW7DUSfr2YZHVEgk4KQGCtZH8RD4OH4eSb'
'1++vfz1c/O0Oxx/vO+mL8v7Hqbbxz1Odzev93/ubqY9lY/ljafD3c3h+P72bXcof8p0+32v0+'
'2HkrtabNrbrPJSf//28nqcq8VcLf+j2u9zqWmPrQdzF36WKkjTFjasKZKMaJwqGTp355/5a56'
'2v8FGREAVLIINymebDl/v/78Zqc3o9LS04EgogK6ECeSlhadFTomIHElsKqoS6/I6dPQL5JiW'
'aSTKYMZEuS6PKs8L5DD03DB11U51+gnX7EVuGOQUgqDIxHGBnKqc1+XkJXLMyAyXUFkPlGqgp'
'J/irnmSr7s5kThBIhAB5RJ3jZPOcYpbiIG6elkc81Je3DL222TVzTVMPoVJggFESoEGBIJndy'
'irWWpYRonyAnfNkk9ZQkQYghuLSlnI7tfIa5i8hDmvZFBSUwdH5ixDgKv6XvNkP+mxbDxgo1S'
'2JIhr3DVPzpOb0IGQ3DTVhTPP8nEH+bpcaqCCJzkHOjlqpISFAp/lZMqMZqIBLnCBvCYqfJKL'
'EqNyCgkFFNlZjt0qJaD1rS81UTknKuEGqYgRGUZdnsxlnapilrkiKafjurzmKec8Sy12KTPvT'
'mWn5lle9mS/B2z9MJcaqOSqvC7TflRlkZZ7bM2vNVPFT/PXWJUv8ZO5hkiAQVkz5Wpat9dcVT'
'/JXoNVv8Quoz3Wl43WZDU/x241V1vfq+UrRTFhLicyAxpecG1YTdXOqVLXP4XSRmcHCg/NIPG'
'AcnOs22uqdk4Vx777dfaaqp2uU+jPWGdkvcpeU7X8HLvXVP10pWJKOdSFksIMSgNXyWuofrpT'
'yyUKaewgxuCJeJW8ZuqnB6TxIATspkXGJ16r8vkHwMP7bvd6epx3m5/ny0V/uy1X4wamLaWVt'
'+PeY7qfoQ8wG8wGvcKACjMGiDMsj/EDpApRB8gV0lhdKpSxulaoI7QKDQfoDfIAY4Cyyf4lc4'
'lsJaSV4FotYYDzCd69bC6BfYloU5XUQ88GeYBLA9JD+6BfqX0J1VZtGThVKD72i+cS3k+NUKv'
'WZkFbhMwfVMu+BOXYHy8lENqcaMuM2vyWp+WB2kJpoLrQNhOqI21Toa3HJAttw5c2fOKFtvGz'
'jrSNmWikbdAAI11WMw8U23L2HCkOdBkxUk8/msyyCVsj0IeAKAP9aI5R+yJ1+mZqPXVYqPc0f'
'jUSA82FZk9zmWNqo45hG5WfSB1lWOaCqDMwLDkTD/SXoQy0nTzPu5en52P9f4BO25jbLfTvl8'
'fjc4VWoDZ4fPu+e79/fdjVL7we0jC+cC63nJJ/7N/fHn88NHdM29xw+VUXOp+Mzm7FcPi6+Re'
'GcFi7')
|
elit3ge/SickRage
|
refs/heads/master
|
lib/babelfish/converters/name.py
|
88
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from . import LanguageEquivalenceConverter
from ..language import LANGUAGE_MATRIX
class NameConverter(LanguageEquivalenceConverter):
    """Converter between language names and alpha3 codes, built from
    LANGUAGE_MATRIX (entries without a name are skipped).
    """
    # NOTE(review): CASE_SENSITIVE is presumably consumed by the base
    # converter to make lookups case-insensitive -- confirm there.
    CASE_SENSITIVE = False
    SYMBOLS = {}
    for iso_language in LANGUAGE_MATRIX:
        if not iso_language.name:
            continue
        SYMBOLS[iso_language.alpha3] = iso_language.name
|
msingh172/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/tutv.py
|
94
|
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import compat_parse_qs
class TutvIE(InfoExtractor):
    """Extractor for video pages on tu.tv."""

    _VALID_URL = r'https?://(?:www\.)?tu\.tv/videos/(?P<id>[^/?]+)'
    _TEST = {
        'url': 'http://tu.tv/videos/robots-futbolistas',
        'md5': '627c7c124ac2a9b5ab6addb94e0e65f7',
        'info_dict': {
            'id': '2973058',
            'ext': 'flv',
            'title': 'Robots futbolistas',
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)

        # The numeric id embedded in the page is what the flvurl endpoint
        # expects, not the slug from the URL.
        internal_id = self._search_regex(
            r'codVideo=([0-9]+)', page, 'internal video ID')

        flv_info = self._download_webpage(
            'http://tu.tv/flvurl.php?codVideo=%s' % internal_id,
            display_id, 'Downloading video info')
        # The real URL comes back base64-encoded in the 'kpt' query field.
        encoded_url = compat_parse_qs(flv_info)['kpt'][0]
        video_url = base64.b64decode(
            encoded_url.encode('utf-8')).decode('utf-8')

        return {
            'id': internal_id,
            'url': video_url,
            'title': self._og_search_title(page),
        }
|
jrising/research-common
|
refs/heads/master
|
python/geogrid/geotiffgrid.py
|
1
|
import gdal
from spacegrid import SpatialGrid
class GeotiffGrid(SpatialGrid):
    """A SpatialGrid backed by band 1 of a GeoTIFF file."""

    def __init__(self, filepath):
        dataset = gdal.Open(filepath)
        # GDAL geotransform: origin x, pixel width, rotation, origin y,
        # rotation, pixel height.
        x0_corner, sizex, _rot1, y0_corner, _rot2, sizey = \
            dataset.GetGeoTransform()
        self.array = dataset.GetRasterBand(1).ReadAsArray()
        super(GeotiffGrid, self).__init__(
            x0_corner, y0_corner, sizex, sizey,
            self.array.shape[1], self.array.shape[0])

    def getll_raw(self, latitude, longitude):
        """Return the raw cell value at the given latitude/longitude."""
        return self.array[self.rowcol(latitude, longitude)]
|
ashemedai/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_label.py
|
33
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata: maturity and support level.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_label
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower label.
description:
- Create, update, or destroy Ansible Tower labels. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the label.
required: True
default: null
organization:
description:
- Organization the label should be applied to.
required: True
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add label to tower organization
tower_label:
name: Custom Label
organization: My Organization
state: present
tower_config_file: "~/tower_cli.cfg"
'''
# tower-cli is an optional dependency; record availability so main() can
# fail with a clear message instead of an ImportError traceback.
try:
    import tower_cli
    import tower_cli.utils.exceptions as exc

    from tower_cli.conf import settings
    from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode

    HAS_TOWER_CLI = True
except ImportError:
    HAS_TOWER_CLI = False
def main():
    """Create, update, or delete an Ansible Tower label.

    Reads the module parameters, authenticates against Tower via
    tower-cli, resolves the target organization, and then creates/updates
    (state=present) or deletes (state=absent) the label.  Always exits
    through module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            organization=dict(required=True),
            tower_host=dict(),
            tower_username=dict(),
            tower_password=dict(no_log=True),
            tower_verify_ssl=dict(type='bool', default=True),
            tower_config_file=dict(type='path'),
            state=dict(choices=['present', 'absent'], default='present'),
        ),
        supports_check_mode=True
    )

    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    name = module.params.get('name')
    organization = module.params.get('organization')
    state = module.params.get('state')

    json_output = {'label': name, 'state': state}

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        label = tower_cli.get_resource('label')

        try:
            org_res = tower_cli.get_resource('organization')
            org = org_res.get(name=organization)

            if state == 'present':
                result = label.modify(name=name, organization=org['id'],
                                      create_on_missing=True)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = label.delete(name=name, organization=org['id'])
        except exc.NotFound as excinfo:
            # Raised when the organization (or label on delete) is missing.
            module.fail_json(msg='Failed to update label, organization not found: {0}'.format(excinfo), changed=False)
        # Fix: exc.NotFound was also listed here, but that handler was
        # unreachable -- the handler above already catches it.
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to update label: {0}'.format(excinfo), changed=False)

    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
# Bottom-of-file import follows the old Ansible module convention.
from ansible.module_utils.basic import AnsibleModule

if __name__ == '__main__':
    main()
|
eahneahn/free
|
refs/heads/master
|
lib/python2.7/site-packages/nose/__init__.py
|
14
|
from nose.core import collector, main, run, run_exit, runmodule
# backwards compatibility
from nose.exc import SkipTest, DeprecatedTest
from nose.tools import with_setup

__author__ = 'Jason Pellerin'
# Version is kept as an int tuple for numeric comparison; the dotted
# string form is derived from it.
__versioninfo__ = (1, 3, 0)
__version__ = '.'.join(map(str, __versioninfo__))

# Public re-export surface of the package.
__all__ = [
    'main', 'run', 'run_exit', 'runmodule', 'with_setup',
    'SkipTest', 'DeprecatedTest', 'collector'
]
|
FrederichRiver/neutrino
|
refs/heads/master
|
applications/alkaid/alkaid/fundamental.py
|
1
|
#!/usr/bin/python3
from venus.stock_base import StockEventBase
from dev_global.env import GLOBAL_HEADER
class EventFundamental(StockEventBase):
    """Stock event that queries fundamental (financial statement) data."""

    def __init__(self, header):
        super(EventFundamental, self).__init__(header)

    def quarter_profit(self, stock_code, report_period):
        """Fetch r4_net_profit rows for one stock and report period.

        Delegates to the mysql helper inherited from StockEventBase.
        """
        condition = (
            f"stock_code='{stock_code}' and report_period='{report_period}'"
        )
        return self.mysql.condition_select(
            "income_statement_sheet", "r4_net_profit", condition)
if __name__ == "__main__":
    # Fix: removed the duplicate `from dev_global.env import GLOBAL_HEADER`
    # here -- it is already imported at module level.
    event = EventFundamental(GLOBAL_HEADER)
    result = event.quarter_profit('SH600000', '2019-12-31')
    print(result)
|
hyperized/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/postgres.py
|
6
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
# Most of this was originally added by other creators in the postgresql_user module.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
psycopg2 = None # This line needs for unit tests
try:
import psycopg2
HAS_PSYCOPG2 = True
except ImportError:
HAS_PSYCOPG2 = False
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.database import pg_quote_identifier
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from distutils.version import LooseVersion
def postgres_common_argument_spec():
    """
    Return a dictionary with connection options.

    The options are commonly used by most of PostgreSQL modules.
    """
    spec = {
        'login_user': dict(default='postgres'),
        'login_password': dict(default='', no_log=True),
        'login_host': dict(default=''),
        'login_unix_socket': dict(default=''),
        'port': dict(type='int', default=5432, aliases=['login_port']),
        'ssl_mode': dict(
            default='prefer',
            choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full'],
        ),
        'ca_cert': dict(aliases=['ssl_rootcert']),
    }
    return spec
def ensure_required_libs(module):
    """Check required libraries."""
    # psycopg2 is imported at module scope; HAS_PSYCOPG2 records whether
    # that import succeeded.
    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib('psycopg2'))

    # The ca_cert option maps to libpq's sslrootcert, which psycopg2 only
    # supports from 2.4.3 on.
    if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
    """Connect to a PostgreSQL database.

    Return psycopg2 connection object, or None when the connection failed
    and fail_on_conn is False.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        conn_params (dict) -- dictionary with connection parameters

    Kwargs:
        autocommit (bool) -- commit automatically (default False)
        fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
    """
    ensure_required_libs(module)

    db_connection = None
    try:
        db_connection = psycopg2.connect(**conn_params)
        if autocommit:
            # set_session(autocommit=...) only exists from psycopg2 2.4.2;
            # older versions need the isolation-level constant.
            if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
                db_connection.set_session(autocommit=True)
            else:
                db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

        # Switch role, if specified:
        if module.params.get('session_role'):
            cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

            try:
                cursor.execute('SET ROLE %s' % module.params['session_role'])
            except Exception as e:
                module.fail_json(msg="Could not switch role: %s" % to_native(e))
            finally:
                cursor.close()

    except TypeError as e:
        # psycopg2 raises TypeError for unknown keyword args; an unknown
        # 'sslrootcert' indicates a server/libpq too old to support it.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least '
                                 'version 8.4 to support sslrootcert')

        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    except Exception as e:
        if fail_on_conn:
            module.fail_json(msg="unable to connect to database: %s" % to_native(e))
        else:
            module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
            db_connection = None

    return db_connection
def exec_sql(obj, query, ddl=False, add_to_executed=True):
    """Execute SQL.

    Auxiliary function for PostgreSQL user classes.

    Returns a query result if possible or True/False if ddl=True arg was passed.
    It necessary for statements that don't return any result (like DDL queries).

    Arguments:
        obj (obj) -- must be an object of a user class.
            The object must have module (AnsibleModule class object) and
            cursor (psycopg cursor object) attributes
        query (str) -- SQL query to execute
        ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
            (default False)
        add_to_executed (bool) -- append the query to obj.executed_queries attribute
    """
    try:
        obj.cursor.execute(query)
        if add_to_executed:
            obj.executed_queries.append(query)
        if ddl:
            # DDL statements have no result set; signal success instead.
            return True
        return obj.cursor.fetchall()
    except Exception as e:
        obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
    return False
def get_conn_params(module, params_dict, warn_db_default=True):
    """Get connection parameters from the passed dictionary.

    Return a dictionary with parameters to connect to PostgreSQL server.

    Args:
        module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
        params_dict (dict) -- dictionary with variables

    Kwargs:
        warn_db_default (bool) -- warn that the default DB is used (default True)
    """
    # To use defaults values, keyword arguments must be absent, so
    # check which values are empty and don't include in the return dictionary
    params_map = {
        "login_host": "host",
        "login_user": "user",
        "login_password": "password",
        "port": "port",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }

    # Might be different in the modules:
    if params_dict.get('db'):
        params_map['db'] = 'database'
    elif params_dict.get('database'):
        params_map['database'] = 'database'
    elif params_dict.get('login_db'):
        params_map['login_db'] = 'database'
    else:
        if warn_db_default:
            module.warn('Database name has not been passed, '
                        'used default database to connect to.')

    # .items() works on both py2 and py3; no need for six.iteritems here.
    kw = dict((params_map[k], v) for (k, v) in params_dict.items()
              if k in params_map and v != '' and v is not None)

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"

    # Fix: use .get() so a params_dict without 'login_unix_socket' no
    # longer raises KeyError.
    if is_localhost and params_dict.get("login_unix_socket", "") != "":
        kw["host"] = params_dict["login_unix_socket"]

    return kw
class PgMembership(object):
def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
self.module = module
self.cursor = cursor
self.target_roles = [r.strip() for r in target_roles]
self.groups = [r.strip() for r in groups]
self.executed_queries = []
self.granted = {}
self.revoked = {}
self.fail_on_role = fail_on_role
self.non_existent_roles = []
self.changed = False
self.__check_roles_exist()
def grant(self):
for group in self.groups:
self.granted[group] = []
for role in self.target_roles:
# If role is in a group now, pass:
if self.__check_membership(group, role):
continue
query = "GRANT %s TO %s" % ((pg_quote_identifier(group, 'role'),
(pg_quote_identifier(role, 'role'))))
self.changed = exec_sql(self, query, ddl=True)
if self.changed:
self.granted[group].append(role)
return self.changed
def revoke(self):
for group in self.groups:
self.revoked[group] = []
for role in self.target_roles:
# If role is not in a group now, pass:
if not self.__check_membership(group, role):
continue
query = "REVOKE %s FROM %s" % ((pg_quote_identifier(group, 'role'),
(pg_quote_identifier(role, 'role'))))
self.changed = exec_sql(self, query, ddl=True)
if self.changed:
self.revoked[group].append(role)
return self.changed
def __check_membership(self, src_role, dst_role):
query = ("SELECT ARRAY(SELECT b.rolname FROM "
"pg_catalog.pg_auth_members m "
"JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
"WHERE m.member = r.oid) "
"FROM pg_catalog.pg_roles r "
"WHERE r.rolname = '%s'" % dst_role)
res = exec_sql(self, query, add_to_executed=False)
membership = []
if res:
membership = res[0][0]
if not membership:
return False
if src_role in membership:
return True
return False
def __check_roles_exist(self):
existent_groups = self.__roles_exist(self.groups)
existent_roles = self.__roles_exist(self.target_roles)
for group in self.groups:
if group not in existent_groups:
if self.fail_on_role:
self.module.fail_json(msg="Role %s does not exist" % group)
else:
self.module.warn("Role %s does not exist, pass" % group)
self.non_existent_roles.append(group)
for role in self.target_roles:
if role not in existent_roles:
if self.fail_on_role:
self.module.fail_json(msg="Role %s does not exist" % role)
else:
self.module.warn("Role %s does not exist, pass" % role)
if role not in self.groups:
self.non_existent_roles.append(role)
else:
if self.fail_on_role:
self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
else:
self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))
# Update role lists, excluding non existent roles:
self.groups = [g for g in self.groups if g not in self.non_existent_roles]
self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
    def __roles_exist(self, roles):
        """Return the subset of *roles* that actually exist in pg_roles."""
        # NOTE(review): role names are quoted by simple concatenation --
        # assumes they contain no single quotes.
        tmp = ["'" + x + "'" for x in roles]
        query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
        return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
|
stvoutsin/pyrothorn
|
refs/heads/master
|
pyrothorn/pyroquery/atpy/rechelper.py
|
1
|
import numpy as np
def append_field(rec, data, dtype=None, position='undefined'):
    """Return a copy of *rec* extended with one additional field.

    Parameters
    ----------
    rec : np.recarray
        Source record array; left unmodified.
    data : array-like
        Values for the new field (same length as *rec*).
    dtype : tuple
        ``(name, type)`` descriptor for the new field.
    position : int or 'undefined'
        Index at which to insert the new field; the sentinel string
        ``'undefined'`` appends it at the end.
    """
    descr = list(rec.dtype.descr)
    insert_at = len(descr) if position == 'undefined' else position
    descr.insert(insert_at, dtype)
    out = np.recarray(rec.shape, dtype=np.dtype(descr))
    # Copy every original column, then fill the freshly added one.
    for name in rec.dtype.fields:
        out[name] = rec[name]
    out[dtype[0]] = data
    return out
def drop_fields(rec, names):
    """Return a copy of *rec* without the fields listed in *names*."""
    to_drop = set(names)
    kept = [(field, rec.dtype[field])
            for field in rec.dtype.names if field not in to_drop]
    out = np.recarray(rec.shape, dtype=np.dtype(kept))
    # Copy the surviving columns from the source array.
    for field in out.dtype.fields:
        out[field] = rec[field]
    return out
|
dhruvvyas90/RIOT
|
refs/heads/master
|
tests/posix_semaphore/tests/01-run.py
|
5
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright (C) 2015 Martine Lenders <mlenders@inf.fu-berlin.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
import pexpect
def init():
    # Spawn the RIOT "make term" session.  The short timeout keeps a hung
    # node from stalling the suite; logging mirrors all output to stdout.
    term = pexpect.spawn("make term", timeout=1.1)
    term.logfile = sys.stdout
    return term
def test1(term):
    """TEST1: sem_trywait/sem_post handshake between two threads.

    Drives the node's output through the exact expected line sequence.
    """
    expected_lines = (
        "######################### TEST1:",
        "first: sem_init",
        "first: thread create",
        "first: thread created",
        "first: sem_getvalue",
        "first: sem_getvalue != 0",
        "first: do yield",
        "second: sem_trywait",
        "second: sem_trywait done with == 0",
        "second: wait for post",
        "first: done yield",
        "first: sem_trywait",
        "first: sem_trywait FAILED",
        "first: sem_trywait done",
        "first: sem_post",
        "second: sem_wait failed",
        "second: sem was posted",
        "second: end",
        "first: sem_post done",
        "first: sem_destroy",
        "first: end",
    )
    for line in expected_lines:
        term.expect_exact(line)
def test2(term):
    """TEST2: five threads of distinct priority wait on one semaphore;
    each post must wake the highest-priority waiter first."""
    term.expect_exact("######################### TEST2:")
    term.expect_exact("first: sem_init")
    # Threads are created from priority 5 down to 1.
    for nth, prio in enumerate(range(5, 0, -1), start=1):
        term.expect_exact("first: thread create: %d" % prio)
        term.expect_exact("first: thread created: priority %d (%d/5)" % (prio, nth))
    term.expect_exact("------------------------------------------")
    # Posts wake the waiters in priority order: 1 (highest) to 5.
    for post in range(5):
        term.expect_exact("post no. %d" % post)
        term.expect_exact("Thread 'priority %d' woke up." % (post + 1))
        term.expect_exact("Back in main thread.")
def test3(term):
    """TEST3: two threads alternate waiting on two semaphores s1/s2."""
    term.expect_exact("######################### TEST3:")
    for line in ("first: sem_init s1",
                 "first: sem_init s2",
                 "first: create thread 1",
                 "first: create thread 2",
                 "------------------------------------------"):
        term.expect_exact(line)
    # Each post on a semaphore wakes exactly one specific waiter.
    for sem, waiter in (("s1", 1), ("s2", 2), ("s2", 1), ("s1", 2)):
        term.expect_exact("post %s" % sem)
        term.expect_exact("Thread %d woke up after waiting for %s." % (waiter, sem))
def test4(term):
    # TEST4: a timed wait on s1 must return after roughly one second; the
    # regex matches the reported elapsed time with six microsecond digits.
    term.expect_exact("######################### TEST4:")
    term.expect_exact("first: sem_init s1")
    term.expect_exact("first: wait 1 sec for s1")
    term.expect(r"first: waited 1\.\d{6} s")
if __name__ == "__main__":
    # Run all four semaphore test phases against a single terminal session.
    TERM = init()
    test1(TERM)
    test2(TERM)
    test3(TERM)
    test4(TERM)
    TERM.expect("######################### DONE")
|
Adai0808/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
286
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                             fixed_n_classes=None, n_runs=5, seed=42):
    """Compute score for 2 random uniform cluster labelings.

    Both random labelings have the same number of clusters for each
    possible value in ``n_clusters_range``.

    When fixed_n_classes is not None the first labeling is considered a
    ground truth class assignment with fixed number of classes.

    Returns an array of shape ``(len(n_clusters_range), n_runs)`` holding
    ``score_func(labels_a, labels_b)`` for each (k, run) pair.
    """
    # RandomState.random_integers was deprecated and later removed from
    # NumPy; randint samples from the half-open interval [low, high), so
    # high=k replaces the old inclusive high=k - 1.
    rng = np.random.RandomState(seed)
    scores = np.zeros((len(n_clusters_range), n_runs))

    if fixed_n_classes is not None:
        # Fixed ground-truth labeling, reused across all runs.
        labels_a = rng.randint(low=0, high=fixed_n_classes, size=n_samples)

    for i, k in enumerate(n_clusters_range):
        for j in range(n_runs):
            if fixed_n_classes is None:
                labels_a = rng.randint(low=0, high=k, size=n_samples)
            labels_b = rng.randint(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    metrics.adjusted_mutual_info_score,
    metrics.mutual_info_score,
]

# 2 independent random clusterings with equal cluster number
n_samples = 100
# np.int was removed from NumPy; the builtin int is the correct dtype here.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)

plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for 2 random uniform labelings\n"
          "with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
# The ymin/ymax keywords were removed from Matplotlib; use positional limits.
plt.ylim(-0.05, 1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
# np.int was removed from NumPy; the builtin int is the correct dtype here.
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10

plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for random uniform labeling\n"
          "against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
# The ymin/ymax keywords were removed from Matplotlib; use positional limits.
plt.ylim(-0.05, 1.05)
plt.legend(plots, names)
plt.show()
|
yan12125/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/nuevo.py
|
51
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
xpath_text
)
class NuevoBaseIE(InfoExtractor):
    def _extract_nuevo(self, config_url, video_id, headers=None):
        """Download a Nuevo player XML config and build an info dict.

        Args:
            config_url: URL of the player's XML configuration document.
            video_id:   fallback id; overridden by <mediaid> when present.
            headers:    optional extra HTTP headers for the config request.
        """
        # None sentinel instead of a shared mutable default argument ({}).
        config = self._download_xml(
            config_url, video_id, transform_source=lambda s: s.strip(),
            headers=headers or {})

        title = xpath_text(config, './title', 'title', fatal=True).strip()
        video_id = xpath_text(config, './mediaid', default=video_id)
        thumbnail = xpath_text(config, ['./image', './thumb'])
        duration = float_or_none(xpath_text(config, './duration'))

        formats = []
        # <file> carries the SD stream, <filehd> the HD one.
        for element_name, format_id in (('file', 'sd'), ('filehd', 'hd')):
            video_url = xpath_text(config, element_name)
            if video_url:
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                })
        self._check_formats(formats, video_id)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats
        }
|
ryancoleman/autodock-vina
|
refs/heads/master
|
boost_1_54_0/libs/mpi/test/python/all_reduce_test.py
|
64
|
# Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test all_reduce() collective.
import boost.parallel.mpi as mpi
from generators import *
def all_reduce_test(comm, generator, kind, op, op_kind):
    # Reduce the per-rank values produced by `generator` across `comm`
    # with `op`, then check that every rank received the expected result.
    # (Python 2 file: print statements are intentional.)
    if comm.rank == 0:
        print ("Reducing to %s of %s..." % (op_kind, kind)),
    my_value = generator(comm.rank)
    result = mpi.all_reduce(comm, my_value, op)
    # Recompute the reduction sequentially over all ranks for comparison.
    expected_result = generator(0);
    for p in range(1, comm.size):
        expected_result = op(expected_result, generator(p))
    assert result == expected_result
    if comm.rank == 0:
        print "OK."
    return
# Exercise all_reduce over several value types and reduction operators.
all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x + y, "sum")
all_reduce_test(mpi.world, int_generator, "integers", lambda x,y:x * y, "product")
all_reduce_test(mpi.world, string_generator, "strings", lambda x,y:x + y, "concatenation")
all_reduce_test(mpi.world, string_list_generator, "list of strings", lambda x,y:x + y, "concatenation")
|
Tab3/android_kernel_samsung_lt02
|
refs/heads/cm-12.1
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None             # optional process-name filter
default_interval = 3        # display refresh period in seconds
interval = default_interval

# Parse the optional [comm] and/or [interval] command line arguments.
if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        # A lone non-numeric argument is a comm filter, not an interval.
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id hit counters, filled by the sys_enter handler below.
syscalls = autodict()
def trace_begin():
    # Start the periodic display in a background thread so the tracing
    # callback below is never blocked by terminal output.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Tracepoint handler: count one hit for syscall `id`, honoring the
    # optional comm filter.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict slot is not a number yet.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Display loop run in a helper thread: every `interval` seconds, clear
    # the terminal and dump the counters sorted by count, descending, then
    # reset them for the next period.  (Python 2 file: print statements and
    # the tuple-unpacking lambda are intentional.)
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
|
shenlong3030/asv-django-guestbook
|
refs/heads/master
|
djangoappengine/tests/__init__.py
|
37
|
from .backend import BackendTest
from .field_db_conversion import FieldDBConversionTest
from .field_options import FieldOptionsTest
from .filter import FilterTest
from .order import OrderTest
from .not_return_sets import NonReturnSetsTest
from .decimals import DecimalTest
|
NeblioTeam/neblio
|
refs/heads/master
|
test/functional/todo/feature_config_args.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(BitcoinTestFramework):
    """Check -datadir and -conf handling for existing and missing directories."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    def run_test(self):
        self.stop_node(0)
        # Remove the -datadir argument so it doesn't override the config file
        self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
        default_data_dir = get_datadir_path(self.options.tmpdir, 0)
        new_data_dir = os.path.join(default_data_dir, 'newdatadir')
        new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
        # Check that using -datadir argument on non-existent directory fails
        self.nodes[0].datadir = new_data_dir
        self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
        # Check that using non-existent datadir in conf file fails
        conf_file = os.path.join(default_data_dir, "neblio.conf")
        with open(conf_file, 'a', encoding='utf8') as f:
            f.write("datadir=" + new_data_dir + "\n")
        self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
        # Create the directory and ensure the config file now works
        os.mkdir(new_data_dir)
        self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
        self.stop_node(0)
        assert os.path.isfile(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
        # Ensure command line argument overrides datadir in conf
        os.mkdir(new_data_dir_2)
        self.nodes[0].datadir = new_data_dir_2
        self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
        assert os.path.isfile(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
    # Entry point: run the functional test through the framework harness.
    ConfArgsTest().main()
|
wwj718/edx-platform
|
refs/heads/master
|
common/djangoapps/util/tests/test_keyword_sub_utils.py
|
130
|
"""
Tests for keyword_substitution.py
"""
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from ddt import ddt, file_data
from mock import patch
from util.date_utils import get_default_time_display
from util import keyword_substitution as Ks
@ddt
class KeywordSubTest(ModuleStoreTestCase):
    """ Tests for the keyword substitution feature """
    def setUp(self):
        super(KeywordSubTest, self).setUp(create_user=False)
        self.user = UserFactory.create(
            email="testuser@edx.org",
            username="testuser",
            profile__name="Test User"
        )
        self.course = CourseFactory.create(
            org='edx',
            course='999',
            display_name='test_course'
        )
        # Context handed to substitute_keywords_with_data in every test.
        self.context = {
            'user_id': self.user.id,
            'course_title': self.course.display_name,
            'name': self.user.profile.name,
            'course_end_date': get_default_time_display(self.course.end),
        }
    @file_data('fixtures/test_keyword_coursename_sub.json')
    def test_course_name_sub(self, test_info):
        """ Tests subbing course name in various scenarios """
        course_name = self.course.display_name
        result = Ks.substitute_keywords_with_data(
            test_info['test_string'], self.context,
        )
        self.assertIn(course_name, result)
        self.assertEqual(result, test_info['expected'])
    def test_anonymous_id_sub(self):
        """
        Test that anonymous_id is subbed
        """
        test_string = "Turn %%USER_ID%% into anonymous id"
        anonymous_id = Ks.anonymous_id_from_user_id(self.user.id)
        result = Ks.substitute_keywords_with_data(
            test_string, self.context,
        )
        self.assertNotIn('%%USER_ID%%', result)
        self.assertIn(anonymous_id, result)
    def test_name_sub(self):
        """
        Test that the user's full name is correctly subbed
        """
        test_string = "This is the test string. subthis: %%USER_FULLNAME%% into user name"
        user_name = self.user.profile.name
        result = Ks.substitute_keywords_with_data(
            test_string, self.context,
        )
        self.assertNotIn('%%USER_FULLNAME%%', result)
        self.assertIn(user_name, result)
    def test_illegal_subtag(self):
        """
        Test that sub-ing doesn't ocurr with illegal tags
        """
        # Lowercase tags are not recognized keywords, so nothing changes.
        test_string = "%%user_id%%"
        result = Ks.substitute_keywords_with_data(
            test_string, self.context,
        )
        self.assertEquals(test_string, result)
    def test_should_not_sub(self):
        """
        Test that sub-ing doesn't work without subtags
        """
        test_string = "this string has no subtags"
        result = Ks.substitute_keywords_with_data(
            test_string, self.context,
        )
        self.assertEquals(test_string, result)
    @file_data('fixtures/test_keywordsub_multiple_tags.json')
    def test_sub_multiple_tags(self, test_info):
        """ Test that subbing works with multiple subtags """
        # Pin the anonymous id so the fixture expectations are deterministic.
        anon_id = '123456789'
        with patch('util.keyword_substitution.anonymous_id_from_user_id', lambda user_id: anon_id):
            result = Ks.substitute_keywords_with_data(
                test_info['test_string'], self.context,
            )
            self.assertEqual(result, test_info['expected'])
    def test_subbing_no_userid_or_courseid(self):
        """
        Tests that no subbing occurs if no user_id or no course_id is given.
        """
        test_string = 'This string should not be subbed here %%USER_ID%%'
        # NOTE(review): dict.iteritems() is Python 2 only.
        no_course_context = dict(
            (key, value) for key, value in self.context.iteritems() if key != 'course_title'
        )
        result = Ks.substitute_keywords_with_data(test_string, no_course_context)
        self.assertEqual(test_string, result)
        no_user_id_context = dict(
            (key, value) for key, value in self.context.iteritems() if key != 'user_id'
        )
        result = Ks.substitute_keywords_with_data(test_string, no_user_id_context)
        self.assertEqual(test_string, result)
|
quattor/aquilon
|
refs/heads/upstream
|
lib/aquilon/worker/commands/add_netgroup_whitelist.py
|
1
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2014,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add netgroup whitelist`."""
from aquilon.aqdb.model import NetGroupWhiteList
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.change_management import ChangeManagement
class CommandAddNetgroupWhitelist(BrokerCommand):
    """Broker command implementing `aq add netgroup whitelist`."""
    required_parameters = ["netgroup"]
    def render(self, session, netgroup, user, justification,
               reason, logger, **arguments):
        # Raise if the netgroup is already whitelisted (preclude=True).
        NetGroupWhiteList.get_unique(session, name=netgroup, preclude=True)
        dbng = NetGroupWhiteList(name=netgroup)
        # Validate ChangeManagement
        cm = ChangeManagement(session, user, justification, reason, logger, self.command, **arguments)
        cm.consider(dbng)
        cm.validate()
        session.add(dbng)
        session.flush()
        return
|
Werkov/PyQt4
|
refs/heads/master
|
examples/opengl/hellogl.py
|
17
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
import math
from PyQt4 import QtCore, QtGui, QtOpenGL
try:
    from OpenGL import GL
except ImportError:
    # PyOpenGL is a hard requirement; report the problem in a message box
    # rather than letting the ImportError traceback reach the user.
    app = QtGui.QApplication(sys.argv)
    QtGui.QMessageBox.critical(None, "OpenGL hellogl",
            "PyOpenGL must be installed to run this example.")
    sys.exit(1)
class Window(QtGui.QWidget):
    """Main window: the GL viewport plus three rotation sliders (x, y, z)."""
    def __init__(self):
        super(Window, self).__init__()
        self.glWidget = GLWidget()
        self.xSlider = self.createSlider()
        self.ySlider = self.createSlider()
        self.zSlider = self.createSlider()
        # Two-way binding: sliders drive the GL rotation and vice versa.
        self.xSlider.valueChanged.connect(self.glWidget.setXRotation)
        self.glWidget.xRotationChanged.connect(self.xSlider.setValue)
        self.ySlider.valueChanged.connect(self.glWidget.setYRotation)
        self.glWidget.yRotationChanged.connect(self.ySlider.setValue)
        self.zSlider.valueChanged.connect(self.glWidget.setZRotation)
        self.glWidget.zRotationChanged.connect(self.zSlider.setValue)
        mainLayout = QtGui.QHBoxLayout()
        mainLayout.addWidget(self.glWidget)
        mainLayout.addWidget(self.xSlider)
        mainLayout.addWidget(self.ySlider)
        mainLayout.addWidget(self.zSlider)
        self.setLayout(mainLayout)
        # Angles are expressed in 1/16ths of a degree (Qt slider convention).
        self.xSlider.setValue(15 * 16)
        self.ySlider.setValue(345 * 16)
        self.zSlider.setValue(0 * 16)
        self.setWindowTitle("Hello GL")
    def createSlider(self):
        # Vertical slider covering one full revolution in 1/16-degree units.
        slider = QtGui.QSlider(QtCore.Qt.Vertical)
        slider.setRange(0, 360 * 16)
        slider.setSingleStep(16)
        slider.setPageStep(15 * 16)
        slider.setTickInterval(15 * 16)
        slider.setTickPosition(QtGui.QSlider.TicksRight)
        return slider
class GLWidget(QtOpenGL.QGLWidget):
    """GL viewport that renders a spinning logo; rotation state is mirrored
    out through the three *RotationChanged signals."""
    xRotationChanged = QtCore.pyqtSignal(int)
    yRotationChanged = QtCore.pyqtSignal(int)
    zRotationChanged = QtCore.pyqtSignal(int)
    def __init__(self, parent=None):
        super(GLWidget, self).__init__(parent)
        # Rotations are stored in 1/16ths of a degree (Qt convention).
        self.object = 0
        self.xRot = 0
        self.yRot = 0
        self.zRot = 0
        self.lastPos = QtCore.QPoint()
        self.trolltechGreen = QtGui.QColor.fromCmykF(0.40, 0.0, 1.0, 0.0)
        self.trolltechPurple = QtGui.QColor.fromCmykF(0.39, 0.39, 0.0, 0.0)
    def minimumSizeHint(self):
        return QtCore.QSize(50, 50)
    def sizeHint(self):
        return QtCore.QSize(400, 400)
    def setXRotation(self, angle):
        # Normalize, store, notify the bound slider, and repaint.
        angle = self.normalizeAngle(angle)
        if angle != self.xRot:
            self.xRot = angle
            self.xRotationChanged.emit(angle)
            self.updateGL()
    def setYRotation(self, angle):
        angle = self.normalizeAngle(angle)
        if angle != self.yRot:
            self.yRot = angle
            self.yRotationChanged.emit(angle)
            self.updateGL()
    def setZRotation(self, angle):
        angle = self.normalizeAngle(angle)
        if angle != self.zRot:
            self.zRot = angle
            self.zRotationChanged.emit(angle)
            self.updateGL()
    def initializeGL(self):
        # One-time GL state setup; the logo geometry is compiled into a
        # display list for fast redraws.
        self.qglClearColor(self.trolltechPurple.dark())
        self.object = self.makeObject()
        GL.glShadeModel(GL.GL_FLAT)
        GL.glEnable(GL.GL_DEPTH_TEST)
        GL.glEnable(GL.GL_CULL_FACE)
    def paintGL(self):
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glLoadIdentity()
        GL.glTranslated(0.0, 0.0, -10.0)
        # Convert the 1/16-degree state back to degrees for glRotated.
        GL.glRotated(self.xRot / 16.0, 1.0, 0.0, 0.0)
        GL.glRotated(self.yRot / 16.0, 0.0, 1.0, 0.0)
        GL.glRotated(self.zRot / 16.0, 0.0, 0.0, 1.0)
        GL.glCallList(self.object)
    def resizeGL(self, width, height):
        # Keep a centered square viewport so the aspect ratio is preserved.
        side = min(width, height)
        if side < 0:
            return
        # NOTE(review): '/' yields floats on Python 3 while glViewport needs
        # ints -- fine under Python 2, which this PyQt4 example targets.
        GL.glViewport((width - side) / 2, (height - side) / 2, side, side)
        GL.glMatrixMode(GL.GL_PROJECTION)
        GL.glLoadIdentity()
        GL.glOrtho(-0.5, +0.5, +0.5, -0.5, 4.0, 15.0)
        GL.glMatrixMode(GL.GL_MODELVIEW)
    def mousePressEvent(self, event):
        self.lastPos = event.pos()
    def mouseMoveEvent(self, event):
        # Left drag rotates around x/y; right drag rotates around x/z.
        dx = event.x() - self.lastPos.x()
        dy = event.y() - self.lastPos.y()
        if event.buttons() & QtCore.Qt.LeftButton:
            self.setXRotation(self.xRot + 8 * dy)
            self.setYRotation(self.yRot + 8 * dx)
        elif event.buttons() & QtCore.Qt.RightButton:
            self.setXRotation(self.xRot + 8 * dy)
            self.setZRotation(self.zRot + 8 * dx)
        self.lastPos = event.pos()
    def makeObject(self):
        # Build the logo geometry (flat quads plus extruded edges and a
        # circular arc) into a compiled display list.
        genList = GL.glGenLists(1)
        GL.glNewList(genList, GL.GL_COMPILE)
        GL.glBegin(GL.GL_QUADS)
        x1 = +0.06
        y1 = -0.14
        x2 = +0.14
        y2 = -0.06
        x3 = +0.08
        y3 = +0.00
        x4 = +0.30
        y4 = +0.22
        self.quad(x1, y1, x2, y2, y2, x2, y1, x1)
        self.quad(x3, y3, x4, y4, y4, x4, y3, x3)
        self.extrude(x1, y1, x2, y2)
        self.extrude(x2, y2, y2, x2)
        self.extrude(y2, x2, y1, x1)
        self.extrude(y1, x1, x1, y1)
        self.extrude(x3, y3, x4, y4)
        self.extrude(x4, y4, y4, x4)
        self.extrude(y4, x4, y3, x3)
        NumSectors = 200
        for i in range(NumSectors):
            angle1 = (i * 2 * math.pi) / NumSectors
            x5 = 0.30 * math.sin(angle1)
            y5 = 0.30 * math.cos(angle1)
            x6 = 0.20 * math.sin(angle1)
            y6 = 0.20 * math.cos(angle1)
            angle2 = ((i + 1) * 2 * math.pi) / NumSectors
            x7 = 0.20 * math.sin(angle2)
            y7 = 0.20 * math.cos(angle2)
            x8 = 0.30 * math.sin(angle2)
            y8 = 0.30 * math.cos(angle2)
            self.quad(x5, y5, x6, y6, x7, y7, x8, y8)
            self.extrude(x6, y6, x7, y7)
            self.extrude(x8, y8, x5, y5)
        GL.glEnd()
        GL.glEndList()
        return genList
    def quad(self, x1, y1, x2, y2, x3, y3, x4, y4):
        # Emit front and back faces (reverse winding) of one quad.
        self.qglColor(self.trolltechGreen)
        GL.glVertex3d(x1, y1, -0.05)
        GL.glVertex3d(x2, y2, -0.05)
        GL.glVertex3d(x3, y3, -0.05)
        GL.glVertex3d(x4, y4, -0.05)
        GL.glVertex3d(x4, y4, +0.05)
        GL.glVertex3d(x3, y3, +0.05)
        GL.glVertex3d(x2, y2, +0.05)
        GL.glVertex3d(x1, y1, +0.05)
    def extrude(self, x1, y1, x2, y2):
        # Side wall between the two z-planes, darkened by x position.
        self.qglColor(self.trolltechGreen.dark(250 + int(100 * x1)))
        GL.glVertex3d(x1, y1, +0.05)
        GL.glVertex3d(x2, y2, +0.05)
        GL.glVertex3d(x2, y2, -0.05)
        GL.glVertex3d(x1, y1, -0.05)
    def normalizeAngle(self, angle):
        # Wrap into the [0, 360 * 16] range used by the sliders.
        while angle < 0:
            angle += 360 * 16
        while angle > 360 * 16:
            angle -= 360 * 16
        return angle
if __name__ == '__main__':
    # Standard Qt bootstrap: create the app, show the window, run the loop.
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
|
InnovativeTravel/boto3facade
|
refs/heads/master
|
tests/test_iam.py
|
14224
|
# -*- coding: utf-8 -*-
|
sudheesh001/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/test/html.py
|
196
|
"""
Comparing two html documents.
"""
from __future__ import unicode_literals
import re
from django.utils.encoding import force_text
from django.utils.html_parser import HTMLParser, HTMLParseError
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
# Raw string avoids the invalid "\s" escape (a DeprecationWarning since
# Python 3.6 and an error in later versions).
WHITESPACE = re.compile(r'\s+')


def normalize_whitespace(string):
    """Collapse every run of whitespace in *string* to a single space."""
    return WHITESPACE.sub(' ', string)
@python_2_unicode_compatible
class Element(object):
    """A parsed HTML element: tag name, sorted attributes and children.

    Equality is semantic rather than textual, so documents compare equal
    regardless of attribute order, quoting style or valueless attributes.
    """
    def __init__(self, name, attributes):
        self.name = name
        # Sorting makes attribute order irrelevant for comparisons.
        self.attributes = sorted(attributes)
        self.children = []
    def append(self, element):
        # Adjacent text nodes are merged and their whitespace normalized.
        if isinstance(element, six.string_types):
            element = force_text(element)
            element = normalize_whitespace(element)
            if self.children:
                if isinstance(self.children[-1], six.string_types):
                    self.children[-1] += element
                    self.children[-1] = normalize_whitespace(self.children[-1])
                    return
        elif self.children:
            # removing last children if it is only whitespace
            # this can result in incorrect dom representations since
            # whitespace between inline tags like <span> is significant
            if isinstance(self.children[-1], six.string_types):
                if self.children[-1].isspace():
                    self.children.pop()
        if element:
            self.children.append(element)
    def finalize(self):
        # Strip trailing whitespace from the last text node (recursively,
        # dropping nodes that become empty), then trim all text children.
        def rstrip_last_element(children):
            if children:
                if isinstance(children[-1], six.string_types):
                    children[-1] = children[-1].rstrip()
                    if not children[-1]:
                        children.pop()
                        children = rstrip_last_element(children)
            return children
        rstrip_last_element(self.children)
        for i, child in enumerate(self.children):
            if isinstance(child, six.string_types):
                self.children[i] = child.strip()
            elif hasattr(child, 'finalize'):
                child.finalize()
    def __eq__(self, element):
        # Semantic comparison: name, attributes (with the valueless-attribute
        # equivalence below) and children must all match.
        if not hasattr(element, 'name'):
            return False
        if hasattr(element, 'name') and self.name != element.name:
            return False
        if len(self.attributes) != len(element.attributes):
            return False
        if self.attributes != element.attributes:
            # attributes without a value is same as attribute with value that
            # equals the attributes name:
            # <input checked> == <input checked="checked">
            for i in range(len(self.attributes)):
                attr, value = self.attributes[i]
                other_attr, other_value = element.attributes[i]
                if value is None:
                    value = attr
                if other_value is None:
                    other_value = other_attr
                if attr != other_attr or value != other_value:
                    return False
        if self.children != element.children:
            return False
        return True
    def __hash__(self):
        # NOTE(review): hashes only name + attributes, not children, which is
        # consistent with (a superset of) __eq__'s requirements.
        return hash((self.name,) + tuple(a for a in self.attributes))
    def __ne__(self, element):
        return not self.__eq__(element)
    def _count(self, element, count=True):
        # Count occurrences of `element` (element or text fragment) in this
        # subtree; with count=False, short-circuit at the first match.
        if not isinstance(element, six.string_types):
            if self == element:
                return 1
        i = 0
        for child in self.children:
            # child is text content and element is also text content, then
            # make a simple "text" in "text"
            if isinstance(child, six.string_types):
                if isinstance(element, six.string_types):
                    if count:
                        i += child.count(element)
                    elif element in child:
                        return 1
            else:
                i += child._count(element, count=count)
                if not count and i:
                    return i
        return i
    def __contains__(self, element):
        return self._count(element, count=False) > 0
    def count(self, element):
        return self._count(element, count=True)
    def __getitem__(self, key):
        return self.children[key]
    def __str__(self):
        # Render back to normalized HTML (self-closing form when childless).
        output = '<%s' % self.name
        for key, value in self.attributes:
            if value:
                output += ' %s="%s"' % (key, value)
            else:
                output += ' %s' % key
        if self.children:
            output += '>\n'
            output += ''.join(six.text_type(c) for c in self.children)
            output += '\n</%s>' % self.name
        else:
            output += ' />'
        return output
    def __repr__(self):
        return six.text_type(self)
@python_2_unicode_compatible
class RootElement(Element):
    """Anonymous container for a document's top-level nodes."""
    def __init__(self):
        super(RootElement, self).__init__(None, ())
    def __str__(self):
        # Unlike Element, the root renders only its children (no tag).
        return ''.join(six.text_type(c) for c in self.children)
class Parser(HTMLParser):
    """HTMLParser subclass that builds the Element tree used for comparison."""
    # Tags that never get a closing tag and are not pushed on the stack.
    SELF_CLOSING_TAGS = ('br' , 'hr', 'input', 'img', 'meta', 'spacer',
        'link', 'frame', 'base', 'col')
    def __init__(self):
        HTMLParser.__init__(self)
        self.root = RootElement()
        self.open_tags = []
        self.element_positions = {}
    def error(self, msg):
        raise HTMLParseError(msg, self.getpos())
    def format_position(self, position=None, element=None):
        # Prefer the explicit position, then the recorded position of
        # `element`, then the parser's current position.
        if not position and element:
            position = self.element_positions[element]
        if position is None:
            position = self.getpos()
        if hasattr(position, 'lineno'):
            position = position.lineno, position.offset
        return 'Line %d, Column %d' % position
    @property
    def current(self):
        # Innermost open element, or the document root between top-level tags.
        if self.open_tags:
            return self.open_tags[-1]
        else:
            return self.root
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        if tag not in self.SELF_CLOSING_TAGS:
            self.handle_endtag(tag)
    def handle_starttag(self, tag, attrs):
        # Special case handling of 'class' attribute, so that comparisons of DOM
        # instances are not sensitive to ordering of classes.
        attrs = [
            (name, " ".join(sorted(value.split(" "))))
            if name == "class"
            else (name, value)
            for name, value in attrs
        ]
        element = Element(tag, attrs)
        self.current.append(element)
        if tag not in self.SELF_CLOSING_TAGS:
            self.open_tags.append(element)
        self.element_positions[element] = self.getpos()
    def handle_endtag(self, tag):
        if not self.open_tags:
            self.error("Unexpected end tag `%s` (%s)" % (
                tag, self.format_position()))
        element = self.open_tags.pop()
        # Implicitly close any elements left open above the matching tag.
        while element.name != tag:
            if not self.open_tags:
                self.error("Unexpected end tag `%s` (%s)" % (
                    tag, self.format_position()))
            element = self.open_tags.pop()
    def handle_data(self, data):
        self.current.append(data)
    def handle_charref(self, name):
        # Keep character/entity references verbatim as text.
        self.current.append('&%s;' % name)
    def handle_entityref(self, name):
        self.current.append('&%s;' % name)
def parse_html(html):
    """
    Takes a string that contains *valid* HTML and turns it into a Python object
    structure that can be easily compared against other HTML on semantic
    equivalence. Syntactical differences like which quotation is used on
    arguments will be ignored.
    """
    parser = Parser()
    parser.feed(html)
    parser.close()
    root = parser.root
    root.finalize()
    # Drop the synthetic root when it wraps exactly one element node
    # (but keep it when the single child is bare text).
    children = root.children
    if len(children) == 1 and not isinstance(children[0], six.string_types):
        return children[0]
    return root
|
glorizen/nupic
|
refs/heads/master
|
examples/opf/experiments/classification/category_TP_1/description.py
|
32
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{
  'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
                 '../datasets/category_TP_1.csv'),
  'modelParams': { 'clParams': { 'clVerbosity': 0},
                   'sensorParams': { 'encoders': { }, 'verbosity': 0},
                   'spParams': { 'spVerbosity': 0},
                   'tpEnable': True,
                   'tpParams': { 'verbosity': 0}}}
# Merge the base experiment description (with `config` overrides applied)
# into this module's namespace; the OPF reads experiments as module attributes.
mod = importBaseDescription('../base_category/description.py', config)
locals().update(mod.__dict__)
|
skevy/django
|
refs/heads/master
|
tests/regressiontests/special_headers/views.py
|
53
|
# -*- coding:utf-8 -*-
from django.http import HttpResponse
from django.utils.decorators import decorator_from_middleware
from django.views.generic import View
from django.middleware.doc import XViewMiddleware
# Decorator form of XViewMiddleware, presumably applied where these views are
# wired up (not applied here) — confirm against the test URLconf.
xview_dec = decorator_from_middleware(XViewMiddleware)
def xview(request):
    # Minimal function-based view used to exercise X-View header handling.
    return HttpResponse()
class XViewClass(View):
    """Minimal class-based view counterpart of `xview`.

    Bug fix: `get` was declared without `self`. `View.dispatch` invokes the
    bound handler as ``handler(request, *args, **kwargs)``, so the original
    one-argument signature raised ``TypeError: get() takes 1 positional
    argument but 2 were given`` on every request.
    """
    def get(self, request):
        return HttpResponse()
|
guoxiaolongzte/spark
|
refs/heads/master
|
python/pyspark/mllib/stat/test.py
|
155
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.mllib.common import inherit_doc, JavaModelWrapper
__all__ = ["ChiSqTestResult", "KolmogorovSmirnovTestResult"]
class TestResult(JavaModelWrapper):
    """
    Base class for all test results.

    All accessors delegate to the wrapped Java model object.
    """
    @property
    def pValue(self):
        """
        The probability of obtaining a test statistic result at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.
        """
        return self._java_model.pValue()
    @property
    def degreesOfFreedom(self):
        """
        Returns the degree(s) of freedom of the hypothesis test.
        Return type should be Number(e.g. Int, Double) or tuples of Numbers.
        """
        return self._java_model.degreesOfFreedom()
    @property
    def statistic(self):
        """
        Test statistic.
        """
        return self._java_model.statistic()
    @property
    def nullHypothesis(self):
        """
        Null hypothesis of the test.
        """
        return self._java_model.nullHypothesis()
    def __str__(self):
        # The JVM side formats the complete human-readable summary.
        return self._java_model.toString()
@inherit_doc
class ChiSqTestResult(TestResult):
    """
    Contains test results for the chi-squared hypothesis test.
    """
    @property
    def method(self):
        """
        Name of the test method
        """
        return self._java_model.method()
@inherit_doc
class KolmogorovSmirnovTestResult(TestResult):
    """
    Contains test results for the Kolmogorov-Smirnov test.

    Exposes only the base-class accessors (pValue, statistic, ...).
    """
|
xlqian/navitia
|
refs/heads/dev
|
source/jormungandr/jormungandr/parking_space_availability/car/sytral.py
|
3
|
# encoding: utf-8
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import jmespath
from jormungandr.parking_space_availability.car.parking_places import ParkingPlaces
from jormungandr.parking_space_availability.car.common_car_park_provider import CommonCarParkProvider
DEFAULT_SYTRAL_FEED_PUBLISHER = None
class SytralProvider(CommonCarParkProvider):
    """Car-park availability provider for the SYTRAL feed.

    Fetching and caching live in CommonCarParkProvider; this subclass only
    sets the provider name and implements record matching.
    """
    def __init__(
        self, url, operators, dataset, timeout=1, feed_publisher=DEFAULT_SYTRAL_FEED_PUBLISHER, **kwargs
    ):
        self.provider_name = 'SYTRAL'
        super(SytralProvider, self).__init__(url, operators, dataset, timeout, feed_publisher, **kwargs)
    def process_data(self, data, poi):
        # Select the record whose car_park_id equals the POI's external ref;
        # jmespath yields None when nothing matches, and we then implicitly
        # return None (meaning: no availability information).
        park = jmespath.search('records[?car_park_id==`{}`]|[0]'.format(poi['properties']['ref']), data)
        if park:
            # NOTE(review): assumes every matched record carries the *_PRM
            # keys — a missing key raises KeyError here; confirm feed schema.
            return ParkingPlaces(
                park['available'], park['occupied'], park['available_PRM'], park['occupied_PRM']
            )
|
rctay/satchmo-payment-dumb
|
refs/heads/master
|
processor.py
|
1
|
from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from livesettings import config_value, config_get_group
from payment.modules.base import HeadlessPaymentProcessor, ProcessorResult
config = config_get_group('PAYMENT_DUMB')
class PaymentProcessor(HeadlessPaymentProcessor):
    """Dumb payment processor: performs no external transaction and always
    reports success; real payment recording is left to the integrator."""
    def __init__(self, settings):
        super(PaymentProcessor, self).__init__('dumb', settings)
    def capture_payment(self, testing=False, order=None, amount=Decimal('0')):
        """
        Users are expected to implement recording of payments, so we just return
        a blanket success.
        """
        # NOTE(review): a payment record is created only when LIVE is *off*,
        # which looks inverted relative to the 'Skipping payment recording'
        # success message below — confirm the intended LIVE semantics.
        if not config_value(config, 'LIVE'):
            payment = self.record_payment(order=order, amount=amount,
                transaction_id="TESTING", reason_code='0')
        return ProcessorResult(self.key, True, _('Skipping payment recording'))
|
FennyFatal/i747_kernel_ics
|
refs/heads/master
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
# Make perf's trace support library importable.
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
# Optional single argument: numeric pid, or a comm name to filter on.
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	try:
		for_pid = int(sys.argv[1])
	except:
		for_comm = sys.argv[1]
# comm -> pid -> syscall id -> errno -> count (autodict autovivifies levels).
syscalls = autodict()
def trace_begin():
	# Called by perf once before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf after the last event; emit the summary.
	print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, ret):
	# Invoked by perf for every syscall exit; count only failures (ret < 0)
	# that pass the optional comm/pid filter set on the command line.
	if (for_comm and common_comm != for_comm) or \
	(for_pid and common_pid != for_pid ):
		return
	if ret < 0:
		try:
			syscalls[common_comm][common_pid][id][ret] += 1
		except TypeError:
			# First failure seen for this (comm, pid, id, errno) leaf.
			syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
	# Per-comm/per-pid breakdown of failed syscalls, most frequent errno
	# first. Trailing commas suppress print's automatic newline (Python 2).
	if for_comm is not None:
		print "\nsyscall errors for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall errors:\n\n",
	print "%-30s %10s\n" % ("comm [pid]", "count"),
	print "%-30s %10s\n" % ("------------------------------", \
		"----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			for id in id_keys:
				print " syscall: %-16s\n" % syscall_name(id),
				# NOTE(review): ret_keys is computed but never used; the
				# sorted iteritems() below supplies the errno/count pairs.
				ret_keys = syscalls[comm][pid][id].keys()
				for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
					print " err = %-20s %10d\n" % (strerror(ret), val),
|
annarev/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/numpy_ops/np_utils.py
|
3
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for internal use."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import numbers
import os
import re
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_export
from tensorflow.python.types import core
from tensorflow.python.util import nest
def _canonicalize_axis(axis, rank):
  """Canonicalizes a single (possibly negative) axis against `rank`."""
  return _canonicalize_axes([axis], rank)[0]
def _canonicalize_axes(axes, rank):
  """Maps each negative axis to `axis + rank`; non-negative axes pass through.

  `rank` may be a Tensor; then the wrap-around uses `cond` so it can be
  resolved at graph time.
  """
  rank = _maybe_static(rank)
  if isinstance(rank, core.Tensor):
    canonicalizer = (
        lambda axis: cond(axis < 0, lambda: axis + rank, lambda: axis))
  else:
    canonicalizer = lambda axis: axis + rank if axis < 0 else axis
  return [canonicalizer(axis) for axis in axes]
def _supports_signature():
  # True on Pythons whose `inspect` has `signature` (3.3+).
  return hasattr(inspect, 'signature')
def _to_tf_type(dtype):
  """Converts a native python or numpy type to TF DType.
  Args:
    dtype: Could be a python type, a numpy type or a TF DType.
  Returns:
    A tensorflow `DType`.
  """
  return dtypes.as_dtype(dtype)
def _to_numpy_type(dtype):
  """Converts a native python or TF DType to numpy type.
  Args:
    dtype: Could be a python type, a numpy type or a TF DType.
  Returns:
    A NumPy `dtype`.
  """
  if isinstance(dtype, dtypes.DType):
    return dtype.as_numpy_dtype
  return np.dtype(dtype)
def isscalar(val):
  """Returns whether `val` is a scalar value or scalar Tensor."""
  if isinstance(val, np_arrays.ndarray):
    # Unwrap tf-numpy ndarray to the underlying tensor.
    val = val.data
  if isinstance(val, core.Tensor):
    ndims = val.shape.ndims
    if ndims is not None:
      return ndims == 0
    else:
      # Rank unknown statically; fall back to a graph-time rank check.
      return math_ops.equal(array_ops.rank(val), 0)
  else:
    return np.isscalar(val)
def _has_docstring(f):
  # Truthy iff `f` exists and carries a non-empty string __doc__.
  # (Note: returns the docstring itself when present, not a bool.)
  return (f and hasattr(f, '__doc__') and isinstance(f.__doc__, str) and
          f.__doc__)
def _add_blank_line(s):
  """Returns `s` terminated by exactly one blank line."""
  suffix = '\n' if s.endswith('\n') else '\n\n'
  return s + suffix
def _np_signature(f):
  """An enhanced inspect.signature that can handle numpy.ufunc."""
  # TODO(wangpeng): consider migrating away from inspect.signature.
  # inspect.signature is supported in Python 3.3.
  if not hasattr(inspect, 'signature'):
    return None
  if f is None:
    return None
  if not isinstance(f, np.ufunc):
    try:
      return inspect.signature(f)
    except ValueError:
      return None
  # ufuncs expose no signature; synthesize one from nin/nout plus the
  # keyword-only parameters every ufunc accepts.
  def names_from_num(prefix, n):
    # ufunc-style names: 'x' for one input, else 'x1', 'x2', ...
    if n <= 0:
      return []
    elif n == 1:
      return [prefix]
    else:
      return [prefix + str(i + 1) for i in range(n)]
  input_names = names_from_num('x', f.nin)
  output_names = names_from_num('out', f.nout)
  keyword_only_params = [('where', True), ('casting', 'same_kind'),
                         ('order', 'K'), ('dtype', None), ('subok', True),
                         ('signature', None), ('extobj', None)]
  params = []
  params += [
      inspect.Parameter(name, inspect.Parameter.POSITIONAL_ONLY)
      for name in input_names
  ]
  if f.nout > 1:
    # Multi-output ufuncs also accept the individual outputs positionally.
    params += [
        inspect.Parameter(
            name, inspect.Parameter.POSITIONAL_ONLY, default=None)
        for name in output_names
    ]
  params += [
      inspect.Parameter(
          'out',
          inspect.Parameter.POSITIONAL_OR_KEYWORD,
          default=None if f.nout == 1 else (None,) * f.nout)
  ]
  params += [
      inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=default)
      for name, default in keyword_only_params
  ]
  return inspect.Signature(params)
# Python 2 doesn't allow keyword-only argument. Python prior to 3.8 doesn't
# allow positional-only argument. So we conflate positional-only, keyword-only
# and positional-or-keyword arguments here.
def _is_compatible_param_kind(a, b):
  """Whether parameter kinds `a` and `b` match once the *_ONLY kinds are
  conflated with POSITIONAL_OR_KEYWORD (see comment above)."""
  conflated = (inspect.Parameter.POSITIONAL_ONLY,
               inspect.Parameter.KEYWORD_ONLY)
  relax = lambda k: (inspect.Parameter.POSITIONAL_OR_KEYWORD
                     if k in conflated else k)
  return relax(a) == relax(b)
def _prepare_np_fun_name_and_fun(np_fun_name, np_fun):
  """Mutually propagates information between `np_fun_name` and `np_fun`.
  If one is None and the other is not, we'll try to make the former not None in
  a best effort.
  Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name shoud be set.
    np_fun: the numpy function whose docstring will be used.
  Returns:
    Processed `np_fun_name` and `np_fun`.
  """
  if np_fun_name is not None:
    assert isinstance(np_fun_name, str)
  if np_fun is not None:
    assert not isinstance(np_fun, str)
  if np_fun is None:
    assert np_fun_name is not None
    try:
      # Look up on the numpy module; may legitimately fail for symbols that
      # exist only in tf-numpy, in which case np_fun stays None.
      np_fun = getattr(np, str(np_fun_name))
    except AttributeError:
      np_fun = None
  if np_fun_name is None:
    assert np_fun is not None
    np_fun_name = np_fun.__name__
  return np_fun_name, np_fun
def _np_doc_helper(f, np_f, np_fun_name=None, unsupported_params=None,
                   link=None):
  """Helper to get docs: builds the tf-numpy docstring for `f`, listing any
  unsupported numpy arguments and appending/linking numpy's own docs."""
  assert np_f or np_fun_name
  if not np_fun_name:
    np_fun_name = np_f.__name__
  doc = 'TensorFlow variant of NumPy\'s `%s`.\n\n' % np_fun_name
  if unsupported_params:
    doc += 'Unsupported arguments: ' + ', '.join(
        '`' + name + '`' for name in unsupported_params) + '.\n\n'
  if _has_docstring(f):
    doc += f.__doc__
    doc = _add_blank_line(doc)
  # TODO(wangpeng): Re-enable the following and choose inlined vs. link to numpy
  # doc according to some global switch.
  doc = _add_np_doc(doc, np_fun_name, np_f, link=link)
  return doc
# Which form of numpy docs to attach; overridable via TF_NP_DOC_FORM env var.
_np_doc_form = os.getenv('TF_NP_DOC_FORM', '1.16')
def get_np_doc_form():
  """Gets the form of the original numpy docstrings.
  Returns:
    See `set_np_doc_form` for the list of valid values.
  """
  return _np_doc_form
def set_np_doc_form(value):
  r"""Selects the form of the original numpy docstrings.
  This function sets a global variable that controls how a tf-numpy symbol's
  docstring should refer to the original numpy docstring. If `value` is
  `'inlined'`, the numpy docstring will be verbatim copied into the tf-numpy
  docstring. Otherwise, a link to the original numpy docstring will be
  added. Which numpy version the link points to depends on `value`:
  * `'stable'`: the current stable version;
  * `'dev'`: the current development version;
  * pattern `\d+(\.\d+(\.\d+)?)?`: `value` will be treated as a version number,
    e.g. '1.16'.
  Args:
    value: the value to set the global variable to.
  """
  global _np_doc_form
  _np_doc_form = value
class Link:
  """Marker: `value` is the complete URL to use as the numpy-doc link."""

  def __init__(self, url):
    self.value = url
class AliasOf:
  """Marker: generate the link from `value` instead of the function name."""

  def __init__(self, alias):
    self.value = alias
class NoLink:
  """Marker: attach no numpy-doc link at all."""
def generate_link(flag, np_fun_name):
  """Generates the numpy-doc URL for `np_fun_name` according to `flag`.

  Args:
    flag: the flag to control link form. See `set_np_doc_form`.
    np_fun_name: the numpy function name.

  Returns:
    The URL string, or None when `flag` selects no link form.
  """
  # Only adds link in this case
  if flag == 'dev':
    base = 'https://numpy.org/devdocs'
  elif flag == 'stable':
    base = 'https://numpy.org/doc/stable'
  elif re.match(r'\d+(\.\d+(\.\d+)?)?$', flag):
    # `flag` is the version number
    base = 'https://numpy.org/doc/' + flag
  else:
    return None
  return '%s/reference/generated/numpy.%s.html' % (base, np_fun_name)
# Whether generated numpy-doc links are verified with an HTTP request.
_is_check_link = (os.getenv('TF_NP_CHECK_LINK', 'False') in
                  ('True', 'true', '1'))
def is_check_link():
  """Returns whether generated numpy-doc links should be verified."""
  return _is_check_link
def set_check_link(value):
  """Sets whether generated numpy-doc links should be verified."""
  global _is_check_link
  _is_check_link = value
def _add_np_doc(doc, np_fun_name, np_f, link):
  """Appends the numpy docstring to `doc`, according to `set_np_doc_form`.
  See `set_np_doc_form` for how it controls the form of the numpy docstring.
  Args:
    doc: the docstring to be appended to.
    np_fun_name: the name of the numpy function.
    np_f: (optional) the numpy function.
    link: (optional) which link to use. See `np_doc` for details.
  Returns:
    `doc` with numpy docstring appended.
  """
  flag = get_np_doc_form()
  if flag == 'inlined':
    if _has_docstring(np_f):
      doc += 'Documentation for `numpy.%s`:\n\n' % np_fun_name
      # TODO(wangpeng): It looks like code snippets in numpy doc don't work
      # correctly with doctest. Fix that and remove the reformatting of the np_f
      # comment.
      doc += np_f.__doc__.replace('>>>', '>')
  elif isinstance(flag, str):
    if link is None:
      url = generate_link(flag, np_fun_name)
    elif isinstance(link, AliasOf):
      url = generate_link(flag, link.value)
    elif isinstance(link, Link):
      url = link.value
    else:
      # NoLink (or anything unrecognized): attach nothing.
      url = None
    if url is not None:
      if is_check_link():
        # Imports locally because some builds may not have `requests`
        import requests  # pylint: disable=g-import-not-at-top
        r = requests.head(url)
        if r.status_code != 200:
          raise ValueError("Can't open link for %s: %s" % (np_fun_name, url))
      doc += 'See the NumPy documentation for [`numpy.%s`](%s).' % (
          np_fun_name, url)
  return doc
# Whether a signature mismatch between a tf-numpy function and its numpy
# counterpart raises (strict mode) instead of being tolerated.
_is_sig_mismatch_an_error = (
    os.getenv('TF_NP_SIG_MISMATCH_IS_ERROR', 'False') in ('True', 'true', '1'))
def is_sig_mismatch_an_error():
  """Returns whether signature mismatches raise TypeError in `np_doc`."""
  return _is_sig_mismatch_an_error
def set_is_sig_mismatch_an_error(value):
  """Sets whether signature mismatches raise TypeError in `np_doc`."""
  global _is_sig_mismatch_an_error
  _is_sig_mismatch_an_error = value
def np_doc(np_fun_name, np_fun=None, export=True, link=None):
  """Attachs numpy docstring to a function.
  Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name shoud be set.
    np_fun: (optional) the numpy function whose docstring will be used.
    export: whether to export this symbol under module
      `tf.experimental.numpy`. Note that if `export` is `True`, `np_fun` must be
      a function directly under the `numpy` module, not under any submodule of
      `numpy` (e.g. `numpy.random`).
    link: (optional) which link to use. If `None`, a default link generated from
      `np_fun_name` will be used. If an instance of `AliasOf`, `link.value` will
      be used in place of `np_fun_name` for the link generation. If an instance
      of `Link`, `link.value` will be used as the whole link. If an instance of
      `NoLink`, no link will be added.
  Returns:
    A function decorator that attaches the docstring from `np_fun` to the
    decorated function.
  """
  np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)
  np_sig = _np_signature(np_fun)
  def decorator(f):
    """The decorator."""
    unsupported_params = []
    if hasattr(inspect, 'signature') and np_sig is not None:
      try:
        sig = inspect.signature(f)
      except ValueError:
        sig = None
      if sig is not None:
        # Each parameter of `f` must exist in numpy's signature; kind and
        # default-ness are also checked when strict mode is on.
        for name, param in sig.parameters.items():
          np_param = np_sig.parameters.get(name)
          if np_param is None:
            if is_sig_mismatch_an_error():
              raise TypeError(
                  'Cannot find parameter "%s" in the numpy function\'s '
                  'signature (which has these parameters: %s)' %
                  (name, list(np_sig.parameters.keys())))
            else:
              continue
          if (is_sig_mismatch_an_error() and
              not _is_compatible_param_kind(param.kind, np_param.kind)):
            raise TypeError(
                'Parameter "%s" is of kind %s while in numpy it is of '
                'kind %s' % (name, param.kind, np_param.kind))
          has_default = (param.default != inspect.Parameter.empty)
          np_has_default = (np_param.default != inspect.Parameter.empty)
          if is_sig_mismatch_an_error() and has_default != np_has_default:
            raise TypeError('Parameter "%s" should%s have a default value' %
                            (name, '' if np_has_default else ' not'))
        # numpy parameters absent from `f` are reported as unsupported.
        for name in np_sig.parameters:
          if name not in sig.parameters:
            unsupported_params.append(name)
    f.__doc__ = _np_doc_helper(
        f, np_fun, np_fun_name=np_fun_name,
        unsupported_params=unsupported_params, link=link)
    if export:
      return np_export.np_export(np_fun_name)(f)
    else:
      return f
  return decorator
def np_doc_only(np_fun_name, np_fun=None, export=True):
  """Attachs numpy docstring to a function.
  This differs from np_doc in that it doesn't check for a match in signature.
  Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name shoud be set.
    np_fun: (optional) the numpy function whose docstring will be used.
    export: whether to export this symbol under module
      `tf.experimental.numpy`. Note that if `export` is `True`, `np_f` must be a
      function directly under the `numpy` module, not under any submodule of
      `numpy` (e.g. `numpy.random`).
  Returns:
    A function decorator that attaches the docstring from `np_fun` to the
    decorated function.
  """
  np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)
  def decorator(f):
    # No signature validation here, unlike np_doc.
    f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)
    if export:
      return np_export.np_export(np_fun_name)(f)
    else:
      return f
  return decorator
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-docstring-missing-newline,g-doc-return-or-yield,g-doc-args
@np_doc('finfo')
def finfo(dtype):
  """Note that currently it just forwards to the numpy namesake, while
  tensorflow and numpy dtypes may have different properties."""
  return np.finfo(_to_numpy_type(dtype))
# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-docstring-missing-newline,g-doc-return-or-yield,g-doc-args
def _maybe_get_dtype(x):
  """Returns a numpy type if available from x. Skips if x is numpy.ndarray."""
  # Don't put np.ndarray in this list, because np.result_type looks at the
  # value (not just dtype) of np.ndarray to decide the result type.
  if isinstance(x, numbers.Real):
    return x
  if isinstance(x, (core.Tensor, indexed_slices.IndexedSlices)):
    return _to_numpy_type(x.dtype)
  if isinstance(x, dtypes.DType):
    return x.as_numpy_dtype
  if isinstance(x, (list, tuple)):
    # Sequences have no single dtype; callers catch this to fall back.
    raise ValueError('Got sequence')
  return x
# Can't use np_doc because np.result_type is a builtin function.
@np_doc_only('result_type')
def result_type(*arrays_and_dtypes):  # pylint: disable=missing-function-docstring
  arrays_and_dtypes = [
      _maybe_get_dtype(x) for x in nest.flatten(arrays_and_dtypes)
  ]
  if not arrays_and_dtypes:
    # If arrays_and_dtypes is an empty list, let numpy decide what the dtype is.
    arrays_and_dtypes = [np.asarray([])]
  return np_dtypes._result_type(*arrays_and_dtypes)  # pylint: disable=protected-access
def _result_type_binary(t1, t2):  # pylint: disable=missing-function-docstring
  """A specialization of result_type for 2 arguments for performance reasons."""
  try:
    return np_dtypes._result_type(_maybe_get_dtype(t1),  # pylint: disable=protected-access
                                  _maybe_get_dtype(t2))  # pylint: disable=protected-access
  except ValueError:
    # e.g. one argument is a sequence; fall back to the general path.
    return result_type(t1, t2)
@np_doc('promote_types')
def promote_types(type1, type2):  # pylint: disable=missing-function-docstring
  type1 = _to_numpy_type(type1)
  type2 = _to_numpy_type(type2)
  return np_dtypes.canonicalize_dtype(np.promote_types(type1, type2))
def tf_broadcast(*args):
  """Broadcast tensors.
  Args:
    *args: a list of tensors whose shapes are broadcastable against each other.
  Returns:
    Tensors broadcasted to the common shape.
  """
  if len(args) <= 1:
    # Zero or one tensor: nothing to broadcast (the tuple passes through).
    return args
  sh = array_ops.shape(args[0])
  for arg in args[1:]:
    sh = array_ops.broadcast_dynamic_shape(sh, array_ops.shape(arg))
  return [array_ops.broadcast_to(arg, sh) for arg in args]
# TODO(wangpeng): Move the following functions to a separate file and check for
# float dtypes in each of them.
def get_static_value(x):
  """A version of tf.get_static_value that returns None on float dtypes.
  It returns None on float dtypes in order to avoid breaking gradients.
  Args:
    x: a tensor.
  Returns:
    Same as `tf.get_static_value`, except that it returns None when `x` has a
    float dtype.
  """
  if isinstance(x, core.Tensor) and (x.dtype.is_floating or x.dtype.is_complex):
    return None
  return tensor_util.constant_value(x)
def _maybe_static(x):
  # The statically-known (numpy) value of `x` when available, else `x` itself.
  value = get_static_value(x)
  if value is None:
    return x
  else:
    return value
# All the following functions exist becaues get_static_value can't handle
# their TF counterparts.
def cond(pred, true_fn, false_fn):
  """A version of tf.cond that tries to evaluate the condition."""
  v = get_static_value(pred)
  if v is None:
    # Condition unknown until runtime: defer to graph-level cond.
    return control_flow_ops.cond(pred, true_fn, false_fn)
  if v:
    return true_fn()
  else:
    return false_fn()
# Eager arithmetic/comparison helpers: operate on static values when known.
def add(a, b):
  """A version of tf.add that eagerly evaluates if possible."""
  return _maybe_static(a) + _maybe_static(b)
def subtract(a, b):
  """A version of tf.subtract that eagerly evaluates if possible."""
  return _maybe_static(a) - _maybe_static(b)
def greater(a, b):
  """A version of tf.greater that eagerly evaluates if possible."""
  return _maybe_static(a) > _maybe_static(b)
def greater_equal(a, b):
  """A version of tf.greater_equal that eagerly evaluates if possible."""
  return _maybe_static(a) >= _maybe_static(b)
def less_equal(a, b):
  """A version of tf.less_equal that eagerly evaluates if possible."""
  return _maybe_static(a) <= _maybe_static(b)
def logical_and(a, b):
  """A version of tf.logical_and that eagerly evaluates if possible."""
  a_value = get_static_value(a)
  if a_value is not None:
    if np.isscalar(a_value):
      # Scalar short-circuit: only evaluate/return b when a is truthy.
      if a_value:
        return _maybe_static(b)
      else:
        return a_value
    else:
      # Array case: elementwise & against b's static value when available.
      return a_value & _maybe_static(b)
  else:
    return a & _maybe_static(b)
def logical_or(a, b):
  """A version of tf.logical_or that eagerly evaluates if possible."""
  a_value = get_static_value(a)
  if a_value is not None:
    if np.isscalar(a_value):
      # Scalar short-circuit: truthy a wins outright.
      if a_value:
        return a_value
      else:
        return _maybe_static(b)
    else:
      return a_value | _maybe_static(b)
  else:
    return a | _maybe_static(b)
def getitem(a, slice_spec):
  """A version of __getitem__ that eagerly evaluates if possible."""
  return _maybe_static(a)[slice_spec]
def reduce_all(input_tensor, axis=None, keepdims=False):
  """A version of tf.reduce_all that eagerly evaluates if possible."""
  v = get_static_value(input_tensor)
  if v is None:
    return math_ops.reduce_all(input_tensor, axis=axis, keepdims=keepdims)
  else:
    # Static value known: use numpy's reduction directly.
    return v.all(axis=axis, keepdims=keepdims)
def reduce_any(input_tensor, axis=None, keepdims=False):
  """A version of tf.reduce_any that eagerly evaluates if possible."""
  v = get_static_value(input_tensor)
  if v is None:
    return math_ops.reduce_any(input_tensor, axis=axis, keepdims=keepdims)
  else:
    return v.any(axis=axis, keepdims=keepdims)
def tf_rank(t):
  # Static rank when the shape is fully known, else a runtime rank op.
  r = t.shape.rank
  if r is not None:
    return r
  return array_ops.rank(t)
|
tictakk/servo
|
refs/heads/ticbranch
|
tests/wpt/web-platform-tests/html/infrastructure/urls/resolving-urls/query-encoding/resources/blank.py
|
253
|
def main(request, response):
    """WPT handler: serves an empty text/html body whose charset is taken
    verbatim from the `encoding` query parameter."""
    charset = request.GET['encoding']
    headers = [("Content-Type", "text/html; charset=%s" % charset)]
    return headers, ""
|
alshedivat/tensorflow
|
refs/heads/master
|
tensorflow/contrib/learn/python/learn/estimators/tensor_signature_test.py
|
137
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.estimators.tensor_signature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class TensorSignatureTest(test.TestCase):
  def testTensorPlaceholderNone(self):
    # create_placeholders_from_signatures passes None through unchanged.
    self.assertEqual(None,
                     tensor_signature.create_placeholders_from_signatures(None))
  def testTensorSignatureNone(self):
    # create_signatures also passes None through unchanged.
    self.assertEqual(None, tensor_signature.create_signatures(None))
def testTensorSignatureCompatible(self):
placeholder_a = array_ops.placeholder(
name='test', shape=[None, 100], dtype=dtypes.int32)
placeholder_b = array_ops.placeholder(
name='another', shape=[256, 100], dtype=dtypes.int32)
placeholder_c = array_ops.placeholder(
name='mismatch', shape=[256, 100], dtype=dtypes.float32)
placeholder_d = array_ops.placeholder(
name='mismatch', shape=[128, 100], dtype=dtypes.int32)
signatures = tensor_signature.create_signatures(placeholder_a)
self.assertTrue(tensor_signature.tensors_compatible(None, None))
self.assertFalse(tensor_signature.tensors_compatible(None, signatures))
self.assertFalse(tensor_signature.tensors_compatible(placeholder_a, None))
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_a, signatures))
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_b, signatures))
self.assertFalse(
tensor_signature.tensors_compatible(placeholder_c, signatures))
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_d, signatures))
inputs = {'a': placeholder_a}
signatures = tensor_signature.create_signatures(inputs)
self.assertTrue(tensor_signature.tensors_compatible(inputs, signatures))
self.assertFalse(
tensor_signature.tensors_compatible(placeholder_a, signatures))
self.assertFalse(
tensor_signature.tensors_compatible(placeholder_b, signatures))
self.assertFalse(
tensor_signature.tensors_compatible({
'b': placeholder_b
}, signatures))
self.assertTrue(
tensor_signature.tensors_compatible({
'a': placeholder_b,
'c': placeholder_c
}, signatures))
self.assertFalse(
tensor_signature.tensors_compatible({
'a': placeholder_c
}, signatures))
def testSparseTensorCompatible(self):
t = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
signatures = tensor_signature.create_signatures(t)
self.assertTrue(tensor_signature.tensors_compatible(t, signatures))
def testTensorSignaturePlaceholders(self):
placeholder_a = array_ops.placeholder(
name='test', shape=[None, 100], dtype=dtypes.int32)
signatures = tensor_signature.create_signatures(placeholder_a)
placeholder_out = tensor_signature.create_placeholders_from_signatures(
signatures)
self.assertEqual(placeholder_out.dtype, placeholder_a.dtype)
self.assertTrue(placeholder_out.get_shape().is_compatible_with(
placeholder_a.get_shape()))
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_out, signatures))
inputs = {'a': placeholder_a}
signatures = tensor_signature.create_signatures(inputs)
placeholders_out = tensor_signature.create_placeholders_from_signatures(
signatures)
self.assertEqual(placeholders_out['a'].dtype, placeholder_a.dtype)
self.assertTrue(placeholders_out['a'].get_shape().is_compatible_with(
placeholder_a.get_shape()))
self.assertTrue(
tensor_signature.tensors_compatible(placeholders_out, signatures))
def testSparseTensorSignaturePlaceholders(self):
tensor = sparse_tensor.SparseTensor(
values=[1.0, 2.0], indices=[[0, 2], [0, 3]], dense_shape=[5, 5])
signature = tensor_signature.create_signatures(tensor)
placeholder = tensor_signature.create_placeholders_from_signatures(
signature)
self.assertTrue(isinstance(placeholder, sparse_tensor.SparseTensor))
self.assertEqual(placeholder.values.dtype, tensor.values.dtype)
def testTensorSignatureExampleParserSingle(self):
examples = array_ops.placeholder(
name='example', shape=[None], dtype=dtypes.string)
placeholder_a = array_ops.placeholder(
name='test', shape=[None, 100], dtype=dtypes.int32)
signatures = tensor_signature.create_signatures(placeholder_a)
result = tensor_signature.create_example_parser_from_signatures(signatures,
examples)
self.assertTrue(tensor_signature.tensors_compatible(result, signatures))
new_signatures = tensor_signature.create_signatures(result)
self.assertTrue(new_signatures.is_compatible_with(signatures))
def testTensorSignatureExampleParserDict(self):
examples = array_ops.placeholder(
name='example', shape=[None], dtype=dtypes.string)
placeholder_a = array_ops.placeholder(
name='test', shape=[None, 100], dtype=dtypes.int32)
placeholder_b = array_ops.placeholder(
name='bb', shape=[None, 100], dtype=dtypes.float64)
inputs = {'a': placeholder_a, 'b': placeholder_b}
signatures = tensor_signature.create_signatures(inputs)
result = tensor_signature.create_example_parser_from_signatures(signatures,
examples)
self.assertTrue(tensor_signature.tensors_compatible(result, signatures))
new_signatures = tensor_signature.create_signatures(result)
self.assertTrue(new_signatures['a'].is_compatible_with(signatures['a']))
self.assertTrue(new_signatures['b'].is_compatible_with(signatures['b']))
def testUnknownShape(self):
placeholder_unk = array_ops.placeholder(
name='unk', shape=None, dtype=dtypes.string)
placeholder_a = array_ops.placeholder(
name='a', shape=[None], dtype=dtypes.string)
placeholder_b = array_ops.placeholder(
name='b', shape=[128, 2], dtype=dtypes.string)
placeholder_c = array_ops.placeholder(
name='c', shape=[128, 2], dtype=dtypes.int32)
unk_signature = tensor_signature.create_signatures(placeholder_unk)
# Tensors of same dtype match unk shape signature.
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_unk, unk_signature))
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_a, unk_signature))
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_b, unk_signature))
self.assertFalse(
tensor_signature.tensors_compatible(placeholder_c, unk_signature))
string_signature = tensor_signature.create_signatures(placeholder_a)
int_signature = tensor_signature.create_signatures(placeholder_c)
# Unk shape Tensor matche signatures same dtype.
self.assertTrue(
tensor_signature.tensors_compatible(placeholder_unk, string_signature))
self.assertFalse(
tensor_signature.tensors_compatible(placeholder_unk, int_signature))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  test.main()
|
40223209/test111
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/this.py
|
948
|
# ROT13-encoded text of "The Zen of Python" by Tim Peters.
s = """Gur Mra bs Clguba, ol Gvz Crgref
Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""

# ROT13 translation table covering upper-case (ord 65) and lower-case
# (ord 97) ASCII letters; every other character maps to itself via d.get.
d = {chr(base + offset): chr(base + (offset + 13) % 26)
     for base in (65, 97)
     for offset in range(26)}

# Decode and print the poem.
print("".join(d.get(ch, ch) for ch in s))
|
nikhilraog/boto
|
refs/heads/develop
|
tests/unit/utils/test_utils.py
|
63
|
# Copyright (c) 2010 Robert Mela
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.compat import mock, unittest
import datetime
import hashlib
import hmac
import locale
import time
import boto.utils
from boto.utils import Password
from boto.utils import pythonize_name
from boto.utils import _build_instance_metadata_url
from boto.utils import get_instance_userdata
from boto.utils import retry_url
from boto.utils import LazyLoadMetadata
from boto.compat import json, _thread
# Skipped: CPython's strptime performs a non-thread-safe import on first
# use; see the referenced bug for why concurrent parse_ts calls can fail.
@unittest.skip("http://bugs.python.org/issue7980")
class TestThreadImport(unittest.TestCase):
    """Hammer boto.utils.parse_ts from several raw threads at once."""

    def test_strptime(self):
        def f():
            # Repeatedly parse a fixed timestamp to maximize the chance of
            # hitting the strptime import race.
            for m in range(1, 13):
                for d in range(1,29):
                    boto.utils.parse_ts('2013-01-01T00:00:00Z')
        for _ in range(10):
            _thread.start_new_thread(f, ())
        # Give the detached threads time to run before the test exits.
        time.sleep(3)
class TestPassword(unittest.TestCase):
    """Test basic password functionality"""

    def clstest(self, cls):
        """Insure that password.__eq__ hashes test value before compare."""
        # Constructing from a string stores it as the (already hashed)
        # value, so plaintext 'foo' must not compare equal here.
        password = cls('foo')
        self.assertNotEquals(password, 'foo')
        # set() hashes the plaintext; equality then hashes the other side.
        password.set('foo')
        hashed = str(password)
        self.assertEquals(password, 'foo')
        self.assertEquals(password.str, hashed)
        # Rebuilding from the stored hash compares equal to the plaintext
        # without ever storing the plaintext itself.
        password = cls(hashed)
        self.assertNotEquals(password.str, 'foo')
        self.assertEquals(password, 'foo')
        self.assertEquals(password.str, hashed)

    def test_aaa_version_1_9_default_behavior(self):
        # Default Password class (named 'aaa' to run first).
        self.clstest(Password)

    def test_custom_hashclass(self):
        # A subclass may swap in a different hash function.
        class SHA224Password(Password):
            hashfunc = hashlib.sha224
        password = SHA224Password()
        password.set('foo')
        self.assertEquals(hashlib.sha224(b'foo').hexdigest(), str(password))

    def test_hmac(self):
        # hashfunc may also be an HMAC factory bound to a secret key.
        def hmac_hashfunc(cls, msg):
            if not isinstance(msg, bytes):
                msg = msg.encode('utf-8')
            return hmac.new(b'mysecretkey', msg)
        class HMACPassword(Password):
            hashfunc = hmac_hashfunc
        self.clstest(HMACPassword)
        password = HMACPassword()
        password.set('foo')
        self.assertEquals(str(password),
                          hmac.new(b'mysecretkey', b'foo').hexdigest())

    def test_constructor(self):
        # hashfunc can also be injected via the constructor.
        hmac_hashfunc = lambda msg: hmac.new(b'mysecretkey', msg)
        password = Password(hashfunc=hmac_hashfunc)
        password.set('foo')
        self.assertEquals(password.str,
                          hmac.new(b'mysecretkey', b'foo').hexdigest())
class TestPythonizeName(unittest.TestCase):
    """Behavior checks for boto.utils.pythonize_name (CamelCase -> snake_case)."""

    def test_empty_string(self):
        self.assertEqual('', pythonize_name(''))

    def test_all_lower_case(self):
        self.assertEqual('lowercase', pythonize_name('lowercase'))

    def test_all_upper_case(self):
        self.assertEqual('uppercase', pythonize_name('UPPERCASE'))

    def test_camel_case(self):
        self.assertEqual('originally_camel_cased',
                         pythonize_name('OriginallyCamelCased'))

    def test_already_pythonized(self):
        self.assertEqual('already_pythonized',
                         pythonize_name('already_pythonized'))

    def test_multiple_upper_cased_letters(self):
        # Runs of capitals are kept together as one word.
        self.assertEqual('http_request', pythonize_name('HTTPRequest'))
        self.assertEqual('request_for_http', pythonize_name('RequestForHTTP'))

    def test_string_with_numbers(self):
        self.assertEqual('http_status_200_ok', pythonize_name('HTTPStatus200Ok'))
class TestBuildInstanceMetadataURL(unittest.TestCase):
    """Behavior checks for boto.utils._build_instance_metadata_url."""

    def test_normal(self):
        # This is the all-defaults case.
        built = _build_instance_metadata_url(
            'http://169.254.169.254', 'latest', 'meta-data/')
        self.assertEqual(built, 'http://169.254.169.254/latest/meta-data/')

    def test_custom_path(self):
        built = _build_instance_metadata_url(
            'http://169.254.169.254', 'latest', 'dynamic/')
        self.assertEqual(built, 'http://169.254.169.254/latest/dynamic/')

    def test_custom_version(self):
        built = _build_instance_metadata_url(
            'http://169.254.169.254', '1.0', 'meta-data/')
        self.assertEqual(built, 'http://169.254.169.254/1.0/meta-data/')

    def test_custom_url(self):
        built = _build_instance_metadata_url(
            'http://10.0.1.5', 'latest', 'meta-data/')
        self.assertEqual(built, 'http://10.0.1.5/latest/meta-data/')

    def test_all_custom(self):
        built = _build_instance_metadata_url(
            'http://10.0.1.5', '2013-03-22', 'user-data')
        self.assertEqual(built, 'http://10.0.1.5/2013-03-22/user-data')
class TestRetryURL(unittest.TestCase):
    """Tests for boto.utils.retry_url with urllib fully mocked out."""

    def setUp(self):
        # Patch both the plain urlopen path and the build_opener path
        # (the latter is what retry_url uses to bypass proxies).
        self.urlopen_patch = mock.patch('boto.compat.urllib.request.urlopen')
        self.opener_patch = mock.patch('boto.compat.urllib.request.build_opener')
        self.urlopen = self.urlopen_patch.start()
        self.opener = self.opener_patch.start()

    def tearDown(self):
        self.urlopen_patch.stop()
        self.opener_patch.stop()

    def set_normal_response(self, response):
        # Response delivered via the plain urlopen path.
        fake_response = mock.Mock()
        fake_response.read.return_value = response
        self.urlopen.return_value = fake_response

    def set_no_proxy_allowed_response(self, response):
        # Response delivered via the proxy-bypassing opener path.
        fake_response = mock.Mock()
        fake_response.read.return_value = response
        self.opener.return_value.open.return_value = fake_response

    def test_retry_url_uses_proxy(self):
        # Both paths are primed; the assertion shows retry_url takes the
        # opener (proxy-bypass) path for this URL.
        self.set_normal_response('normal response')
        self.set_no_proxy_allowed_response('no proxy response')
        response = retry_url('http://10.10.10.10/foo', num_retries=1)
        self.assertEqual(response, 'no proxy response')

    def test_retry_url_using_bytes_and_string_response(self):
        # retry_url should return the same text whether the underlying
        # read() yields str or bytes.
        test_value = 'normal response'
        fake_response = mock.Mock()
        # test using unicode
        fake_response.read.return_value = test_value
        self.opener.return_value.open.return_value = fake_response
        response = retry_url('http://10.10.10.10/foo', num_retries=1)
        self.assertEqual(response, test_value)
        # test using bytes
        fake_response.read.return_value = test_value.encode('utf-8')
        self.opener.return_value.open.return_value = fake_response
        response = retry_url('http://10.10.10.10/foo', num_retries=1)
        self.assertEqual(response, test_value)
class TestLazyLoadMetadata(unittest.TestCase):
    """Tests for boto.utils.LazyLoadMetadata retry/JSON behavior and the
    get_instance_userdata helper, with boto.utils.retry_url mocked out."""

    def setUp(self):
        self.retry_url_patch = mock.patch('boto.utils.retry_url')
        boto.utils.retry_url = self.retry_url_patch.start()

    def tearDown(self):
        self.retry_url_patch.stop()

    def set_normal_response(self, data):
        # here "data" should be a list of return values in some order
        fake_response = mock.Mock()
        fake_response.side_effect = data
        boto.utils.retry_url = fake_response

    def test_meta_data_with_invalid_json_format_happened_once(self):
        # here "key_data" will be stored in the "self._leaves"
        # when the class "LazyLoadMetadata" initialized
        key_data = "test"
        invalid_data = '{"invalid_json_format" : true,}'
        valid_data = '{ "%s" : {"valid_json_format": true}}' % key_data
        url = "/".join(["http://169.254.169.254", key_data])
        num_retries = 2
        # First value fetch returns invalid JSON; the retry returns valid
        # JSON, so the value should parse successfully.
        self.set_normal_response([key_data, invalid_data, valid_data])
        response = LazyLoadMetadata(url, num_retries)
        self.assertEqual(list(response.values())[0], json.loads(valid_data))

    def test_meta_data_with_invalid_json_format_happened_twice(self):
        key_data = "test"
        invalid_data = '{"invalid_json_format" : true,}'
        url = "/".join(["http://169.254.169.254", key_data])
        num_retries = 2
        # Invalid JSON on every attempt exhausts num_retries and surfaces
        # the parse failure as a ValueError when the value is materialized.
        self.set_normal_response([key_data, invalid_data, invalid_data])
        response = LazyLoadMetadata(url, num_retries)
        with self.assertRaises(ValueError):
            # Fix: wrap in list() — dict views are not indexable on
            # Python 3 (matches the sibling test above).
            list(response.values())[0]

    def test_user_data(self):
        self.set_normal_response(['foo'])
        userdata = get_instance_userdata()
        self.assertEqual('foo', userdata)
        boto.utils.retry_url.assert_called_with(
            'http://169.254.169.254/latest/user-data',
            retry_on_404=False,
            num_retries=5, timeout=None)

    def test_user_data_timeout(self):
        self.set_normal_response(['foo'])
        userdata = get_instance_userdata(timeout=1, num_retries=2)
        self.assertEqual('foo', userdata)
        boto.utils.retry_url.assert_called_with(
            'http://169.254.169.254/latest/user-data',
            retry_on_404=False,
            num_retries=2, timeout=1)
class TestStringToDatetimeParsing(unittest.TestCase):
    """ Test string to datetime parsing """

    def setUp(self):
        # Switch to a non-US locale so locale-dependent strptime month/day
        # names do not match the English RFC 1123 string; skip if the
        # locale is not installed on this machine.
        self._saved = locale.setlocale(locale.LC_ALL)
        try:
            locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
        except locale.Error:
            self.skipTest('Unsupported locale setting')

    def tearDown(self):
        # Always restore the process's original locale.
        locale.setlocale(locale.LC_ALL, self._saved)

    def test_nonus_locale(self):
        test_string = 'Thu, 15 May 2014 09:06:03 GMT'
        # Default strptime should fail
        with self.assertRaises(ValueError):
            datetime.datetime.strptime(test_string, boto.utils.RFC1123)
        # Our parser should succeed
        result = boto.utils.parse_ts(test_string)
        self.assertEqual(2014, result.year)
        self.assertEqual(5, result.month)
        self.assertEqual(15, result.day)
        self.assertEqual(9, result.hour)
        self.assertEqual(6, result.minute)
class TestHostIsIPV6(unittest.TestCase):
    """Behavior checks for boto.utils.host_is_ipv6."""

    def test_is_ipv6_no_brackets(self):
        self.assertTrue(
            boto.utils.host_is_ipv6('bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be'))

    def test_is_ipv6_with_brackets(self):
        self.assertTrue(
            boto.utils.host_is_ipv6('[bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be]'))

    def test_is_ipv6_with_brackets_and_port(self):
        self.assertTrue(
            boto.utils.host_is_ipv6(
                '[bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be]:8080'))

    def test_is_ipv6_no_brackets_abbreviated(self):
        self.assertTrue(boto.utils.host_is_ipv6('bf1d:cb48:4513::'))

    def test_is_ipv6_with_brackets_abbreviated(self):
        # Note: the opening bracket has no matching close bracket here.
        self.assertTrue(boto.utils.host_is_ipv6('[bf1d:cb48:4513::'))

    def test_is_ipv6_with_brackets_and_port_abbreviated(self):
        self.assertTrue(boto.utils.host_is_ipv6('[bf1d:cb48:4513::]:8080'))

    def test_empty_string(self):
        self.assertFalse(boto.utils.host_is_ipv6(''))

    def test_not_of_string_type(self):
        # Non-string inputs are never treated as IPv6 hosts.
        for value in (None, 0, False, [], {}):
            self.assertFalse(boto.utils.host_is_ipv6(value))

    def test_ipv4_no_port(self):
        self.assertFalse(boto.utils.host_is_ipv6('192.168.1.1'))

    def test_ipv4_with_port(self):
        self.assertFalse(boto.utils.host_is_ipv6('192.168.1.1:8080'))

    def test_hostnames_are_not_ipv6_with_port(self):
        self.assertFalse(boto.utils.host_is_ipv6('example.org:8080'))

    def test_hostnames_are_not_ipv6_without_port(self):
        self.assertFalse(boto.utils.host_is_ipv6('example.org'))
class TestParseHost(unittest.TestCase):
    """Behavior checks for boto.utils.parse_host (strips brackets and ports)."""

    def test_parses_ipv6_hosts_no_brackets(self):
        host = 'bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be'
        self.assertEquals(host, boto.utils.parse_host(host))

    def test_parses_ipv6_hosts_with_brackets_stripping_them(self):
        self.assertEquals(
            'bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be',
            boto.utils.parse_host('[bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be]'))

    def test_parses_ipv6_hosts_with_brackets_and_port(self):
        self.assertEquals(
            'bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be',
            boto.utils.parse_host(
                '[bf1d:cb48:4513:d1f1:efdd:b290:9ff9:64be]:8080'))

    def test_parses_ipv4_hosts(self):
        host = '10.0.1.1'
        self.assertEquals(host, boto.utils.parse_host(host))

    def test_parses_ipv4_hosts_with_port(self):
        self.assertEquals('192.168.168.200',
                          boto.utils.parse_host('192.168.168.200:8080'))

    def test_parses_hostnames_with_port(self):
        self.assertEquals('example.org',
                          boto.utils.parse_host('example.org:8080'))

    def test_parses_hostnames_without_port(self):
        host = 'example.org'
        self.assertEquals(host, boto.utils.parse_host(host))
# Allow running this module's tests directly.
if __name__ == '__main__':
    unittest.main()
|
CallmeTorre/Idalia
|
refs/heads/master
|
ESCOM/SolicitarDocumento/urls.py
|
1
|
from django.conf.urls import url
import views as v_views
# URL routes for the three-step "Solicitar Documento" wizard; each step is
# served by its own class-based view and named for reverse() lookups.
urlpatterns = [
    url(r'^paso1/$', v_views.SolicitarDocumentoPaso1.as_view(), name='paso1SD'),
    url(r'^paso2/$', v_views.SolicitarDocumentoPaso2.as_view(), name='paso2SD'),
    url(r'^paso3/$', v_views.SolicitarDocumentoPaso3.as_view(), name='paso3SD'),
]
|
utkarsh-goswami/erpnext
|
refs/heads/develop
|
erpnext/accounts/doctype/payment_reconciliation_payment/payment_reconciliation_payment.py
|
121
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PaymentReconciliationPayment(Document):
	"""Model class for the Payment Reconciliation Payment doctype.

	No custom server-side behavior; declared so frappe's model loader can
	resolve the doctype to a Document subclass.
	"""
	pass
|
b-carter/numpy
|
refs/heads/master
|
numpy/distutils/command/build_src.py
|
40
|
""" Build swig and f2py sources.
"""
from __future__ import division, absolute_import, print_function
import os
import re
import sys
import shlex
import copy
from distutils.command import build_ext
from distutils.dep_util import newer_group, newer
from distutils.util import get_platform
from distutils.errors import DistutilsError, DistutilsSetupError
# this import can't be done here, as it uses numpy stuff only available
# after it's installed
#import numpy.f2py
from numpy.distutils import log
from numpy.distutils.misc_util import (
fortran_ext_match, appendpath, is_string, is_sequence, get_cmd
)
from numpy.distutils.from_template import process_file as process_f_file
from numpy.distutils.conv_template import process_file as process_c_file
def subst_vars(target, source, d):
    """Substitute any occurrence of @foo@ by d['foo'] from source file into
    target.

    Parameters
    ----------
    target : str
        Path of the file to write.
    source : str
        Path of the template file to read.
    d : dict
        Maps variable names (the ``foo`` in ``@foo@``) to replacement text.

    Raises
    ------
    KeyError
        If a line references a variable not present in `d`.

    Notes
    -----
    Only the first variable *name* found on each line is substituted (all
    occurrences of that one name on the line are replaced); this preserves
    the historical behavior.
    """
    var = re.compile(r'@([a-zA-Z_]+)@')
    # `with` guarantees both files are closed even if a substitution fails.
    with open(source, 'r') as fs:
        with open(target, 'w') as ft:
            for l in fs:
                m = var.search(l)
                if m:
                    ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
                else:
                    ft.write(l)
class build_src(build_ext.build_ext):
description = "build sources from SWIG, F2PY files or a function"
user_options = [
('build-src=', 'd', "directory to \"build\" sources to"),
('f2py-opts=', None, "list of f2py command line options"),
('swig=', None, "path to the SWIG executable"),
('swig-opts=', None, "list of SWIG command line options"),
('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
('force', 'f', "forcibly build everything (ignore file timestamps)"),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
]
boolean_options = ['force', 'inplace']
help_options = []
    def initialize_options(self):
        """Reset every command option to None; finalize_options fills them in."""
        self.extensions = None
        self.package = None
        self.py_modules = None
        self.py_modules_dict = None
        self.build_src = None
        self.build_lib = None
        self.build_base = None
        self.force = None
        self.inplace = None
        self.package_dir = None
        self.f2pyflags = None # obsolete
        self.f2py_opts = None
        self.swigflags = None # obsolete
        self.swig_opts = None
        self.swig_cpp = None
        self.swig = None
    def finalize_options(self):
        """Resolve options from the distribution, the `build` command, the
        obsolete --f2pyflags/--swigflags aliases, and the `build_ext` command."""
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('force', 'force'))
        if self.package is None:
            self.package = self.distribution.ext_package
        self.extensions = self.distribution.ext_modules
        self.libraries = self.distribution.libraries or []
        self.py_modules = self.distribution.py_modules or []
        self.data_files = self.distribution.data_files or []
        if self.build_src is None:
            # Default source-build dir is platform- and version-specific,
            # e.g. build/src.linux-x86_64-3.6.
            plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
            self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
        # py_modules_dict is used in build_py.find_package_modules
        self.py_modules_dict = {}
        # Obsolete --f2pyflags is honored only when --f2py-opts is absent.
        if self.f2pyflags:
            if self.f2py_opts:
                log.warn('ignoring --f2pyflags as --f2py-opts already used')
            else:
                self.f2py_opts = self.f2pyflags
            self.f2pyflags = None
        if self.f2py_opts is None:
            self.f2py_opts = []
        else:
            self.f2py_opts = shlex.split(self.f2py_opts)
        # Same deprecation handling for --swigflags vs --swig-opts.
        if self.swigflags:
            if self.swig_opts:
                log.warn('ignoring --swigflags as --swig-opts already used')
            else:
                self.swig_opts = self.swigflags
            self.swigflags = None
        if self.swig_opts is None:
            self.swig_opts = []
        else:
            self.swig_opts = shlex.split(self.swig_opts)
        # use options from build_ext command
        build_ext = self.get_finalized_command('build_ext')
        if self.inplace is None:
            self.inplace = build_ext.inplace
        if self.swig_cpp is None:
            self.swig_cpp = build_ext.swig_cpp
        # Inherit swig/swig_opt from build_ext unless set here already.
        for c in ['swig', 'swig_opt']:
            o = '--'+c.replace('_', '-')
            v = getattr(build_ext, c, None)
            if v:
                if getattr(self, c):
                    log.warn('both build_src and build_ext define %s option' % (o))
                else:
                    log.info('using "%s=%s" option from build_ext command' % (o, v))
                    setattr(self, c, v)
def run(self):
log.info("build_src")
if not (self.extensions or self.libraries):
return
self.build_sources()
    def build_sources(self):
        """Drive all source generation: py_modules, libraries, extensions,
        data files and npy-pkg-config files."""
        if self.inplace:
            # In-place builds write generated files next to the package
            # sources, so we need build_py's package-dir resolver.
            self.get_package_dir = \
                self.get_finalized_command('build_py').get_package_dir
        self.build_py_modules_sources()
        for libname_info in self.libraries:
            self.build_library_sources(*libname_info)
        if self.extensions:
            self.check_extensions_list(self.extensions)
            for ext in self.extensions:
                self.build_extension_sources(ext)
        self.build_data_files_sources()
        self.build_npy_pkg_config()
    def build_data_files_sources(self):
        """Expand callable entries in self.data_files into concrete filenames,
        rewriting self.data_files in place."""
        if not self.data_files:
            return
        log.info('building data_files sources')
        from numpy.distutils.misc_util import get_data_files
        new_data_files = []
        for data in self.data_files:
            if isinstance(data, str):
                # Plain filename: pass through unchanged.
                new_data_files.append(data)
            elif isinstance(data, tuple):
                d, files = data
                if self.inplace:
                    build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
                else:
                    build_dir = os.path.join(self.build_src, d)
                # Callables generate files at build time; a one-argument
                # callable receives the build directory.
                funcs = [f for f in files if hasattr(f, '__call__')]
                files = [f for f in files if not hasattr(f, '__call__')]
                for f in funcs:
                    if f.__code__.co_argcount==1:
                        s = f(build_dir)
                    else:
                        s = f()
                    if s is not None:
                        if isinstance(s, list):
                            files.extend(s)
                        elif isinstance(s, str):
                            files.append(s)
                        else:
                            raise TypeError(repr(s))
                filenames = get_data_files((d, files))
                new_data_files.append((d, filenames))
            else:
                raise TypeError(repr(data))
        self.data_files[:] = new_data_files
    def _build_npy_pkg_config(self, info, gd):
        """Generate one npy-pkg-config .ini file from a template.

        `info` is a (template_path, install_dir, subst_dict) triple; `gd`
        supplies extra substitution variables merged into subst_dict.
        Returns (install_dir_relative_to_prefix, generated_file_path).
        """
        import shutil  # NOTE(review): appears unused in this method
        template, install_dir, subst_dict = info
        template_dir = os.path.dirname(template)
        for k, v in gd.items():
            subst_dict[k] = v
        # In-place builds generate next to the template; otherwise under
        # the build_src tree.
        if self.inplace == 1:
            generated_dir = os.path.join(template_dir, install_dir)
        else:
            generated_dir = os.path.join(self.build_src, template_dir,
                                         install_dir)
        # Generated filename is the template name minus its extension.
        generated = os.path.basename(os.path.splitext(template)[0])
        generated_path = os.path.join(generated_dir, generated)
        if not os.path.exists(generated_dir):
            os.makedirs(generated_dir)
        subst_vars(generated_path, template, subst_dict)
        # Where to install relatively to install prefix
        full_install_dir = os.path.join(template_dir, install_dir)
        return full_install_dir, generated_path
    def build_npy_pkg_config(self):
        """Generate all npy-pkg-config files declared by the distribution and
        register them as data_files so they get installed."""
        log.info('build_src: building npy-pkg config files')
        # XXX: another ugly workaround to circumvent distutils brain damage. We
        # need the install prefix here, but finalizing the options of the
        # install command when only building sources cause error. Instead, we
        # copy the install command instance, and finalize the copy so that it
        # does not disrupt how distutils want to do things when with the
        # original install command instance.
        install_cmd = copy.copy(get_cmd('install'))
        if not install_cmd.finalized == 1:
            install_cmd.finalize_options()
        build_npkg = False
        gd = {}
        # Determine the prefix: '.' for in-place builds, otherwise the
        # install command's library base (when available).
        if self.inplace == 1:
            top_prefix = '.'
            build_npkg = True
        elif hasattr(install_cmd, 'install_libbase'):
            top_prefix = install_cmd.install_libbase
            build_npkg = True
        if build_npkg:
            for pkg, infos in self.distribution.installed_pkg_config.items():
                pkg_path = self.distribution.package_dir[pkg]
                prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
                d = {'prefix': prefix}
                for info in infos:
                    install_dir, generated = self._build_npy_pkg_config(info, d)
                    self.distribution.data_files.append((install_dir,
                                                         [generated]))
    def build_py_modules_sources(self):
        """Resolve (package, module, callable) entries in self.py_modules by
        calling the callable to generate the module source; record generated
        modules in self.py_modules_dict and rewrite self.py_modules in place."""
        if not self.py_modules:
            return
        log.info('building py_modules sources')
        new_py_modules = []
        for source in self.py_modules:
            # Only 3-sequences (package, module_base, source) get special
            # handling; anything else passes through untouched.
            if is_sequence(source) and len(source)==3:
                package, module_base, source = source
                if self.inplace:
                    build_dir = self.get_package_dir(package)
                else:
                    build_dir = os.path.join(self.build_src,
                                             os.path.join(*package.split('.')))
                if hasattr(source, '__call__'):
                    # The callable writes (or names) the module file itself.
                    target = os.path.join(build_dir, module_base + '.py')
                    source = source(target)
                if source is None:
                    continue
                modules = [(package, module_base, source)]
                if package not in self.py_modules_dict:
                    self.py_modules_dict[package] = []
                self.py_modules_dict[package] += modules
            else:
                new_py_modules.append(source)
        self.py_modules[:] = new_py_modules
    def build_library_sources(self, lib_name, build_info):
        """Run the source-generation pipeline (callables, templates, header
        filtering) for one library and store the result in build_info."""
        sources = list(build_info.get('sources', []))
        if not sources:
            return
        log.info('building library "%s" sources' % (lib_name))
        sources = self.generate_sources(sources, (lib_name, build_info))
        sources = self.template_sources(sources, (lib_name, build_info))
        # Headers are separated out but currently not installed anywhere.
        sources, h_files = self.filter_h_files(sources)
        if h_files:
            log.info('%s - nothing done with h_files = %s',
                     self.package, h_files)
        #for f in h_files:
        #    self.distribution.headers.append((lib_name,f))
        build_info['sources'] = sources
        return
    def build_extension_sources(self, ext):
        """Run the full source-generation pipeline (callables, templates,
        SWIG, f2py, pyrex, .py/.h filtering) for one extension, updating
        ext.sources and self.py_modules_dict in place."""
        sources = list(ext.sources)
        log.info('building extension "%s" sources' % (ext.name))
        fullname = self.get_ext_fullname(ext.name)
        modpath = fullname.split('.')
        package = '.'.join(modpath[0:-1])
        if self.inplace:
            self.ext_target_dir = self.get_package_dir(package)
        # Pipeline order matters: generated/templated sources may feed the
        # SWIG/f2py/pyrex steps that follow.
        sources = self.generate_sources(sources, ext)
        sources = self.template_sources(sources, ext)
        sources = self.swig_sources(sources, ext)
        sources = self.f2py_sources(sources, ext)
        sources = self.pyrex_sources(sources, ext)
        # Pure-Python outputs become modules of the enclosing package.
        sources, py_files = self.filter_py_files(sources)
        if package not in self.py_modules_dict:
            self.py_modules_dict[package] = []
        modules = []
        for f in py_files:
            module = os.path.splitext(os.path.basename(f))[0]
            modules.append((package, module, f))
        self.py_modules_dict[package] += modules
        # Headers are separated out but currently not installed anywhere.
        sources, h_files = self.filter_h_files(sources)
        if h_files:
            log.info('%s - nothing done with h_files = %s',
                     package, h_files)
        #for f in h_files:
        #    self.distribution.headers.append((package,f))
        ext.sources = sources
    def generate_sources(self, sources, extension):
        """Replace callable entries in `sources` by the filenames they
        generate; plain string sources pass through unchanged.

        `extension` is either an Extension instance or a
        (lib_name, build_info) sequence; each callable is invoked as
        func(extension, build_dir) and may return a name, a list of names,
        or a falsy value (skipped).
        """
        new_sources = []
        func_sources = []
        for source in sources:
            if is_string(source):
                new_sources.append(source)
            else:
                func_sources.append(source)
        if not func_sources:
            return new_sources
        # Pick the directory the callables should generate into.
        if self.inplace and not is_sequence(extension):
            build_dir = self.ext_target_dir
        else:
            if is_sequence(extension):
                name = extension[0]
            #    if 'include_dirs' not in extension[1]:
            #        extension[1]['include_dirs'] = []
            #    incl_dirs = extension[1]['include_dirs']
            else:
                name = extension.name
            #    incl_dirs = extension.include_dirs
            #if self.build_src not in incl_dirs:
            #    incl_dirs.append(self.build_src)
            build_dir = os.path.join(*([self.build_src]\
                                       +name.split('.')[:-1]))
        self.mkpath(build_dir)
        for func in func_sources:
            source = func(extension, build_dir)
            if not source:
                continue
            if is_sequence(source):
                [log.info("  adding '%s' to sources." % (s,)) for s in source]
                new_sources.extend(source)
            else:
                log.info("  adding '%s' to sources." % (source,))
                new_sources.append(source)
        return new_sources
    def filter_py_files(self, sources):
        """Split ``.py`` files out of `sources`; returns (others, py_files)."""
        return self.filter_files(sources, ['.py'])
    def filter_h_files(self, sources):
        """Split header files out of `sources`; returns (others, h_files)."""
        return self.filter_files(sources, ['.h', '.hpp', '.inc'])
def filter_files(self, sources, exts = []):
new_sources = []
files = []
for source in sources:
(base, ext) = os.path.splitext(source)
if ext in exts:
files.append(source)
else:
new_sources.append(source)
return new_sources, files
def template_sources(self, sources, extension):
new_sources = []
if is_sequence(extension):
depends = extension[1].get('depends')
include_dirs = extension[1].get('include_dirs')
else:
depends = extension.depends
include_dirs = extension.include_dirs
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.src': # Template file
if self.inplace:
target_dir = os.path.dirname(base)
else:
target_dir = appendpath(self.build_src, os.path.dirname(base))
self.mkpath(target_dir)
target_file = os.path.join(target_dir, os.path.basename(base))
if (self.force or newer_group([source] + depends, target_file)):
if _f_pyf_ext_match(base):
log.info("from_template:> %s" % (target_file))
outstr = process_f_file(source)
else:
log.info("conv_template:> %s" % (target_file))
outstr = process_c_file(source)
fid = open(target_file, 'w')
fid.write(outstr)
fid.close()
if _header_ext_match(target_file):
d = os.path.dirname(target_file)
if d not in include_dirs:
log.info(" adding '%s' to include_dirs." % (d))
include_dirs.append(d)
new_sources.append(target_file)
else:
new_sources.append(source)
return new_sources
def pyrex_sources(self, sources, extension):
"""Pyrex not supported; this remains for Cython support (see below)"""
new_sources = []
ext_name = extension.name.split('.')[-1]
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.pyx':
target_file = self.generate_a_pyrex_source(base, ext_name,
source,
extension)
new_sources.append(target_file)
else:
new_sources.append(source)
return new_sources
def generate_a_pyrex_source(self, base, ext_name, source, extension):
"""Pyrex is not supported, but some projects monkeypatch this method.
That allows compiling Cython code, see gh-6955.
This method will remain here for compatibility reasons.
"""
return []
    def f2py_sources(self, sources, extension):
        """Generate f2py wrapper sources for the extension.

        Either a single ``.pyf`` interface file drives the wrapper
        generation, or (absent one) a wrapper module is generated
        directly from the Fortran sources.  ``fortranobject.c`` and any
        f2py-generated ``-f2pywrappers*`` Fortran files are appended to
        the returned source list as needed.
        """
        new_sources = []
        f2py_sources = []
        f_sources = []
        # maps each .pyf source -> its generated <name>module.c path
        f2py_targets = {}
        target_dirs = []
        ext_name = extension.name.split('.')[-1]
        # set when the .pyf is absent and a pre-generated module.c is reused
        skip_f2py = 0
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.pyf': # F2PY interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                if os.path.isfile(source):
                    # the .pyf module name must agree with the extension name
                    name = get_f2py_modulename(source)
                    if name != ext_name:
                        raise DistutilsSetupError('mismatch of extension names: %s '
                                                  'provides %r but expected %r' % (
                            source, name, ext_name))
                    target_file = os.path.join(target_dir, name+'module.c')
                else:
                    # .pyf missing (e.g. sdist without it): fall back to a
                    # previously generated module.c, looking in the build
                    # tree first and then next to the source.
                    log.debug(' source %s does not exist: skipping f2py\'ing.' \
                              % (source))
                    name = ext_name
                    skip_f2py = 1
                    target_file = os.path.join(target_dir, name+'module.c')
                    if not os.path.isfile(target_file):
                        log.warn(' target %s does not exist:\n '\
                                 'Assuming %smodule.c was generated with '\
                                 '"build_src --inplace" command.' \
                                 % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = os.path.join(target_dir, name+'module.c')
                        if not os.path.isfile(target_file):
                            raise DistutilsSetupError("%r missing" % (target_file,))
                        log.info(' Yes! Using %r as up-to-date target.' \
                                 % (target_file))
                target_dirs.append(target_dir)
                f2py_sources.append(source)
                f2py_targets[source] = target_file
                new_sources.append(target_file)
            elif fortran_ext_match(ext):
                f_sources.append(source)
            else:
                new_sources.append(source)
        if not (f2py_sources or f_sources):
            # nothing for f2py to do
            return new_sources
        for d in target_dirs:
            self.mkpath(d)
        # collect f2py options from the extension, the command line, and
        # any distribution libraries this extension links against
        f2py_options = extension.f2py_options + self.f2py_opts
        if self.distribution.libraries:
            for name, build_info in self.distribution.libraries:
                if name in extension.libraries:
                    f2py_options.extend(build_info.get('f2py_options', []))
        log.info("f2py options: %s" % (f2py_options))
        if f2py_sources:
            # interface-file driven generation
            if len(f2py_sources) != 1:
                raise DistutilsSetupError(
                    'only one .pyf file is allowed per extension module but got'\
                    ' more: %r' % (f2py_sources,))
            source = f2py_sources[0]
            target_file = f2py_targets[source]
            target_dir = os.path.dirname(target_file) or '.'
            depends = [source] + extension.depends
            if (self.force or newer_group(depends, target_file, 'newer')) \
                   and not skip_f2py:
                log.info("f2py: %s" % (source))
                import numpy.f2py
                numpy.f2py.run_main(f2py_options
                                    + ['--build-dir', target_dir, source])
            else:
                log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
        else:
            # no .pyf: scan the Fortran sources and generate the wrapper
            #XXX TODO: --inplace support for sdist command
            if is_sequence(extension):
                name = extension[0]
            else: name = extension.name
            target_dir = os.path.join(*([self.build_src]\
                                        +name.split('.')[:-1]))
            target_file = os.path.join(target_dir, ext_name + 'module.c')
            new_sources.append(target_file)
            depends = f_sources + extension.depends
            if (self.force or newer_group(depends, target_file, 'newer')) \
                   and not skip_f2py:
                log.info("f2py:> %s" % (target_file))
                self.mkpath(target_dir)
                import numpy.f2py
                numpy.f2py.run_main(f2py_options + ['--lower',
                                                '--build-dir', target_dir]+\
                                    ['-m', ext_name]+f_sources)
            else:
                log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
                          % (target_file))
        if not os.path.isfile(target_file):
            raise DistutilsError("f2py target file %r not generated" % (target_file,))
        # every f2py extension also needs fortranobject.{c,h}
        build_dir = os.path.join(self.build_src, target_dir)
        target_c = os.path.join(build_dir, 'fortranobject.c')
        target_h = os.path.join(build_dir, 'fortranobject.h')
        log.info(" adding '%s' to sources." % (target_c))
        new_sources.append(target_c)
        if build_dir not in extension.include_dirs:
            log.info(" adding '%s' to include_dirs." % (build_dir))
            extension.include_dirs.append(build_dir)
        if not skip_f2py:
            # copy fortranobject.{c,h} from the installed numpy.f2py
            import numpy.f2py
            d = os.path.dirname(numpy.f2py.__file__)
            source_c = os.path.join(d, 'src', 'fortranobject.c')
            source_h = os.path.join(d, 'src', 'fortranobject.h')
            if newer(source_c, target_c) or newer(source_h, target_h):
                self.mkpath(os.path.dirname(target_c))
                self.copy_file(source_c, target_c)
                self.copy_file(source_h, target_h)
        else:
            if not os.path.isfile(target_c):
                raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
            if not os.path.isfile(target_h):
                raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))
        # pick up any wrapper Fortran files f2py emitted alongside module.c
        for name_ext in ['-f2pywrappers.f', '-f2pywrappers2.f90']:
            filename = os.path.join(target_dir, ext_name + name_ext)
            if os.path.isfile(filename):
                log.info(" adding '%s' to sources." % (filename))
                f_sources.append(filename)
        return new_sources + f_sources
    def swig_sources(self, sources, extension):
        """Run SWIG over ``.i`` interface files in *sources*.

        The generated ``<name>_wrap.c``/``.cpp`` replaces the ``.i`` file
        in the returned list, and the SWIG-generated ``.py`` proxy files
        are appended at the end.
        """
        # Assuming SWIG 1.3.14 or later. See compatibility note in
        # http://www.swig.org/Doc1.3/Python.html#Python_nn6
        new_sources = []
        swig_sources = []
        # maps each .i source -> its generated wrapper path
        swig_targets = {}
        target_dirs = []
        py_files = [] # swig generated .py files
        target_ext = '.c'
        # Determine the C vs C++ flavour: an explicit '-c++' swig option
        # or the --swig-cpp command flag forces C++ mode up front;
        # otherwise it is sniffed from the interface file below.
        if '-c++' in extension.swig_opts:
            typ = 'c++'
            is_cpp = True
            extension.swig_opts.remove('-c++')
        elif self.swig_cpp:
            typ = 'c++'
            is_cpp = True
        else:
            typ = None
            is_cpp = False
        skip_swig = 0
        ext_name = extension.name.split('.')[-1]
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.i': # SWIG interface file
                # the code below assumes that the sources list
                # contains not more than one .i SWIG interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                    py_target_dir = self.ext_target_dir
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                    py_target_dir = target_dir
                if os.path.isfile(source):
                    name = get_swig_modulename(source)
                    # SWIG extension names carry a leading '_', hence [1:]
                    if name != ext_name[1:]:
                        raise DistutilsSetupError(
                            'mismatch of extension names: %s provides %r'
                            ' but expected %r' % (source, name, ext_name[1:]))
                    if typ is None:
                        typ = get_swig_target(source)
                        is_cpp = typ=='c++'
                    else:
                        # cross-check the sniffed target against the forced one
                        typ2 = get_swig_target(source)
                        if typ2 is None:
                            log.warn('source %r does not define swig target, assuming %s swig target' \
                                     % (source, typ))
                        elif typ!=typ2:
                            log.warn('expected %r but source %r defines %r swig target' \
                                     % (typ, source, typ2))
                            if typ2=='c++':
                                log.warn('resetting swig target to c++ (some targets may have .c extension)')
                                is_cpp = True
                            else:
                                log.warn('assuming that %r has c++ swig target' % (source))
                    if is_cpp:
                        target_ext = '.cpp'
                    target_file = os.path.join(target_dir, '%s_wrap%s' \
                                               % (name, target_ext))
                else:
                    # .i missing (e.g. sdist without it): fall back to a
                    # previously generated wrapper, build tree first, then
                    # next to the source.
                    log.warn(' source %s does not exist: skipping swig\'ing.' \
                             % (source))
                    name = ext_name[1:]
                    skip_swig = 1
                    target_file = _find_swig_target(target_dir, name)
                    if not os.path.isfile(target_file):
                        log.warn(' target %s does not exist:\n '\
                                 'Assuming %s_wrap.{c,cpp} was generated with '\
                                 '"build_src --inplace" command.' \
                                 % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = _find_swig_target(target_dir, name)
                        if not os.path.isfile(target_file):
                            raise DistutilsSetupError("%r missing" % (target_file,))
                        log.warn(' Yes! Using %r as up-to-date target.' \
                                 % (target_file))
                target_dirs.append(target_dir)
                new_sources.append(target_file)
                py_files.append(os.path.join(py_target_dir, name+'.py'))
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)
        if not swig_sources:
            return new_sources
        if skip_swig:
            # reused pre-generated wrappers; nothing to run
            return new_sources + py_files
        for d in target_dirs:
            self.mkpath(d)
        swig = self.swig or self.find_swig()
        swig_cmd = [swig, "-python"] + extension.swig_opts
        if is_cpp:
            swig_cmd.append('-c++')
        for d in extension.include_dirs:
            swig_cmd.append('-I'+d)
        for source in swig_sources:
            target = swig_targets[source]
            depends = [source] + extension.depends
            if self.force or newer_group(depends, target, 'newer'):
                log.info("%s: %s" % (os.path.basename(swig) \
                                     + (is_cpp and '++' or ''), source))
                self.spawn(swig_cmd + self.swig_opts \
                           + ["-o", target, '-outdir', py_target_dir, source])
            else:
                log.debug(" skipping '%s' swig interface (up-to-date)" \
                          % (source))
        return new_sources + py_files
# Filename matchers used by template_sources: Fortran-family/pyf templates
# go through process_f_file, everything else through process_c_file, and
# generated headers trigger an include_dirs update.
_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z', re.I).match
#### SWIG related auxiliary functions ####
# Matches a SWIG '%module [ (package="...") ] <name>' declaration.
_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
                                     re.I).match
# Emacs-style mode cookies ('-*- c -*-' / '-*- c++ -*-') used by
# get_swig_target to classify an interface file's first line.
_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search
_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search
def get_swig_target(source):
    """Classify the SWIG target language of interface file *source*.

    Only the first line is inspected: SWIG interface files conventionally
    carry an Emacs-style mode cookie (``-*- c -*-`` or ``-*- c++ -*-``)
    there.

    Returns 'c++', 'c', or None when no cookie is present.
    """
    # ``with`` guarantees the handle is closed even if readline() raises,
    # unlike the original explicit open()/close() pair.
    with open(source, 'r') as f:
        line = f.readline()
    result = None
    # Checked in this order so an (unlikely) line matching both cookies
    # resolves to 'c', exactly as before.
    if _has_cpp_header(line):
        result = 'c++'
    if _has_c_header(line):
        result = 'c'
    return result
def get_swig_modulename(source):
    """Return the ``%module`` name declared in SWIG interface file *source*.

    Scans line by line and returns the first declared module name, or
    None when the file declares no module.
    """
    # ``with`` closes the file even if the matcher raises mid-scan,
    # unlike the original open()/close() pair.
    with open(source, 'r') as f:
        for line in f:
            m = _swig_module_name_match(line)
            if m:
                return m.group('name')
    return None
def _find_swig_target(target_dir, name):
for ext in ['.cpp', '.c']:
target = os.path.join(target_dir, '%s_wrap%s' % (name, ext))
if os.path.isfile(target):
break
return target
#### F2PY related auxiliary functions ####
# Matches a 'python module <name>' declaration in a .pyf interface file.
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
                                     re.I).match
# Matches the f2py-internal '*__user__*' callback-module declarations,
# which get_f2py_modulename deliberately skips.
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'
                                          r'__user__[\w_]*)', re.I).match
def get_f2py_modulename(source):
    """Return the ``python module`` name declared in .pyf file *source*.

    ``*__user__*`` declarations (f2py callback modules) are skipped.
    Returns None when no non-user module declaration is found.
    """
    # ``with`` closes the file on every exit path, unlike the original
    # open()/close() pair.
    with open(source) as f:
        for line in f:
            m = _f2py_module_name_match(line)
            if m:
                if _f2py_user_module_name_match(line): # skip *__user__* names
                    continue
                return m.group('name')
    return None
##########################################
|
zorojean/scrapy
|
refs/heads/master
|
scrapy/utils/httpobj.py
|
169
|
"""Helper functions for scrapy.http objects (Request, Response)"""
import weakref
from six.moves.urllib.parse import urlparse
# Weak keys: a cache entry disappears as soon as its Request/Response does.
_urlparse_cache = weakref.WeakKeyDictionary()


def urlparse_cached(request_or_response):
    """Return urlparse.urlparse caching the result, where the argument can be a
    Request or Response object
    """
    try:
        return _urlparse_cache[request_or_response]
    except KeyError:
        parsed = urlparse(request_or_response.url)
        _urlparse_cache[request_or_response] = parsed
        return parsed
|
galengold/split70
|
refs/heads/master
|
qmk_firmware/lib/googletest/googletest/test/gtest_break_on_failure_unittest.py
|
2140
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')
# Short aliases into the shared gtest test-utils module.
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.  NOTE: this runs as a side effect at import time.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs *command* in a subprocess.

  Returns 1 if the process was killed by a signal, 0 otherwise.
  """
  proc = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if proc.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a
                        seg-fault; 0 otherwise.
    """
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    # Human-readable description of the env-var state, for the failure msg.
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # Build the flag exactly as it would appear on the command line.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    has_seg_fault = Run(command)
    # Restore the environment so later cases start from a clean slate.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # This case only exists on Windows builds, where catch-exceptions mode
  # could plausibly mask the intentional crash.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""
      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
  # Delegates to gtest's test runner, which handles gtest-specific flags.
  gtest_test_utils.Main()
|
AustinRoy7/Pomodoro-timer
|
refs/heads/master
|
venv/Lib/encodings/hp_roman8.py
|
85
|
""" Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
Original source: LaserJet IIP Printer User's Manual HP part no
33471-90901, Hewlet-Packard, June 1989.
(Used with permission)
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless HP Roman-8 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding carries no state between calls."""

    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding carries no state between calls."""

    def decode(self, input, final=False):
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # All stream behavior comes from the mixin bases; nothing to add.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # All stream behavior comes from the mixin bases; nothing to add.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for 'hp-roman8'."""
    codec = Codec()
    return codecs.CodecInfo(
        name='hp-roman8',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xc0' # 0xA1 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc2' # 0xA2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc8' # 0xA3 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xca' # 0xA4 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xA5 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xce' # 0xA6 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xA7 -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xb4' # 0xA8 -> ACUTE ACCENT
'\u02cb' # 0xA9 -> MODIFIER LETTER GRAVE ACCENT (MANDARIN CHINESE FOURTH TONE)
'\u02c6' # 0xAA -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\xa8' # 0xAB -> DIAERESIS
'\u02dc' # 0xAC -> SMALL TILDE
'\xd9' # 0xAD -> LATIN CAPITAL LETTER U WITH GRAVE
'\xdb' # 0xAE -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\u20a4' # 0xAF -> LIRA SIGN
'\xaf' # 0xB0 -> MACRON
'\xdd' # 0xB1 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xB2 -> LATIN SMALL LETTER Y WITH ACUTE
'\xb0' # 0xB3 -> DEGREE SIGN
'\xc7' # 0xB4 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xe7' # 0xB5 -> LATIN SMALL LETTER C WITH CEDILLA
'\xd1' # 0xB6 -> LATIN CAPITAL LETTER N WITH TILDE
'\xf1' # 0xB7 -> LATIN SMALL LETTER N WITH TILDE
'\xa1' # 0xB8 -> INVERTED EXCLAMATION MARK
'\xbf' # 0xB9 -> INVERTED QUESTION MARK
'\xa4' # 0xBA -> CURRENCY SIGN
'\xa3' # 0xBB -> POUND SIGN
'\xa5' # 0xBC -> YEN SIGN
'\xa7' # 0xBD -> SECTION SIGN
'\u0192' # 0xBE -> LATIN SMALL LETTER F WITH HOOK
'\xa2' # 0xBF -> CENT SIGN
'\xe2' # 0xC0 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xea' # 0xC1 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xf4' # 0xC2 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xfb' # 0xC3 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xe1' # 0xC4 -> LATIN SMALL LETTER A WITH ACUTE
'\xe9' # 0xC5 -> LATIN SMALL LETTER E WITH ACUTE
'\xf3' # 0xC6 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0xC7 -> LATIN SMALL LETTER U WITH ACUTE
'\xe0' # 0xC8 -> LATIN SMALL LETTER A WITH GRAVE
'\xe8' # 0xC9 -> LATIN SMALL LETTER E WITH GRAVE
'\xf2' # 0xCA -> LATIN SMALL LETTER O WITH GRAVE
'\xf9' # 0xCB -> LATIN SMALL LETTER U WITH GRAVE
'\xe4' # 0xCC -> LATIN SMALL LETTER A WITH DIAERESIS
'\xeb' # 0xCD -> LATIN SMALL LETTER E WITH DIAERESIS
'\xf6' # 0xCE -> LATIN SMALL LETTER O WITH DIAERESIS
'\xfc' # 0xCF -> LATIN SMALL LETTER U WITH DIAERESIS
'\xc5' # 0xD0 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xee' # 0xD1 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xd8' # 0xD2 -> LATIN CAPITAL LETTER O WITH STROKE
'\xc6' # 0xD3 -> LATIN CAPITAL LETTER AE
'\xe5' # 0xD4 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xed' # 0xD5 -> LATIN SMALL LETTER I WITH ACUTE
'\xf8' # 0xD6 -> LATIN SMALL LETTER O WITH STROKE
'\xe6' # 0xD7 -> LATIN SMALL LETTER AE
'\xc4' # 0xD8 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xec' # 0xD9 -> LATIN SMALL LETTER I WITH GRAVE
'\xd6' # 0xDA -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0xDB -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xc9' # 0xDC -> LATIN CAPITAL LETTER E WITH ACUTE
'\xef' # 0xDD -> LATIN SMALL LETTER I WITH DIAERESIS
'\xdf' # 0xDE -> LATIN SMALL LETTER SHARP S (GERMAN)
'\xd4' # 0xDF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xc1' # 0xE0 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc3' # 0xE1 -> LATIN CAPITAL LETTER A WITH TILDE
'\xe3' # 0xE2 -> LATIN SMALL LETTER A WITH TILDE
'\xd0' # 0xE3 -> LATIN CAPITAL LETTER ETH (ICELANDIC)
'\xf0' # 0xE4 -> LATIN SMALL LETTER ETH (ICELANDIC)
'\xcd' # 0xE5 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xcc' # 0xE6 -> LATIN CAPITAL LETTER I WITH GRAVE
'\xd3' # 0xE7 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd2' # 0xE8 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd5' # 0xE9 -> LATIN CAPITAL LETTER O WITH TILDE
'\xf5' # 0xEA -> LATIN SMALL LETTER O WITH TILDE
'\u0160' # 0xEB -> LATIN CAPITAL LETTER S WITH CARON
'\u0161' # 0xEC -> LATIN SMALL LETTER S WITH CARON
'\xda' # 0xED -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0178' # 0xEE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xff' # 0xEF -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xde' # 0xF0 -> LATIN CAPITAL LETTER THORN (ICELANDIC)
'\xfe' # 0xF1 -> LATIN SMALL LETTER THORN (ICELANDIC)
'\xb7' # 0xF2 -> MIDDLE DOT
'\xb5' # 0xF3 -> MICRO SIGN
'\xb6' # 0xF4 -> PILCROW SIGN
'\xbe' # 0xF5 -> VULGAR FRACTION THREE QUARTERS
'\u2014' # 0xF6 -> EM DASH
'\xbc' # 0xF7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xF8 -> VULGAR FRACTION ONE HALF
'\xaa' # 0xF9 -> FEMININE ORDINAL INDICATOR
'\xba' # 0xFA -> MASCULINE ORDINAL INDICATOR
'\xab' # 0xFB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u25a0' # 0xFC -> BLACK SQUARE
'\xbb' # 0xFD -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xb1' # 0xFE -> PLUS-MINUS SIGN
'\ufffe'
)
### Encoding table
# Inverse mapping, built mechanically from the decoding table above.
encoding_table=codecs.charmap_build(decoding_table)
|
zhhf/charging
|
refs/heads/master
|
charging/agent/linux/iptables_manager.py
|
8
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import inspect
import os
from neutron.agent.linux import utils as linux_utils
from neutron.common import utils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
    """Grab the name of the binary we're running in (max 16 chars)."""
    # The outermost stack frame belongs to the entry-point script.
    outermost_frame = inspect.stack()[-1]
    script_path = outermost_frame[1]
    return os.path.basename(script_path)[:16]
# Prefix used to "wrap" chain names; computed once at import time.
binary_name = get_binary_name()
# A length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
def get_chain_name(chain_name, wrap=True):
    """Truncate *chain_name* to the iptables limit (shorter when wrapped)."""
    limit = MAX_CHAIN_LEN_WRAP if wrap else MAX_CHAIN_LEN_NOWRAP
    return chain_name[:limit]
class IptablesRule(object):
    """An iptables rule.

    You shouldn't need to use this class directly, it's only used by
    IptablesManager.
    """

    def __init__(self, chain, rule, wrap=True, top=False,
                 binary_name=binary_name, tag=None):
        # chain is stored already truncated to the iptables name limit
        self.chain = get_chain_name(chain, wrap)
        self.rule = rule
        self.wrap = wrap
        self.top = top
        # prefix for wrapped chain names; capped at 16 chars so the full
        # wrapped name stays within iptables' 28-char chain limit
        self.wrap_name = binary_name[:16]
        self.tag = tag

    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of letting the
        # attribute access raise AttributeError, so comparisons against
        # arbitrary objects degrade gracefully to False.
        if not isinstance(other, IptablesRule):
            return NotImplemented
        return ((self.chain == other.chain) and
                (self.rule == other.rule) and
                (self.top == other.top) and
                (self.wrap == other.wrap))

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        # Render as the iptables '-A' append command fragment.
        if self.wrap:
            chain = '%s-%s' % (self.wrap_name, self.chain)
        else:
            chain = self.chain
        return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
    def __init__(self, binary_name=binary_name):
        """Create an empty in-memory model of one iptables table."""
        # Rules queued for addition, and rules queued for removal.
        self.rules = []
        self.remove_rules = []
        # Wrapped chain names vs. chains used verbatim (wrap=False).
        self.chains = set()
        self.unwrapped_chains = set()
        # Non-wrapped chains queued for removal; per remove_chain's note,
        # these are iterated over in apply().
        self.remove_chains = set()
        # Prefix for wrapped chain names, capped at 16 chars.
        self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def ensure_remove_chain(self, name, wrap=True):
"""Ensure the chain is removed.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
return
self.remove_chain(name, wrap)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chain = get_chain_name(chain, wrap)
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
class IptablesManager(object):
    """Wrapper for iptables.
    See IptablesTable for some usage docs
    A number of chains are set up to begin with.
    First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT. Its
    name is not wrapped, so it's shared between the various nova workers. It's
    intended for rules that need to live at the top of the FORWARD and OUTPUT
    chains. It's in both the ipv4 and ipv6 set of tables.
    For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
    are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
    the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
    "local" which is jumped to from neutron-filter-top.
    For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
    there's a snat chain that is applied after the POSTROUTING chain.
    """
    def __init__(self, _execute=None, state_less=False,
                 root_helper=None, use_ipv6=False, namespace=None,
                 binary_name=binary_name):
        if _execute:
            self.execute = _execute
        else:
            self.execute = linux_utils.execute
        self.use_ipv6 = use_ipv6
        self.root_helper = root_helper
        self.namespace = namespace
        # While True, apply() is a no-op; defer_apply_off() flushes.
        self.iptables_apply_deferred = False
        self.wrap_name = binary_name[:16]
        self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
        self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
        # Add a neutron-filter-top chain. It's intended to be shared
        # among the various nova components. It sits at the very top
        # of FORWARD and OUTPUT.
        for tables in [self.ipv4, self.ipv6]:
            tables['filter'].add_chain('neutron-filter-top', wrap=False)
            tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
                                      wrap=False, top=True)
            tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
                                      wrap=False, top=True)
            tables['filter'].add_chain('local')
            tables['filter'].add_rule('neutron-filter-top', '-j $local',
                                      wrap=False)
        # Wrap the built-in chains
        builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
                          6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
        if not state_less:
            self.ipv4.update(
                {'nat': IptablesTable(binary_name=self.wrap_name)})
            builtin_chains[4].update({'nat': ['PREROUTING',
                                              'OUTPUT', 'POSTROUTING']})
        for ip_version in builtin_chains:
            if ip_version == 4:
                tables = self.ipv4
            elif ip_version == 6:
                tables = self.ipv6
            for table, chains in builtin_chains[ip_version].iteritems():
                for chain in chains:
                    tables[table].add_chain(chain)
                    tables[table].add_rule(chain, '-j $%s' %
                                           (chain), wrap=False)
        if not state_less:
            # Add a neutron-postrouting-bottom chain. It's intended to be
            # shared among the various nova components. We set it as the last
            # chain of POSTROUTING chain.
            self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
                                       wrap=False)
            self.ipv4['nat'].add_rule('POSTROUTING',
                                      '-j neutron-postrouting-bottom',
                                      wrap=False)
            # We add a snat chain to the shared neutron-postrouting-bottom
            # chain so that it's applied last.
            self.ipv4['nat'].add_chain('snat')
            self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
                                      '-j $snat', wrap=False)
            # And then we add a float-snat chain and jump to first thing in
            # the snat chain.
            self.ipv4['nat'].add_chain('float-snat')
            self.ipv4['nat'].add_rule('snat', '-j $float-snat')

    def defer_apply_on(self):
        """Suspend apply(); changes accumulate until defer_apply_off()."""
        self.iptables_apply_deferred = True

    def defer_apply_off(self):
        """Re-enable apply() and immediately flush pending changes."""
        self.iptables_apply_deferred = False
        self._apply()

    def apply(self):
        """Apply the in-memory rules unless application is deferred."""
        if self.iptables_apply_deferred:
            return
        self._apply()

    def _apply(self):
        # Serialize concurrent appliers; the lock name includes the
        # namespace so unrelated namespaces don't block each other.
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace
        try:
            with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
                LOG.debug(_('Got semaphore / lock "%s"'), lock_name)
                return self._apply_synchronized()
        finally:
            LOG.debug(_('Semaphore / lock released "%s"'), lock_name)

    def _apply_synchronized(self):
        """Apply the current in-memory set of iptables rules.
        This will blow away any rules left over from previous runs of the
        same component of Nova, and replace them with our current set of
        rules. This happens atomically, thanks to iptables-restore.
        """
        s = [('iptables', self.ipv4)]
        if self.use_ipv6:
            s += [('ip6tables', self.ipv6)]
        for cmd, tables in s:
            # Dump the live tables (with counters), splice our rules in,
            # then restore the result atomically.
            args = ['%s-save' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            all_tables = self.execute(args, root_helper=self.root_helper)
            all_lines = all_tables.split('\n')
            for table_name, table in tables.iteritems():
                start, end = self._find_table(all_lines, table_name)
                all_lines[start:end] = self._modify_rules(
                    all_lines[start:end], table, table_name)
            args = ['%s-restore' % (cmd,), '-c']
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            self.execute(args, process_input='\n'.join(all_lines),
                         root_helper=self.root_helper)
        LOG.debug(_("IPTablesManager.apply completed with success"))

    def _find_table(self, lines, table_name):
        """Return the (start, end) slice of *lines* covering *table_name*."""
        if len(lines) < 3:
            # length only <2 when fake iptables
            return (0, 0)
        try:
            start = lines.index('*%s' % table_name) - 1
        except ValueError:
            # Couldn't find table_name
            LOG.debug(_('Unable to find table %s'), table_name)
            return (0, 0)
        end = lines[start:].index('COMMIT') + start + 2
        return (start, end)

    def _find_rules_index(self, lines):
        """Return the index just past the ':CHAIN ...' declarations."""
        seen_chains = False
        rules_index = 0
        for rules_index, rule in enumerate(lines):
            if not seen_chains:
                if rule.startswith(':'):
                    seen_chains = True
            else:
                if not rule.startswith(':'):
                    break
        if not seen_chains:
            rules_index = 2
        return rules_index

    def _modify_rules(self, current_lines, table, table_name):
        """Merge *table*'s in-memory chains/rules into an iptables-save dump.

        Lines carrying our wrap prefix are recycled so any existing
        [packet:byte] counters survive; duplicates and entries queued for
        removal are filtered out.
        """
        unwrapped_chains = table.unwrapped_chains
        chains = table.chains
        remove_chains = table.remove_chains
        rules = table.rules
        remove_rules = table.remove_rules
        if not current_lines:
            # Fake a minimal empty table so the splicing below still works.
            fake_table = ['# Generated by iptables_manager',
                          '*' + table_name, 'COMMIT',
                          '# Completed by iptables_manager']
            current_lines = fake_table
        # Fill old_filter with any chains or rules we might have added,
        # they could have a [packet:byte] count we want to preserve.
        # Fill new_filter with any chains or rules without our name in them.
        old_filter, new_filter = [], []
        for line in current_lines:
            (old_filter if self.wrap_name in line else
             new_filter).append(line.strip())
        rules_index = self._find_rules_index(new_filter)
        all_chains = [':%s' % name for name in unwrapped_chains]
        all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
        # Iterate through all the chains, trying to find an existing
        # match.
        our_chains = []
        for chain in all_chains:
            chain_str = str(chain).strip()
            orig_filter = [s for s in old_filter if chain_str in s.strip()]
            dup_filter = [s for s in new_filter if chain_str in s.strip()]
            new_filter = [s for s in new_filter if chain_str not in s.strip()]
            # if no old or duplicates, use original chain
            if orig_filter:
                # grab the last entry, if there is one
                old = orig_filter[-1]
                chain_str = str(old).strip()
            elif dup_filter:
                # grab the last entry, if there is one
                dup = dup_filter[-1]
                chain_str = str(dup).strip()
            else:
                # add-on the [packet:bytes]
                chain_str += ' - [0:0]'
            our_chains += [chain_str]
        # Iterate through all the rules, trying to find an existing
        # match.
        our_rules = []
        bot_rules = []
        for rule in rules:
            rule_str = str(rule).strip()
            # Further down, we weed out duplicates from the bottom of the
            # list, so here we remove the dupes ahead of time.
            orig_filter = [s for s in old_filter if rule_str in s.strip()]
            dup_filter = [s for s in new_filter if rule_str in s.strip()]
            new_filter = [s for s in new_filter if rule_str not in s.strip()]
            # if no old or duplicates, use original rule
            if orig_filter:
                # grab the last entry, if there is one
                old = orig_filter[-1]
                rule_str = str(old).strip()
            elif dup_filter:
                # grab the last entry, if there is one
                dup = dup_filter[-1]
                rule_str = str(dup).strip()
                # backup one index so we write the array correctly
                rules_index -= 1
            else:
                # add-on the [packet:bytes]
                rule_str = '[0:0] ' + rule_str
            if rule.top:
                # rule.top == True means we want this rule to be at the top.
                our_rules += [rule_str]
            else:
                bot_rules += [rule_str]
        our_rules += bot_rules
        new_filter[rules_index:rules_index] = our_rules
        new_filter[rules_index:rules_index] = our_chains

        def _strip_packets_bytes(line):
            # strip any [packet:byte] counts at start or end of lines
            if line.startswith(':'):
                # it's a chain, for example, ":neutron-billing - [0:0]"
                line = line.split(':')[1]
                line = line.split(' - [', 1)[0]
            elif line.startswith('['):
                # it's a rule, for example, "[0:0] -A neutron-billing..."
                line = line.split('] ', 1)[1]
            line = line.strip()
            return line

        seen_chains = set()

        def _weed_out_duplicate_chains(line):
            # ignore [packet:byte] counts at end of lines
            if line.startswith(':'):
                line = _strip_packets_bytes(line)
                if line in seen_chains:
                    return False
                else:
                    seen_chains.add(line)
            # Leave it alone
            return True

        seen_rules = set()

        def _weed_out_duplicate_rules(line):
            if line.startswith('['):
                line = _strip_packets_bytes(line)
                if line in seen_rules:
                    return False
                else:
                    seen_rules.add(line)
            # Leave it alone
            return True

        def _weed_out_removes(line):
            # We need to find exact matches here
            if line.startswith(':'):
                line = _strip_packets_bytes(line)
                for chain in remove_chains:
                    if chain == line:
                        remove_chains.remove(chain)
                        return False
            elif line.startswith('['):
                line = _strip_packets_bytes(line)
                for rule in remove_rules:
                    rule_str = _strip_packets_bytes(str(rule))
                    if rule_str == line:
                        remove_rules.remove(rule)
                        return False
            # Leave it alone
            return True

        # We filter duplicates. Go through the chains and rules, letting
        # the *last* occurrence take precendence since it could have a
        # non-zero [packet:byte] count we want to preserve. We also filter
        # out anything in the "remove" list.
        new_filter.reverse()
        new_filter = [line for line in new_filter
                      if _weed_out_duplicate_chains(line) and
                      _weed_out_duplicate_rules(line) and
                      _weed_out_removes(line)]
        new_filter.reverse()
        # flush lists, just in case we didn't find something
        remove_chains.clear()
        # BUG FIX: the original code did
        #     for rule in remove_rules:
        #         remove_rules.remove(rule)
        # which mutates the list while iterating it and therefore skips
        # every other element, leaving half the leftovers queued forever.
        # Clear the list in place instead (callers share this list object).
        del remove_rules[:]
        return new_filter

    def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
        # Find which (binary, table) pairs actually contain the chain.
        name = get_chain_name(chain, wrap)
        cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
                      if name in table._select_chain_set(wrap)]
        cmd_tables += [('ip6tables', key) for key, table in self.ipv6.items()
                       if name in table._select_chain_set(wrap)]
        return cmd_tables

    def get_traffic_counters(self, chain, wrap=True, zero=False):
        """Return the sum of the traffic counters of all rules of a chain.

        :param chain: the chain to inspect (wrapped unless *wrap* is False)
        :param zero: also reset the counters (-Z) while reading
        :return: dict with 'pkts' and 'bytes' totals, or None when the
            chain exists in no table
        """
        cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
        if not cmd_tables:
            LOG.warn(_('Attempted to get traffic counters of chain %s which '
                       'does not exist'), chain)
            return
        name = get_chain_name(chain, wrap)
        acc = {'pkts': 0, 'bytes': 0}
        for cmd, table in cmd_tables:
            args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
            if zero:
                args.append('-Z')
            if self.namespace:
                args = ['ip', 'netns', 'exec', self.namespace] + args
            current_table = (self.execute(args,
                                          root_helper=self.root_helper))
            current_lines = current_table.split('\n')
            # Skip the two header lines; stop at the first row that does
            # not start with two numeric columns (pkts, bytes).
            for line in current_lines[2:]:
                if not line:
                    break
                data = line.split()
                if (len(data) < 2 or
                        not data[0].isdigit() or
                        not data[1].isdigit()):
                    break
                acc['pkts'] += int(data[0])
                acc['bytes'] += int(data[1])
        return acc
|
jayceyxc/hue
|
refs/heads/master
|
desktop/core/ext-py/pysaml2-2.4.0/src/saml2/sigver.py
|
8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
""" Functions connected to signing and verifying.
Based on the use of xmlsec1 binaries and not the python xmlsec module.
"""
import base64
from binascii import hexlify
import hashlib
import logging
import random
import os
import ssl
from time import mktime
import urllib
from Crypto.PublicKey.RSA import importKey
from Crypto.Signature import PKCS1_v1_5
from Crypto.Util.asn1 import DerSequence
from Crypto.PublicKey import RSA
from saml2.cert import OpenSSLWrapper
from saml2.extension import pefim
from saml2.saml import EncryptedAssertion
import xmldsig as ds
from saml2 import samlp, SamlBase
from saml2 import SAMLError
from saml2 import extension_elements_to_elements
from saml2 import class_name
from saml2 import saml
from saml2 import ExtensionElement
from saml2 import VERSION
from saml2.s_utils import sid, rndstr
from saml2.s_utils import Unsupported
from saml2.time_util import instant
from saml2.time_util import utc_now
from saml2.time_util import str_to_time
from tempfile import NamedTemporaryFile, mkdtemp
from subprocess import Popen, PIPE
from xmldsig import SIG_RSA_SHA1
from xmldsig import SIG_RSA_SHA224
from xmldsig import SIG_RSA_SHA256
from xmldsig import SIG_RSA_SHA384
from xmldsig import SIG_RSA_SHA512
from xmlenc import EncryptionMethod
from xmlenc import EncryptedKey
from xmlenc import CipherData
from xmlenc import CipherValue
from xmlenc import EncryptedData
from Crypto.Hash import SHA
from Crypto.Hash import SHA224
from Crypto.Hash import SHA256
from Crypto.Hash import SHA384
from Crypto.Hash import SHA512
logger = logging.getLogger(__name__)
# Clark-notation tag used to detect an enveloped ds:Signature element.
SIG = "{%s#}%s" % (ds.NAMESPACE, "Signature")
# XML-ENC algorithm identifiers (key transport / block cipher).
RSA_1_5 = "http://www.w3.org/2001/04/xmlenc#rsa-1_5"
TRIPLE_DES_CBC = "http://www.w3.org/2001/04/xmlenc#tripledes-cbc"
# XML-declaration variants that rm_xmltag() strips off statements.
XMLTAG = "<?xml version='1.0'?>"
PREFIX1 = "<?xml version='1.0' encoding='UTF-8'?>"
PREFIX2 = '<?xml version="1.0" encoding="UTF-8"?>'
class SigverError(SAMLError):
    """Base class for all signing/verification errors in this module."""
    pass
class CertificateTooOld(SigverError):
    """A certificate is outside its validity period."""
    pass
class XmlsecError(SigverError):
    """An xmlsec operation failed."""
    pass
class MissingKey(SigverError):
    """A key required for the operation could not be found."""
    pass
class DecryptError(XmlsecError):
    """Decryption failed."""
    pass
class EncryptError(XmlsecError):
    """Encryption failed."""
    pass
class SignatureError(XmlsecError):
    """Producing a signature failed."""
    pass
class BadSignature(SigverError):
    """The signature is invalid."""
    pass
class CertificateError(SigverError):
    """A certificate is malformed or otherwise unusable."""
    pass
def read_file(*args, **kwargs):
    """Open a file with the given open() arguments and return its contents."""
    handle = open(*args, **kwargs)
    try:
        return handle.read()
    finally:
        handle.close()
def rm_xmltag(statement):
    """Strip a leading XML declaration (and one trailing newline) off
    *statement*, decoding it from UTF-8 bytes first if necessary."""
    try:
        statement.startswith(XMLTAG)
    except TypeError:
        # Byte string: decode so the prefix comparisons work.
        statement = statement.decode("utf8")
    for declaration in (XMLTAG, PREFIX1, PREFIX2):
        if statement.startswith(declaration):
            statement = statement[len(declaration):]
            if statement[0] == '\n':
                statement = statement[1:]
            break
    return statement
def signed(item):
    """Return True when *item* or any of its descendants carries a
    ds:Signature child that is set."""
    if SIG in item.c_children.keys() and item.signature:
        return True
    for prop in item.c_child_order:
        child = getattr(item, prop, None)
        if isinstance(child, list):
            if any(signed(sub) for sub in child):
                return True
        elif child and signed(child):
            return True
    return False
def get_xmlsec_binary(paths=None):
    """
    Tries to find the xmlsec1 binary.

    :param paths: Non-system path paths which should be searched when
        looking for xmlsec1
    :return: full name of the xmlsec1 binary found. If no binaries are
        found then an exception is raised.
    """
    if os.name == "nt":
        candidates = ["xmlsec.exe", "xmlsec1.exe"]
    else:
        # posix -- and anything unknown defaults to the same name
        candidates = ["xmlsec1"]

    def _exists(fname):
        try:
            return bool(os.lstat(fname))
        except OSError:
            return False

    # Caller-supplied directories first, preferring earlier binary names.
    if paths:
        for name in candidates:
            for directory in paths:
                candidate = os.path.join(directory, name)
                if _exists(candidate):
                    return candidate
    # Then the system PATH, preferring earlier directories.
    for directory in os.environ["PATH"].split(os.pathsep):
        for name in candidates:
            candidate = os.path.join(directory, name)
            if _exists(candidate):
                return candidate
    raise SigverError("Can't find %s" % candidates)
def _get_xmlsec_cryptobackend(path=None, search_paths=None, debug=False):
    """
    Initialize a CryptoBackendXmlSec1 crypto backend, locating the
    xmlsec1 binary if no explicit *path* is supplied.
    This function is now internal to this module.
    """
    binary = path if path is not None else get_xmlsec_binary(paths=search_paths)
    return CryptoBackendXmlSec1(binary, debug=debug)
# Defaults handed to the crypto backends: which attribute identifies a
# node, and the node names used when signing/decrypting assertions.
ID_ATTR = "ID"
NODE_NAME = "urn:oasis:names:tc:SAML:2.0:assertion:Assertion"
ENC_NODE_NAME = "urn:oasis:names:tc:SAML:2.0:assertion:EncryptedAssertion"
ENC_KEY_CLASS = "EncryptedKey"
# Module-level flag; not read anywhere in the visible part of this file.
_TEST_ = True
# --------------------------------------------------------------------------
def _make_vals(val, klass, seccont, klass_inst=None, prop=None, part=False,
               base64encode=False, elements_to_sign=None):
    """
    Creates a class instance with a specified value, the specified
    class instance may be a value on a property in a defined class instance.

    :param val: The value
    :param klass: The value class
    :param seccont: The security context, passed through to _instance()
    :param klass_inst: The class instance which has a property on which
        what this function returns is a value.
    :param prop: The property which the value should be assigned to.
    :param part: If the value is one of a possible list of values it should be
        handled slightly different compared to if it isn't.
    :param base64encode: Whether text content should be base64 encoded
    :param elements_to_sign: Accumulator of (class name, id) pairs for
        elements that must be signed later
    :return: Value class instance
    """
    cinst = None
    #print "make_vals(%s, %s)" % (val, klass)
    if isinstance(val, dict):
        # A dict describes a whole sub-element: build it recursively.
        cinst = _instance(klass, val, seccont, base64encode=base64encode,
                          elements_to_sign=elements_to_sign)
    else:
        try:
            cinst = klass().set_text(val)
        except ValueError:
            # set_text() rejected the value: treat *val* as a sequence and
            # build one instance per item (only legal at the top level).
            if not part:
                cis = [_make_vals(sval, klass, seccont, klass_inst, prop,
                       True, base64encode, elements_to_sign) for sval
                       in val]
                setattr(klass_inst, prop, cis)
            else:
                raise
    if part:
        return cinst
    else:
        # Top-level call: attach the single instance (as a 1-item list)
        # to the parent instead of returning it.
        if cinst:
            cis = [cinst]
            setattr(klass_inst, prop, cis)
def _instance(klass, ava, seccont, base64encode=False, elements_to_sign=None):
    """Instantiate *klass* and populate it from the attribute/value dict.

    Copies plain attributes, text content, child elements (recursively via
    _make_vals), and extension elements/attributes. A "signature" entry in
    *ava* registers the instance in *elements_to_sign*.

    :param klass: The SAML element class to instantiate
    :param ava: Dictionary of attribute values
    :param seccont: The security context, forwarded to child construction
    :param base64encode: Whether text content should be base64 encoded
    :param elements_to_sign: Accumulator of (class name, id) pairs
    :return: The populated instance
    """
    instance = klass()
    for prop in instance.c_attributes.values():
        #print "# %s" % (prop)
        if prop in ava:
            # Attributes are stored as strings; bools/ints are formatted.
            if isinstance(ava[prop], bool):
                setattr(instance, prop, "%s" % ava[prop])
            elif isinstance(ava[prop], int):
                setattr(instance, prop, "%d" % ava[prop])
            else:
                setattr(instance, prop, ava[prop])
    if "text" in ava:
        instance.set_text(ava["text"], base64encode)
    for prop, klassdef in instance.c_children.values():
        #print "## %s, %s" % (prop, klassdef)
        if prop in ava:
            #print "### %s" % ava[prop]
            if isinstance(klassdef, list):
                # means there can be a list of values
                _make_vals(ava[prop], klassdef[0], seccont, instance, prop,
                           base64encode=base64encode,
                           elements_to_sign=elements_to_sign)
            else:
                cis = _make_vals(ava[prop], klassdef, seccont, instance, prop,
                                 True, base64encode, elements_to_sign)
                setattr(instance, prop, cis)
    if "extension_elements" in ava:
        for item in ava["extension_elements"]:
            instance.extension_elements.append(
                ExtensionElement(item["tag"]).loadd(item))
    if "extension_attributes" in ava:
        for key, val in ava["extension_attributes"].items():
            instance.extension_attributes[key] = val
    if "signature" in ava:
        # Remember this element so signed_instance_factory() can sign it.
        elements_to_sign.append((class_name(instance), instance.id))
    return instance
def signed_instance_factory(instance, seccont, elements_to_sign=None):
    """
    :param instance: The instance to be signed or not
    :param seccont: The security context
    :param elements_to_sign: Which parts if any that should be signed
    :return: A string with the signed XML when signing was requested,
        otherwise the untouched instance
    """
    if not elements_to_sign:
        return instance
    signed_xml = "%s" % instance
    for node_name, nodeid in elements_to_sign:
        signed_xml = seccont.sign_statement(
            signed_xml, node_name=node_name, node_id=nodeid)
    return signed_xml
# --------------------------------------------------------------------------
# def create_id():
# """ Create a string of 40 random characters from the set [a-p],
# can be used as a unique identifier of objects.
#
# :return: The string of random characters
# """
# return rndstr(40, "abcdefghijklmonp")
def make_temp(string, suffix="", decode=True, delete=True):
    """Write *string* to a fresh temporary file for xmlsec's benefit.

    xmlsec needs files in some cases where only strings exist.

    :param string: The information to be placed in the file
    :param suffix: Suffix the temporary file should carry, if any
    :param decode: Whether *string* is base64 encoded and must be decoded
        before being written
    :param delete: Whether the file is removed when the handle is closed
    :return: 2-tuple of (open file object, file name); the caller closes
        the file, the name is what gets passed to xmlsec.
    """
    handle = NamedTemporaryFile(suffix=suffix, delete=delete)
    payload = base64.b64decode(string) if decode else string
    handle.write(payload)
    # Rewind so the content is readable through this handle as well.
    handle.seek(0)
    return handle, handle.name
def split_len(seq, length):
    """Chop *seq* into consecutive pieces of at most *length* items each."""
    return [seq[pos:pos + length] for pos in range(0, len(seq), length)]
# --------------------------------------------------------------------------
M2_TIME_FORMAT = "%b %d %H:%M:%S %Y"
def to_time(_time):
    """Convert a '%b %d %H:%M:%S %Y GMT' timestamp string into seconds
    since the epoch. The trailing ' GMT' marker is required."""
    assert _time.endswith(" GMT")
    return mktime(str_to_time(_time[:-4], M2_TIME_FORMAT))
def active_cert(key):
    """
    Verifies that a key is active that is present time is after not_before
    and before not_after.

    :param key: The Key
    :return: True if the key is active else False
    """
    cert_str = pem_format(key)
    certificate = importKey(cert_str)
    try:
        # NOTE(review): objects returned by Crypto.PublicKey.RSA.importKey
        # do not obviously expose get_not_before()/get_not_after(); if they
        # don't, the AttributeError handler below makes this function
        # always return False -- confirm against the installed crypto lib.
        not_before = to_time(str(certificate.get_not_before()))
        not_after = to_time(str(certificate.get_not_after()))
        assert not_before < utc_now()
        assert not_after > utc_now()
        return True
    except AssertionError:
        # Outside the validity window.
        return False
    except AttributeError:
        return False
def cert_from_key_info(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo instance. Care is taken to make sure
    that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo instance
    :param ignore_age: Skip the certificate-validity check
    :return: A possibly empty list of certs
    """
    certs = []
    for x509_data in key_info.x509_data:
        raw = x509_data.x509_certificate.text.strip()
        # Re-flow the base64 body into clean 64-character lines.
        body = "".join(fragment.strip() for fragment in raw.split())
        cert = "\n".join(split_len(body, 64))
        if ignore_age or active_cert(cert):
            certs.append(cert)
        else:
            logger.info("Inactive cert")
    return certs
def cert_from_key_info_dict(key_info, ignore_age=False):
    """ Get all X509 certs from a KeyInfo dictionary. Care is taken to make
    sure that the certs are continues sequences of bytes.

    All certificates appearing in an X509Data element MUST relate to the
    validation key by either containing it or being part of a certification
    chain that terminates in a certificate containing the validation key.

    :param key_info: The KeyInfo dictionary
    :param ignore_age: Skip the certificate-validity check
    :return: A possibly empty list of certs in their text representation
    """
    certs = []
    if "x509_data" not in key_info:
        return certs
    for x509_data in key_info["x509_data"]:
        raw = x509_data["x509_certificate"]["text"].strip()
        # Re-flow the base64 body into clean 64-character lines.
        body = "".join(fragment.strip() for fragment in raw.split())
        cert = "\n".join(split_len(body, 64))
        if ignore_age or active_cert(cert):
            certs.append(cert)
        else:
            logger.info("Inactive cert")
    return certs
def cert_from_instance(instance):
    """ Find certificates that are part of an instance

    :param instance: An instance
    :return: possible empty list of certificates
    """
    signature = instance.signature
    if signature and signature.key_info:
        return cert_from_key_info(signature.key_info, ignore_age=True)
    return []
# =============================================================================
def intarr2long(arr):
    """Interpret *arr*, a sequence of byte values, as one big-endian integer."""
    hex_repr = ''.join("%02x" % byte for byte in arr)
    return long(hex_repr, 16)
def dehexlify(bi):
    """Return the byte values of *bi* as a list of ints.

    Uses two-character slices of the hexlify() output instead of indexing
    single positions: slicing behaves identically whether hexlify returns
    a str (Python 2) or bytes (Python 3), whereas indexing bytes yields
    ints and would break the original ``s[i] + s[i + 1]`` concatenation.
    """
    s = hexlify(bi)
    return [int(s[i:i + 2], 16) for i in range(0, len(s), 2)]
def base64_to_long(data):
    """Decode urlsafe-base64 *data* (tolerating missing padding) into a
    long integer."""
    raw = base64.urlsafe_b64decode(data + '==')
    return intarr2long(dehexlify(raw))
def key_from_key_value(key_info):
    """Build RSA public keys from the KeyValue elements of *key_info*.

    Only entries carrying an rsa_key_value are considered.
    """
    keys = []
    for value in key_info.key_value:
        if not value.rsa_key_value:
            continue
        exponent = base64_to_long(value.rsa_key_value.exponent)
        modulus = base64_to_long(value.rsa_key_value.modulus)
        keys.append(RSA.construct((modulus, exponent)))
    return keys
def key_from_key_value_dict(key_info):
    """Build RSA public keys from the "key_value" entries of the KeyInfo
    dictionary *key_info*. Entries without an "rsa_key_value" are skipped."""
    keys = []
    if "key_value" not in key_info:
        return keys
    for value in key_info["key_value"]:
        if "rsa_key_value" in value:
            rsa_val = value["rsa_key_value"]
            exponent = base64_to_long(rsa_val["exponent"])
            modulus = base64_to_long(rsa_val["modulus"])
            keys.append(RSA.construct((modulus, exponent)))
    return keys
# =============================================================================
#def rsa_load(filename):
# """Read a PEM-encoded RSA key pair from a file."""
# return M2Crypto.RSA.load_key(filename, M2Crypto.util
# .no_passphrase_callback)
#
#
#def rsa_loads(key):
# """Read a PEM-encoded RSA key pair from a string."""
# return M2Crypto.RSA.load_key_string(key,
# M2Crypto.util.no_passphrase_callback)
def rsa_eq(key1, key2):
    """Two RSA keys count as equal when both modulus (n) and public
    exponent (e) match."""
    return key1.n == key2.n and key1.e == key2.e
def extract_rsa_key_from_x509_cert(pem):
    """Extract the RSA public key from a PEM encoded X.509 certificate.

    :param pem: The certificate, PEM encoded
    :return: An RSA key object built from the certificate's
        subjectPublicKeyInfo
    """
    # Convert from PEM to DER
    der = ssl.PEM_cert_to_DER_cert(pem)
    # Extract subjectPublicKeyInfo field from X.509 certificate (see RFC3280)
    cert = DerSequence()
    cert.decode(der)
    tbsCertificate = DerSequence()
    tbsCertificate.decode(cert[0])
    # NOTE(review): index 6 presumes a particular TBSCertificate layout
    # (optional fields present) -- verify against RFC 3280 section 4.1
    # for certificates lacking the optional version field.
    subjectPublicKeyInfo = tbsCertificate[6]
    # Initialize RSA key
    rsa_key = RSA.importKey(subjectPublicKeyInfo)
    return rsa_key
def pem_format(key):
    """Wrap a bare base64 certificate body in PEM BEGIN/END marker lines."""
    return "-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----" % key
def import_rsa_key_from_file(filename):
    """Read the PEM file *filename* and import it as an RSA key object."""
    return RSA.importKey(read_file(filename, 'r'))
def parse_xmlsec_output(output):
    """ Parse the output from xmlsec to try to find out if the
    command was successfull or not.

    :param output: The output from Popen
    :return: A boolean; True if the command was a success otherwise False
    """
    for line in output.split("\n"):
        if line == "OK":
            return True
        if line == "FAIL":
            break
    # Either an explicit FAIL or no verdict at all counts as failure.
    raise XmlsecError(output)
def sha1_digest(msg):
    """Return the raw (binary) SHA-1 digest of *msg*."""
    hasher = hashlib.sha1()
    hasher.update(msg)
    return hasher.digest()
# --------------------------------------------------------------------------
class NamedPipe(object):
    """A FIFO created inside its own private temporary directory.

    ``name`` holds the path of the FIFO; call :meth:`close` to remove
    both the FIFO and its directory.
    """
    def __init__(self):
        self._tempdir = mkdtemp()
        self.name = os.path.join(self._tempdir, 'fifo')
        try:
            os.mkfifo(self.name)
        except OSError:
            # Don't leak the temp directory, but re-raise instead of the
            # original bare 'except' which silently returned a broken
            # object whose .name points at a nonexistent FIFO.
            os.rmdir(self._tempdir)
            raise

    def close(self):
        """Remove the FIFO and the temporary directory holding it."""
        os.remove(self.name)
        os.rmdir(self._tempdir)
# --------------------------------------------------------------------------
class Signer(object):
    """Abstract base class for signing algorithms.

    Concrete subclasses implement sign() and verify() for one
    algorithm each (see RSASigner below).
    """
    def sign(self, msg, key):
        """Sign ``msg`` with ``key`` and return the signature."""
        raise NotImplementedError
    def verify(self, msg, sig, key):
        """Return True if ``sig`` is a valid signature for ``msg``."""
        raise NotImplementedError
class RSASigner(Signer):
    """RSA PKCS#1 v1.5 signing and verification over a configurable digest."""
    def __init__(self, digest):
        # *digest* is a hash module exposing .new() (e.g. Crypto.Hash.SHA256).
        self.digest = digest
    def sign(self, msg, key):
        """Return the PKCS#1 v1.5 signature of *msg* made with *key*."""
        h = self.digest.new(msg)
        signer = PKCS1_v1_5.new(key)
        return signer.sign(h)
    def verify(self, msg, sig, key):
        """Return whether *sig* is a valid signature of *msg* under *key*."""
        h = self.digest.new(msg)
        verifier = PKCS1_v1_5.new(key)
        return verifier.verify(h, sig)
# Map of xmldsig signature-algorithm URIs to Signer implementations.
SIGNER_ALGS = {
    SIG_RSA_SHA1: RSASigner(SHA),
    SIG_RSA_SHA224: RSASigner(SHA224),
    SIG_RSA_SHA256: RSASigner(SHA256),
    SIG_RSA_SHA384: RSASigner(SHA384),
    SIG_RSA_SHA512: RSASigner(SHA512),
}
# Query-parameter order used when (re)building the signed string of an
# HTTP-Redirect binding message, for requests and responses respectively.
REQ_ORDER = ["SAMLRequest", "RelayState", "SigAlg"]
RESP_ORDER = ["SAMLResponse", "RelayState", "SigAlg"]
def verify_redirect_signature(saml_msg, cert=None, sigkey=None):
    """
    Verify the signature of an HTTP-Redirect-bound SAML message.

    :param saml_msg: A dictionary with strings as values, *NOT* lists as
        produced by parse_qs.
    :param cert: A certificate to use when verifying the signature
    :param sigkey: Key to verify with when no certificate is supplied
    :return: True, if signature verified
    """
    try:
        signer = SIGNER_ALGS[saml_msg["SigAlg"]]
    except KeyError:
        raise Unsupported("Signature algorithm: %s" % saml_msg["SigAlg"])
    else:
        # NOTE(review): this membership test is always true here -- the
        # lookup above already succeeded. Kept as-is; were it ever false
        # the function would silently return None.
        if saml_msg["SigAlg"] in SIGNER_ALGS:
            if "SAMLRequest" in saml_msg:
                _order = REQ_ORDER
            elif "SAMLResponse" in saml_msg:
                _order = RESP_ORDER
            else:
                raise Unsupported(
                    "Verifying signature on something that should not be "
                    "signed")
            _args = saml_msg.copy()
            del _args["Signature"]  # everything but the signature
            # Rebuild the exact query string that was signed, in the
            # binding-mandated parameter order.
            string = "&".join(
                [urllib.urlencode({k: _args[k]}) for k in _order if k in _args])
            if cert:
                _key = extract_rsa_key_from_x509_cert(pem_format(cert))
            else:
                _key = sigkey
            _sign = base64.b64decode(saml_msg["Signature"])
            return bool(signer.verify(string, _sign, _key))
# Banner templates used when logging statements and related data.
LOG_LINE = 60 * "=" + "\n%s\n" + 60 * "-" + "\n%s" + 60 * "="
LOG_LINE_2 = 60 * "=" + "\n%s\n%s\n" + 60 * "-" + "\n%s" + 60 * "="
# ---------------------------------------------------------------------------
def read_cert_from_file(cert_file, cert_type):
    """ Reads a certificate from a file. The assumption is that there is
    only one certificate in the file

    :param cert_file: The name of the file
    :param cert_type: The certificate type ("pem", "der", "cer" or "crt")
    :return: A base64 encoded certificate as a string or the empty string
    :raises CertificateError: when a PEM file lacks the expected
        BEGIN/END marker lines
    """
    if not cert_file:
        return ""
    if cert_type == "pem":
        # Use a context manager so the handle is closed deterministically
        # (the original leaked the open file object).
        with open(cert_file) as handle:
            line = handle.read().replace("\r\n", "\n").split("\n")
        if line[0] == "-----BEGIN CERTIFICATE-----":
            line = line[1:]
        elif line[0] == "-----BEGIN PUBLIC KEY-----":
            line = line[1:]
        else:
            raise CertificateError("Strange beginning of PEM file")
        # Drop trailing blank lines before checking the END marker.
        while line[-1] == "":
            line = line[:-1]
        if line[-1] == "-----END CERTIFICATE-----":
            line = line[:-1]
        elif line[-1] == "-----END PUBLIC KEY-----":
            line = line[:-1]
        else:
            raise CertificateError("Strange end of PEM file")
        return "".join(line)
    if cert_type in ["der", "cer", "crt"]:
        data = read_file(cert_file)
        return base64.b64encode(str(data))
class CryptoBackend():
    """Interface that concrete crypto backends (e.g. the xmlsec1 wrapper
    below) must implement for encrypting, decrypting, signing and
    validating XML documents."""
    def __init__(self, debug=False):
        # When True, implementations may emit extra diagnostics.
        self.debug = debug
    def version(self):
        """Return the version string of the underlying crypto tool."""
        raise NotImplementedError()
    def encrypt(self, text, recv_key, template, key_type):
        """Encrypt *text* for the recipient key using *template*."""
        raise NotImplementedError()
    def encrypt_assertion(self, statement, enc_key, template, key_type,
                          node_xpath):
        """Encrypt the assertion found in *statement*."""
        raise NotImplementedError()
    def decrypt(self, enctext, key_file, passphrase=None):
        """Decrypt *enctext* with the private key in *key_file*."""
        raise NotImplementedError()
    def sign_statement(self, statement, node_name, key_file, node_id,
                       id_attr):
        """Sign the node *node_name*/*node_id* inside *statement*."""
        raise NotImplementedError()
    def validate_signature(self, enctext, cert_file, cert_type, node_name,
                           node_id, id_attr):
        """Validate the signature on the named node of *enctext*."""
        raise NotImplementedError()
# Namespace-agnostic XPath (matching on local-name only) addressing
# Response/EncryptedAssertion/Assertion — the default target node for
# assertion encryption.
ASSERT_XPATH = ''.join(["/*[local-name()=\"%s\"]" % v for v in [
    "Response", "EncryptedAssertion", "Assertion"]])
class CryptoBackendXmlSec1(CryptoBackend):
"""
CryptoBackend implementation using external binary 1 to sign
and verify XML documents.
"""
__DEBUG = 0
def __init__(self, xmlsec_binary, **kwargs):
CryptoBackend.__init__(self, **kwargs)
assert (isinstance(xmlsec_binary, basestring))
self.xmlsec = xmlsec_binary
if os.environ.get('PYSAML2_KEEP_XMLSEC_TMP', None):
self._xmlsec_delete_tmpfiles = False
else:
self._xmlsec_delete_tmpfiles = True
def version(self):
com_list = [self.xmlsec, "--version"]
pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
try:
return pof.stdout.read().split(" ")[1]
except IndexError:
return ""
def encrypt(self, text, recv_key, template, session_key_type, xpath=""):
"""
:param text: The text to be compiled
:param recv_key: Filename of a file where the key resides
:param template: Filename of a file with the pre-encryption part
:param session_key_type: Type and size of a new session key
"des-192" generates a new 192 bits DES key for DES3 encryption
:param xpath: What should be encrypted
:return:
"""
logger.debug("Encryption input len: %d" % len(text))
_, fil = make_temp("%s" % text, decode=False)
com_list = [self.xmlsec, "--encrypt", "--pubkey-cert-pem", recv_key,
"--session-key", session_key_type, "--xml-data", fil]
if xpath:
com_list.extend(['--node-xpath', xpath])
(_stdout, _stderr, output) = self._run_xmlsec(com_list, [template],
exception=DecryptError,
validate_output=False)
return output
def encrypt_assertion(self, statement, enc_key, template,
key_type="des-192", node_xpath=None):
"""
Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
if isinstance(statement, SamlBase):
statement = pre_encrypt_assertion(statement)
_, fil = make_temp("%s" % statement, decode=False, delete=False)
_, tmpl = make_temp("%s" % template, decode=False)
if not node_xpath:
node_xpath = ASSERT_XPATH
com_list = [self.xmlsec, "encrypt", "--pubkey-cert-pem", enc_key,
"--session-key", key_type, "--xml-data", fil,
"--node-xpath", node_xpath]
(_stdout, _stderr, output) = self._run_xmlsec(
com_list, [tmpl], exception=EncryptError, validate_output=False)
os.unlink(fil)
if not output:
raise EncryptError(_stderr)
return output
def decrypt(self, enctext, key_file, passphrase=None):
"""
:param enctext: XML document containing an encrypted part
:param key_file: The key to use for the decryption
:return: The decrypted document
"""
logger.debug("Decrypt input len: %d" % len(enctext))
_, fil = make_temp("%s" % enctext, decode=False)
com_list = [self.xmlsec, "--decrypt", "--id-attr:%s" % ID_ATTR,
ENC_KEY_CLASS]
(_stdout, _stderr, output) = self._run_xmlsec(com_list, [fil],
exception=DecryptError,
validate_output=False,
key_file=key_file,
passphrase=passphrase)
return output
def sign_statement(self, statement, node_name, key_file, node_id,
id_attr, passphrase=None):
"""
Sign an XML statement.
:param statement: The statement to be signed
:param node_name: string like 'urn:oasis:names:...:Assertion'
:param key_file: The file where the key can be found
:param node_id:
:param id_attr: The attribute name for the identifier, normally one of
'id','Id' or 'ID'
:return: The signed statement
"""
_, fil = make_temp("%s" % statement, suffix=".xml", decode=False,
delete=self._xmlsec_delete_tmpfiles)
com_list = [self.xmlsec, "--sign",
"--id-attr:%s" % id_attr, node_name]
if node_id:
com_list.extend(["--node-id", node_id])
try:
(stdout, stderr, signed_statement) = self._run_xmlsec(
com_list, [fil], validate_output=False, key_file=key_file,
passphrase=passphrase)
# this doesn't work if --store-signatures are used
if stdout == "":
if signed_statement:
return signed_statement
logger.error(
"Signing operation failed :\nstdout : %s\nstderr : %s" % (
stdout, stderr))
raise SigverError(stderr)
except DecryptError:
raise SigverError("Signing failed")
def validate_signature(self, signedtext, cert_file, cert_type, node_name,
node_id, id_attr):
"""
Validate signature on XML document.
:param signedtext: The XML document as a string
:param cert_file: The public key that was used to sign the document
:param cert_type: The file type of the certificate
:param node_name: The name of the class that is signed
:param node_id: The identifier of the node
:param id_attr: Should normally be one of "id", "Id" or "ID"
:return: Boolean True if the signature was correct otherwise False.
"""
_, fil = make_temp(signedtext, suffix=".xml",
decode=False, delete=self._xmlsec_delete_tmpfiles)
com_list = [self.xmlsec, "--verify",
"--pubkey-cert-%s" % cert_type, cert_file,
"--id-attr:%s" % id_attr, node_name]
if self.debug:
com_list.append("--store-signatures")
if node_id:
com_list.extend(["--node-id", node_id])
if self.__DEBUG:
try:
print " ".join(com_list)
except TypeError:
print "cert_type", cert_type
print "cert_file", cert_file
print "node_name", node_name
print "fil", fil
raise
print "%s: %s" % (cert_file, os.access(cert_file, os.F_OK))
print "%s: %s" % (fil, os.access(fil, os.F_OK))
(_stdout, stderr, _output) = self._run_xmlsec(com_list, [fil],
exception=SignatureError)
return parse_xmlsec_output(stderr)
def _run_xmlsec(self, com_list, extra_args, validate_output=True,
exception=XmlsecError,
key_file=None,
passphrase=None):
"""
Common code to invoke xmlsec and parse the output.
:param com_list: Key-value parameter list for xmlsec
:param extra_args: Positional parameters to be appended after all
key-value parameters
:param validate_output: Parse and validate the output
:param exception: The exception class to raise on errors
:result: Whatever xmlsec wrote to an --output temporary file
"""
ntf = NamedTemporaryFile(suffix=".xml",
delete=self._xmlsec_delete_tmpfiles)
com_list.extend(["--output", ntf.name])
# Unfortunately there's no safe way to pass a password to xmlsec1.
# Instead, we'll decrypt the certificate and write it into a named pipe,
# which we'll pass to xmlsec1.
named_pipe = None
if key_file is not None:
if passphrase is not None:
named_pipe = NamedPipe()
# Decrypt the certificate, but don't write it into the FIFO
# until after we've started xmlsec1.
with open(key_file) as f:
key = importKey(f.read(), passphrase=passphrase)
key_file = named_pipe.name
com_list.extend(["--privkey-pem", key_file])
com_list += extra_args
logger.debug("xmlsec command: %s" % " ".join(com_list))
pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
if named_pipe is not None:
# Finally, write the key into our named pipe.
try:
with open(named_pipe.name, 'wb') as f:
f.write(key.exportKey())
finally:
named_pipe.close()
p_out = pof.stdout.read()
p_err = pof.stderr.read()
if pof.returncode is not None and pof.returncode < 0:
logger.error(LOG_LINE % (p_out, p_err))
raise XmlsecError("%d:%s" % (pof.returncode, p_err))
try:
if validate_output:
parse_xmlsec_output(p_err)
except XmlsecError, exc:
logger.error(LOG_LINE_2 % (p_out, p_err, exc))
raise
ntf.seek(0)
return p_out, p_err, ntf.read()
class CryptoBackendXMLSecurity(CryptoBackend):
    """
    CryptoBackend implementation using pyXMLSecurity to sign and verify
    XML documents.

    Encrypt and decrypt is currently unsupported by pyXMLSecurity.

    pyXMLSecurity uses lxml (libxml2) to parse XML data, but otherwise
    try to get by with native Python code. It does native Python RSA
    signatures, or alternatively PyKCS11 to offload cryptographic work
    to an external PKCS#11 module.
    """

    def __init__(self, debug=False):
        CryptoBackend.__init__(self)
        self.debug = debug

    def version(self):
        # XXX pyXMLSecurity exposes no __version__ attribute, so a static
        # placeholder is reported instead.
        return "XMLSecurity 0.0"

    def sign_statement(self, statement, node_name, key_file, node_id,
                       _id_attr, passphrase=None):
        """
        Sign an XML statement.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param statement: XML as string
        :param node_name: Name of the node to sign
        :param key_file: xmlsec key_spec string(), filename,
            "pkcs11://" URI or PEM data
        :returns: Signed XML as string
        """
        import xmlsec
        import lxml.etree

        assert passphrase is None, "Encrypted key files is not supported"
        doc = xmlsec.parse_xml(statement)
        signed_doc = xmlsec.sign(doc, key_file)
        return lxml.etree.tostring(signed_doc, xml_declaration=True)

    def validate_signature(self, signedtext, cert_file, cert_type, node_name,
                           node_id, id_attr):
        """
        Validate signature on XML document.

        The parameters actually used in this CryptoBackend
        implementation are :

        :param signedtext: The signed XML data as string
        :param cert_file: xmlsec key_spec string(), filename,
            "pkcs11://" URI or PEM data
        :param cert_type: string, must be 'pem' for now
        :returns: True on successful validation, False otherwise
        """
        if cert_type != "pem":
            raise Unsupported("Only PEM certs supported here")

        import xmlsec

        doc = xmlsec.parse_xml(signedtext)
        try:
            return xmlsec.verify(doc, cert_file)
        except xmlsec.XMLSigException:
            return False
def security_context(conf, debug=None):
    """ Creates a security context based on the configuration

    :param conf: The configuration
    :param debug: Explicit debug flag; if unset, ``conf.debug`` is used
        when available
    :return: A SecurityContext instance, or None if *conf* is empty
    """
    if not conf:
        return None

    if debug is None:
        debug = getattr(conf, "debug", None)

    metadata = getattr(conf, "metadata", None)

    _only_md = conf.only_use_keys_in_metadata
    if _only_md is None:
        _only_md = False

    if conf.crypto_backend == 'xmlsec1':
        xmlsec_binary = conf.xmlsec_binary
        if not xmlsec_binary:
            xmlsec_binary = get_xmlsec_binary(
                getattr(conf, "xmlsec_path", []))
        # Fail early if the configured binary is not actually there.
        if not os.path.exists(xmlsec_binary):
            raise SigverError(
                "xmlsec binary not in '%s' !" % xmlsec_binary)
        crypto = _get_xmlsec_cryptobackend(xmlsec_binary, debug=debug)
    elif conf.crypto_backend == 'XMLSecurity':
        # new and somewhat untested pyXMLSecurity crypto backend.
        crypto = CryptoBackendXMLSecurity(debug=debug)
    else:
        raise SigverError('Unknown crypto_backend %s' % (
            repr(conf.crypto_backend)))

    return SecurityContext(
        crypto, conf.key_file, cert_file=conf.cert_file, metadata=metadata,
        debug=debug, only_use_keys_in_metadata=_only_md,
        cert_handler_extra_class=conf.cert_handler_extra_class,
        generate_cert_info=conf.generate_cert_info,
        tmp_cert_file=conf.tmp_cert_file,
        tmp_key_file=conf.tmp_key_file,
        validate_certificate=conf.validate_certificate,
        key_file_passphrase=conf.key_file_passphrase)
def encrypt_cert_from_item(item):
    """Extract an encryption certificate from a SAML element.

    First tries the PEFIM extension elements; on any failure (or when
    nothing is found there) falls back to the certificates embedded in the
    element itself. The result is wrapped in PEM BEGIN/END markers if they
    are missing.

    :param item: Parsed SAML element
    :return: PEM formatted certificate string, or None
    """
    cert = None
    try:
        elems = extension_elements_to_elements(
            item.extension_elements[0].children, [pefim, ds])
        if len(elems) == 1:
            cert = elems[0].x509_data[0].x509_certificate.text
        else:
            instance_certs = cert_from_instance(item)
            if len(instance_certs) > 0:
                cert = instance_certs[0]
    except Exception:
        # Best effort: any failure above falls through to the plain
        # instance-certificate lookup below.
        pass

    if cert is None:
        instance_certs = cert_from_instance(item)
        if len(instance_certs) > 0:
            cert = instance_certs[0]

    if cert is not None:
        if "-----BEGIN CERTIFICATE-----\n" not in cert:
            cert = "-----BEGIN CERTIFICATE-----\n" + cert
        if "\n-----END CERTIFICATE-----" not in cert:
            cert = cert + "\n-----END CERTIFICATE-----"
    return cert
class CertHandlerExtra(object):
    """Plug-in interface for externalising certificate generation and
    validation; consumed by CertHandler."""

    def __init__(self):
        pass

    def use_generate_cert_func(self):
        """Return True if generate_cert() should be called; abstract."""
        raise Exception("use_generate_cert_func function must be implemented")

    def generate_cert(self, generate_cert_info, root_cert_string,
                      root_key_string):
        """Generate a new certificate signed by the root cert/key; abstract."""
        raise Exception("generate_cert function must be implemented")
        #Excepts to return (cert_string, key_string)

    def use_validate_cert_func(self):
        """Return True if validate_cert() should be called; abstract."""
        raise Exception("use_validate_cert_func function must be implemented")

    def validate_cert(self, cert_str, root_cert_string, root_key_string):
        """Validate *cert_str* against the root cert/key; abstract."""
        raise Exception("validate_cert function must be implemented")
        #Excepts to return True/False
class CertHandler(object):
    """Manages the signing certificate/key pair for a SecurityContext:
    either a single static PEM pair, or (optionally) a freshly generated
    certificate per call, signed by the configured root pair."""

    def __init__(self, security_context, cert_file=None, cert_type="pem",
                 key_file=None, key_type="pem", generate_cert_info=None,
                 cert_handler_extra_class=None, tmp_cert_file=None,
                 tmp_key_file=None, verify_cert=False):
        """
        Initiates the class for handling certificates. Enables the certificates
        to either be a single certificate as base functionality or makes it
        possible to generate a new certificate for each call to the function.

        :param security_context: Owning SecurityContext (updated in place
            by update_cert)
        :param cert_file: Root/static certificate file
        :param cert_type: Certificate file type (only "pem" activates this
            handler)
        :param key_file: Root/static private key file
        :param key_type: Key file type (only "pem" activates this handler)
        :param generate_cert_info: Subject info for generated certificates;
            enables per-call generation when set
        :param cert_handler_extra_class: Optional CertHandlerExtra plug-in
        :param tmp_cert_file: Where generated certificates are written
        :param tmp_key_file: Where generated keys are written
        :param verify_cert: Whether verify_cert() should actually verify
        """
        self._verify_cert = False
        self._generate_cert = False
        #This cert do not have to be valid, it is just the last cert to be
        # validated.
        self._last_cert_verified = None
        # All state below is only set up for PEM cert/key pairs; other
        # types leave the handler inert.
        if cert_type == "pem" and key_type == "pem":
            self._verify_cert = verify_cert is True
            self._security_context = security_context
            self._osw = OpenSSLWrapper()
            if key_file and os.path.isfile(key_file):
                self._key_str = self._osw.read_str_from_file(key_file, key_type)
            else:
                self._key_str = ""
            if cert_file and os.path.isfile(cert_file):
                self._cert_str = self._osw.read_str_from_file(cert_file,
                                                              cert_type)
            else:
                self._cert_str = ""
            # Start out with the static pair as the "current" pair.
            self._tmp_cert_str = self._cert_str
            self._tmp_key_str = self._key_str
            self._tmp_cert_file = tmp_cert_file
            self._tmp_key_file = tmp_key_file

            self._cert_info = None
            self._generate_cert_func_active = False
            # Per-call generation requires subject info, a root pair and
            # the two tmp file destinations.
            if generate_cert_info is not None and len(self._cert_str) > 0 and \
                    len(self._key_str) > 0 and tmp_key_file is not \
                    None and tmp_cert_file is not None:
                self._generate_cert = True
                self._cert_info = generate_cert_info
            self._cert_handler_extra_class = cert_handler_extra_class

    def verify_cert(self, cert_file):
        """Verify *cert_file* against the root certificate; returns True
        when verification is disabled."""
        if self._verify_cert:
            cert_str = self._osw.read_str_from_file(cert_file, "pem")
            # NOTE(review): attribute name differs from _last_cert_verified
            # set in __init__ — looks like an inconsistency; confirm which
            # attribute consumers read.
            self._last_validated_cert = cert_str
            if self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_validate_cert_func():
                # NOTE(review): the plug-in's verdict is discarded and True
                # is returned below — confirm this is intended.
                self._cert_handler_extra_class.validate_cert(
                    cert_str, self._cert_str, self._key_str)
            else:
                valid, mess = self._osw.verify(self._cert_str, cert_str)
                logger.info("CertHandler.verify_cert: %s" % mess)
                return valid
        return True

    def generate_cert(self):
        # True when a fresh certificate is generated per call.
        return self._generate_cert

    def update_cert(self, active=False, client_crt=None):
        """Install a new certificate/key pair on the owning
        SecurityContext — either the supplied *client_crt*, one produced by
        the extra-class plug-in, or one generated and signed locally."""
        if (self._generate_cert and active) or client_crt is not None:
            if client_crt is not None:
                self._tmp_cert_str = client_crt
                #No private key for signing
                self._tmp_key_str = ""
            elif self._cert_handler_extra_class is not None and \
                    self._cert_handler_extra_class.use_generate_cert_func():
                (self._tmp_cert_str, self._tmp_key_str) = \
                    self._cert_handler_extra_class.generate_cert(
                        self._cert_info, self._cert_str, self._key_str)
            else:
                # Create a CSR-based pair and sign it with the root pair.
                self._tmp_cert_str, self._tmp_key_str = self._osw\
                    .create_certificate(self._cert_info, request=True)
                self._tmp_cert_str = self._osw.create_cert_signed_certificate(
                    self._cert_str, self._key_str, self._tmp_cert_str)
                valid, mess = self._osw.verify(self._cert_str,
                                               self._tmp_cert_str)
            # Persist the pair and repoint the security context at it.
            self._osw.write_str_to_file(self._tmp_cert_file, self._tmp_cert_str)
            self._osw.write_str_to_file(self._tmp_key_file, self._tmp_key_str)
            self._security_context.key_file = self._tmp_key_file
            self._security_context.cert_file = self._tmp_cert_file
            self._security_context.key_type = "pem"
            self._security_context.cert_type = "pem"
            self._security_context.my_cert = read_cert_from_file(
                self._security_context.cert_file,
                self._security_context.cert_type)
# How to get a rsa pub key fingerprint from a certificate
# openssl x509 -inform pem -noout -in server.crt -pubkey > publickey.pem
# openssl rsa -inform pem -noout -in publickey.pem -pubin -modulus
class SecurityContext(object):
my_cert = None
def __init__(self, crypto, key_file="", key_type="pem",
cert_file="", cert_type="pem", metadata=None,
debug=False, template="", encrypt_key_type="des-192",
only_use_keys_in_metadata=False, cert_handler_extra_class=None,
generate_cert_info=None, tmp_cert_file=None,
tmp_key_file=None, validate_certificate=None,
key_file_passphrase=None):
self.crypto = crypto
assert (isinstance(self.crypto, CryptoBackend))
# Your private key
self.key_file = key_file
self.key_file_passphrase = key_file_passphrase
self.key_type = key_type
# Your public key
self.cert_file = cert_file
self.cert_type = cert_type
self.my_cert = read_cert_from_file(cert_file, cert_type)
self.cert_handler = CertHandler(self, cert_file, cert_type, key_file,
key_type, generate_cert_info,
cert_handler_extra_class, tmp_cert_file,
tmp_key_file, validate_certificate)
self.cert_handler.update_cert(True)
self.metadata = metadata
self.only_use_keys_in_metadata = only_use_keys_in_metadata
self.debug = debug
if not template:
this_dir, this_filename = os.path.split(__file__)
self.template = os.path.join(this_dir, "xml", "template.xml")
else:
self.template = template
self.encrypt_key_type = encrypt_key_type
# keep certificate files to debug xmlsec invocations
if os.environ.get('PYSAML2_KEEP_XMLSEC_TMP', None):
self._xmlsec_delete_tmpfiles = False
else:
self._xmlsec_delete_tmpfiles = True
def correctly_signed(self, xml, must=False):
logger.debug("verify correct signature")
return self.correctly_signed_response(xml, must)
def encrypt(self, text, recv_key="", template="", key_type=""):
"""
xmlsec encrypt --pubkey-pem pub-userkey.pem
--session-key aes128-cbc --xml-data doc-plain.xml
--output doc-encrypted.xml session-key-template.xml
:param text: Text to encrypt
:param recv_key: A file containing the receivers public key
:param template: A file containing the XMLSEC template
:param key_type: The type of session key to use
:result: An encrypted XML text
"""
if not key_type:
key_type = self.encrypt_key_type
if not template:
template = self.template
return self.crypto.encrypt(text, recv_key, template, key_type)
def encrypt_assertion(self, statement, enc_key, template,
key_type="des-192", node_xpath=None):
"""
Will encrypt an assertion
:param statement: A XML document that contains the assertion to encrypt
:param enc_key: File name of a file containing the encryption key
:param template: A template for the encryption part to be added.
:param key_type: The type of session key to use.
:return: The encrypted text
"""
raise NotImplemented()
def decrypt(self, enctext, key_file=None, passphrase=None):
""" Decrypting an encrypted text by the use of a private key.
:param enctext: The encrypted text as a string
:return: The decrypted text
"""
if key_file is None or len(key_file.strip()) == 0:
key_file = self.key_file
if passphrase is None:
passphrase = self.key_file_passphrase
return self.crypto.decrypt(enctext, key_file, passphrase)
def verify_signature(self, signedtext, cert_file=None, cert_type="pem",
node_name=NODE_NAME, node_id=None, id_attr=""):
""" Verifies the signature of a XML document.
:param signedtext: The XML document as a string
:param cert_file: The public key that was used to sign the document
:param cert_type: The file type of the certificate
:param node_name: The name of the class that is signed
:param node_id: The identifier of the node
:param id_attr: Should normally be one of "id", "Id" or "ID"
:return: Boolean True if the signature was correct otherwise False.
"""
# This is only for testing purposes, otherwise when would you receive
# stuff that is signed with your key !?
if not cert_file:
cert_file = self.cert_file
cert_type = self.cert_type
if not id_attr:
id_attr = ID_ATTR
return self.crypto.validate_signature(signedtext, cert_file=cert_file,
cert_type=cert_type,
node_name=node_name,
node_id=node_id, id_attr=id_attr)
def _check_signature(self, decoded_xml, item, node_name=NODE_NAME,
origdoc=None, id_attr="", must=False,
only_valid_cert=False):
#print item
try:
issuer = item.issuer.text.strip()
except AttributeError:
issuer = None
# More trust in certs from metadata then certs in the XML document
if self.metadata:
try:
_certs = self.metadata.certs(issuer, "any", "signing")
except KeyError:
_certs = []
certs = []
for cert in _certs:
if isinstance(cert, basestring):
certs.append(make_temp(pem_format(cert), suffix=".pem",
decode=False,
delete=self._xmlsec_delete_tmpfiles))
else:
certs.append(cert)
else:
certs = []
if not certs and not self.only_use_keys_in_metadata:
logger.debug("==== Certs from instance ====")
certs = [make_temp(pem_format(cert), suffix=".pem",
decode=False, delete=self._xmlsec_delete_tmpfiles)
for cert in cert_from_instance(item)]
else:
logger.debug("==== Certs from metadata ==== %s: %s ====" % (issuer,
certs))
if not certs:
raise MissingKey("%s" % issuer)
#print certs
verified = False
last_pem_file = None
for _, pem_file in certs:
try:
last_pem_file = pem_file
if origdoc is not None:
try:
if self.verify_signature(origdoc, pem_file,
node_name=node_name,
node_id=item.id,
id_attr=id_attr):
verified = True
break
except Exception:
if self.verify_signature(decoded_xml, pem_file,
node_name=node_name,
node_id=item.id,
id_attr=id_attr):
verified = True
break
else:
if self.verify_signature(decoded_xml, pem_file,
node_name=node_name,
node_id=item.id, id_attr=id_attr):
verified = True
break
except XmlsecError, exc:
logger.error("check_sig: %s" % exc)
pass
except SignatureError, exc:
logger.error("check_sig: %s" % exc)
pass
except Exception, exc:
logger.error("check_sig: %s" % exc)
raise
if (not verified) and (not only_valid_cert):
raise SignatureError("Failed to verify signature")
else:
if not self.cert_handler.verify_cert(last_pem_file):
raise CertificateError("Invalid certificate!")
return item
def check_signature(self, item, node_name=NODE_NAME, origdoc=None,
id_attr="", must=False):
"""
:param item: Parsed entity
:param node_name: The name of the node/class/element that is signed
:param origdoc: The original XML string
:param id_attr:
:param must:
:return:
"""
return self._check_signature(origdoc, item, node_name, origdoc,
id_attr=id_attr, must=must)
def correctly_signed_message(self, decoded_xml, msgtype, must=False,
origdoc=None, only_valid_cert=False):
"""Check if a request is correctly signed, if we have metadata for
the entity that sent the info use that, if not use the key that are in
the message if any.
:param decoded_xml: The SAML message as an XML infoset (a string)
:param msgtype: SAML protocol message type
:param must: Whether there must be a signature
:param origdoc:
:return:
"""
try:
_func = getattr(samlp, "%s_from_string" % msgtype)
except AttributeError:
_func = getattr(saml, "%s_from_string" % msgtype)
msg = _func(decoded_xml)
if not msg:
raise TypeError("Not a %s" % msgtype)
if not msg.signature:
if must:
raise SignatureError("Required signature missing on %s" % msgtype)
else:
return msg
return self._check_signature(decoded_xml, msg, class_name(msg),
origdoc, must=must,
only_valid_cert=only_valid_cert)
def correctly_signed_authn_request(self, decoded_xml, must=False,
origdoc=None, only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml, "authn_request",
must, origdoc,
only_valid_cert=only_valid_cert)
def correctly_signed_authn_query(self, decoded_xml, must=False,
origdoc=None, only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml, "authn_query",
must, origdoc, only_valid_cert)
def correctly_signed_logout_request(self, decoded_xml, must=False,
origdoc=None, only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml, "logout_request",
must, origdoc, only_valid_cert)
def correctly_signed_logout_response(self, decoded_xml, must=False,
origdoc=None, only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml, "logout_response",
must, origdoc, only_valid_cert)
def correctly_signed_attribute_query(self, decoded_xml, must=False,
origdoc=None, only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml, "attribute_query",
must, origdoc, only_valid_cert)
def correctly_signed_authz_decision_query(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"authz_decision_query", must,
origdoc, only_valid_cert)
def correctly_signed_authz_decision_response(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"authz_decision_response", must,
origdoc, only_valid_cert)
def correctly_signed_name_id_mapping_request(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"name_id_mapping_request",
must, origdoc, only_valid_cert)
def correctly_signed_name_id_mapping_response(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"name_id_mapping_response",
must, origdoc, only_valid_cert)
def correctly_signed_artifact_request(self, decoded_xml, must=False,
origdoc=None, only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"artifact_request",
must, origdoc, only_valid_cert)
def correctly_signed_artifact_response(self, decoded_xml, must=False,
origdoc=None, only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"artifact_response",
must, origdoc, only_valid_cert)
def correctly_signed_manage_name_id_request(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"manage_name_id_request",
must, origdoc, only_valid_cert)
def correctly_signed_manage_name_id_response(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"manage_name_id_response", must,
origdoc, only_valid_cert)
def correctly_signed_assertion_id_request(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False,
**kwargs):
return self.correctly_signed_message(decoded_xml,
"assertion_id_request", must,
origdoc, only_valid_cert)
def correctly_signed_assertion_id_response(self, decoded_xml, must=False,
origdoc=None,
only_valid_cert=False, **kwargs):
return self.correctly_signed_message(decoded_xml, "assertion", must,
origdoc, only_valid_cert)
def correctly_signed_response(self, decoded_xml, must=False, origdoc=None,
only_valid_cert=False,
require_response_signature=False, **kwargs):
""" Check if a instance is correctly signed, if we have metadata for
the IdP that sent the info use that, if not use the key that are in
the message if any.
:param decoded_xml: The SAML message as a XML string
:param must: Whether there must be a signature
:param origdoc:
:param only_valid_cert:
:param require_response_signature:
:return: None if the signature can not be verified otherwise an instance
"""
response = samlp.any_response_from_string(decoded_xml)
if not response:
raise TypeError("Not a Response")
if response.signature:
self._check_signature(decoded_xml, response, class_name(response),
origdoc)
elif require_response_signature:
raise SignatureError("Signature missing for response")
# if isinstance(response, Response) and response.assertion:
# # Try to find the signing cert in the assertion
# for assertion in response.assertion:
# if not hasattr(assertion, 'signature') or not assertion.signature:
# logger.debug("unsigned")
# if must:
# raise SignatureError("Signature missing for assertion")
# continue
# else:
# logger.debug("signed")
#
# try:
# self._check_signature(decoded_xml, assertion,
# class_name(assertion), origdoc)
# except Exception, exc:
# logger.error("correctly_signed_response: %s" % exc)
# raise
return response
#--------------------------------------------------------------------------
# SIGNATURE PART
#--------------------------------------------------------------------------
def sign_statement_using_xmlsec(self, statement, **kwargs):
""" Deprecated function. See sign_statement(). """
return self.sign_statement(statement, **kwargs)
def sign_statement(self, statement, node_name, key=None,
key_file=None, node_id=None, id_attr="",
passphrase=None):
"""Sign a SAML statement.
:param statement: The statement to be signed
:param node_name: string like 'urn:oasis:names:...:Assertion'
:param key: The key to be used for the signing, either this or
:param key_file: The file where the key can be found
:param node_id:
:param id_attr: The attribute name for the identifier, normally one of
'id','Id' or 'ID'
:return: The signed statement
"""
if not id_attr:
id_attr = ID_ATTR
if not key_file and key:
_, key_file = make_temp("%s" % key, ".pem")
if not key and not key_file:
key_file = self.key_file
if not passphrase:
passphrase = self.key_file_passphrase
return self.crypto.sign_statement(statement, node_name, key_file,
node_id, id_attr,
passphrase=passphrase)
def sign_assertion_using_xmlsec(self, statement, **kwargs):
""" Deprecated function. See sign_assertion(). """
return self.sign_statement(statement, class_name(saml.Assertion()),
**kwargs)
def sign_assertion(self, statement, **kwargs):
"""Sign a SAML assertion.
See sign_statement() for the kwargs.
:param statement: The statement to be signed
:return: The signed statement
"""
return self.sign_statement(statement, class_name(saml.Assertion()),
**kwargs)
def sign_attribute_query_using_xmlsec(self, statement, **kwargs):
""" Deprecated function. See sign_attribute_query(). """
return self.sign_attribute_query(statement, **kwargs)
def sign_attribute_query(self, statement, **kwargs):
"""Sign a SAML attribute query.
See sign_statement() for the kwargs.
:param statement: The statement to be signed
:return: The signed statement
"""
return self.sign_statement(statement, class_name(
samlp.AttributeQuery()), **kwargs)
def multiple_signatures(self, statement, to_sign, key=None, key_file=None,
passphrase=None):
"""
Sign multiple parts of a statement
:param statement: The statement that should be sign, this is XML text
:param to_sign: A list of (items, id, id attribute name) tuples that
specifies what to sign
:param key: A key that should be used for doing the signing
:param key_file: A file that contains the key to be used
:return: A possibly multiple signed statement
"""
for (item, sid, id_attr) in to_sign:
if not sid:
if not item.id:
sid = item.id = sid()
else:
sid = item.id
if not item.signature:
item.signature = pre_signature_part(sid, self.cert_file)
statement = self.sign_statement(statement, class_name(item),
key=key, key_file=key_file,
node_id=sid, id_attr=id_attr,
passphrase=passphrase)
return statement
# ===========================================================================
def pre_signature_part(ident, public_key=None, identifier=None,
                       digest_alg=None, sign_alg=None):
    """
    Build the preset <ds:Signature> template used when signing an assertion.

    :param ident: The identifier of the assertion, so you know which
        assertion was signed; used as the Reference URI.
    :param public_key: The base64 part of a PEM file, embedded as an
        X509Certificate when given.
    :param identifier: Optional numeric suffix for the signature's own id.
    :param digest_alg: Digest algorithm URI; defaults to ds.digest_default.
    :param sign_alg: Signature algorithm URI; defaults to ds.sig_default.
    :return: A preset signature part.
    """
    digest_alg = digest_alg or ds.digest_default
    sign_alg = sign_alg or ds.sig_default

    # Enveloped signature, exclusive canonicalization — the standard
    # transform pair for SAML signing.
    transforms = ds.Transforms(transform=[
        ds.Transform(algorithm=ds.TRANSFORM_ENVELOPED),
        ds.Transform(algorithm=ds.ALG_EXC_C14N),
    ])
    reference = ds.Reference(
        uri="#%s" % ident,
        digest_value=ds.DigestValue(),
        transforms=transforms,
        digest_method=ds.DigestMethod(algorithm=digest_alg))
    signed_info = ds.SignedInfo(
        signature_method=ds.SignatureMethod(algorithm=sign_alg),
        canonicalization_method=ds.CanonicalizationMethod(
            algorithm=ds.ALG_EXC_C14N),
        reference=reference)
    signature = ds.Signature(signed_info=signed_info,
                             signature_value=ds.SignatureValue())

    if identifier:
        signature.id = "Signature%d" % identifier
    if public_key:
        signature.key_info = ds.KeyInfo(x509_data=ds.X509Data(
            x509_certificate=[ds.X509Certificate(text=public_key)]))
    return signature
# <?xml version="1.0" encoding="UTF-8"?>
# <EncryptedData Id="ED" Type="http://www.w3.org/2001/04/xmlenc#Element"
# xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#tripledes-cbc"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <EncryptedKey Id="EK" xmlns="http://www.w3.org/2001/04/xmlenc#">
# <EncryptionMethod Algorithm="http://www.w3
# .org/2001/04/xmlenc#rsa-1_5"/>
# <ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
# <ds:KeyName>my-rsa-key</ds:KeyName>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# <ReferenceList>
# <DataReference URI="#ED"/>
# </ReferenceList>
# </EncryptedKey>
# </ds:KeyInfo>
# <CipherData>
# <CipherValue>
# </CipherValue>
# </CipherData>
# </EncryptedData>
def pre_encryption_part(msg_enc=TRIPLE_DES_CBC, key_enc=RSA_1_5,
                        key_name="my-rsa-key"):
    """
    Build an <EncryptedData> template matching the example in the comment
    above, with empty CipherValue placeholders to be filled by xmlsec.

    :param msg_enc: Algorithm URI used to encrypt the message body.
    :param key_enc: Algorithm URI used to encrypt the session key.
    :param key_name: Name identifying the key-encryption key.
    :return: An EncryptedData template element.
    """
    encrypted_key = EncryptedKey(
        id="EK",
        encryption_method=EncryptionMethod(algorithm=key_enc),
        key_info=ds.KeyInfo(key_name=ds.KeyName(text=key_name)),
        cipher_data=CipherData(cipher_value=CipherValue(text="")))
    return EncryptedData(
        id="ED",
        type="http://www.w3.org/2001/04/xmlenc#Element",
        encryption_method=EncryptionMethod(algorithm=msg_enc),
        key_info=ds.KeyInfo(encrypted_key=encrypted_key),
        cipher_data=CipherData(cipher_value=CipherValue(text="")))
def pre_encrypt_assertion(response):
    """
    Move the assertion(s) of *response* into an encrypted_assertion element.

    :param response: The response with one assertion (or a list of them).
    :return: The same response, but now with the assertion within an
        encrypted_assertion placeholder.
    """
    original_assertion = response.assertion
    response.assertion = None

    container = EncryptedAssertion()
    if isinstance(original_assertion, list):
        container.add_extension_elements(original_assertion)
    else:
        container.add_extension_element(original_assertion)
    response.encrypted_assertion = container

    return response
def response_factory(sign=False, encrypt=False, **kwargs):
    """
    Create a samlp.Response with a fresh id and issue instant.

    :param sign: If true, attach a preset signature part (requires an
        ``id`` keyword argument).
    :param encrypt: Currently a no-op placeholder.
    :param kwargs: Extra attributes set verbatim on the response.
    :return: The constructed samlp.Response instance.
    """
    response = samlp.Response(id=sid(), version=VERSION,
                              issue_instant=instant())
    if sign:
        response.signature = pre_signature_part(kwargs["id"])
    if encrypt:
        # Encryption is handled elsewhere; nothing to do here yet.
        pass
    for attr, value in kwargs.items():
        setattr(response, attr, value)
    return response
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--list-sigalgs', dest='listsigalgs',
                        action='store_true',
                        help='List implemented signature algorithms')
    args = parser.parse_args()

    if args.listsigalgs:
        # One implemented signature-algorithm identifier per line.
        # Iterating the mapping yields its keys directly.
        print('\n'.join(SIGNER_ALGS))
|
kidmaple/CoolWall
|
refs/heads/nios2
|
user/python/Lib/dos-8x3/test_ope.py
|
10
|
# Test to see if openpty works. (But don't worry if it isn't available.)

import os
from test_support import verbose, TestFailed, TestSkipped

# os.openpty() only exists on platforms with pty support; treat its
# absence (AttributeError) as a skip rather than a failure.
try:
    if verbose:
        print "Calling os.openpty()"
    master, slave = os.openpty()
    if verbose:
        print "(master, slave) = (%d, %d)"%(master, slave)
except AttributeError:
    raise TestSkipped, "No openpty() available."

# Both ends of the pseudo-terminal pair must report as terminals.
if not os.isatty(master):
    raise TestFailed, "Master-end of pty is not a terminal."
if not os.isatty(slave):
    raise TestFailed, "Slave-end of pty is not a terminal."

# Round-trip a few bytes: write to the slave end, read back from the
# master end, and print what came through.
os.write(slave, 'Ping!')
print os.read(master, 1024)
|
suutari-ai/shoop
|
refs/heads/master
|
shuup_tests/utils/test_analog.py
|
3
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django import VERSION
from shuup.testing.models import PseudoPaymentProcessor
from shuup.utils.analog import BaseLogEntry, define_log_model
def test_analog():
    """Check that define_log_model() wires a log-entry model to its target."""
    PseudoPaymentProcessorLogEntry = define_log_model(PseudoPaymentProcessor)
    # The generated model should live in the same module as its target.
    assert PseudoPaymentProcessorLogEntry.__module__ == PseudoPaymentProcessor.__module__

    related_field_name = "related"
    # Behavior changes in Django 1.9: the relation descriptor attribute
    # moved from ``related`` to ``rel``.
    if VERSION >= (1, 9):
        related_field_name = "rel"

    # Forward relation: the log entry's "target" field points at the
    # processor model.
    relation_manager = getattr(PseudoPaymentProcessorLogEntry._meta.get_field("target"), related_field_name)
    assert relation_manager.to is PseudoPaymentProcessor

    # Reverse relation: the processor's log_entries accessor points back
    # at the generated log-entry model.
    relation_manager = getattr(PseudoPaymentProcessor.log_entries, related_field_name)
    assert relation_manager.model is PseudoPaymentProcessor
    assert relation_manager.related_model is PseudoPaymentProcessorLogEntry

    assert issubclass(PseudoPaymentProcessorLogEntry, BaseLogEntry)
    assert isinstance(PseudoPaymentProcessorLogEntry(), BaseLogEntry)
|
detiber/lib_openshift
|
refs/heads/master
|
test/test_v1_deployment_config_rollback_spec.py
|
2
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_deployment_config_rollback_spec import V1DeploymentConfigRollbackSpec
class TestV1DeploymentConfigRollbackSpec(unittest.TestCase):
    """Unit test stubs for the V1DeploymentConfigRollbackSpec model."""

    def setUp(self):
        # No fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1DeploymentConfigRollbackSpec(self):
        """Smoke-test construction of V1DeploymentConfigRollbackSpec."""
        # Instantiating the generated model must not raise; the class is
        # the same object as the one imported at the top of the file.
        model = V1DeploymentConfigRollbackSpec()
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
uta-smile/smile-python
|
refs/heads/master
|
tests/python/test_flags.py
|
1
|
"""Tests for our flags implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from smile import flags
# Flags exercised by FlagsTest below: one of each supported type, plus
# several booleans used to test negation and command-line forms.
flags.DEFINE_string("string_foo", "default_val", "HelpString")
flags.DEFINE_integer("int_foo", 42, "HelpString")
flags.DEFINE_float("float_foo", 42.0, "HelpString")
flags.DEFINE_boolean("bool_foo", True, "HelpString")
flags.DEFINE_boolean("bool_negation", True, "HelpString")
flags.DEFINE_boolean("bool-dash-negation", True, "HelpString")
flags.DEFINE_boolean("bool_a", False, "HelpString")
flags.DEFINE_boolean("bool_c", False, "HelpString")
flags.DEFINE_boolean("bool_d", True, "HelpString")
flags.DEFINE_bool("bool_e", True, "HelpString")

# Subcommands registered under the "action" destination.
with flags.Subcommand("dummy_action", dest="action"):
    pass

with flags.Subcommand("move", dest="action"):
    flags.DEFINE_string("move_string", "default", "help")
    flags.DEFINE_bool("move_bool", True, "HelpString")

    # Nested subcommands under "move", keyed by the "object" destination
    # (exercised by FlagsTest.test_subsubcmd).
    with flags.Subcommand("dummy_object", dest="object"):
        pass

    with flags.Subcommand("wa", dest="object"):
        flags.DEFINE_string("move_wa_string", "default_wa", "help")
        flags.DEFINE_bool("move_wa_bool", False, "HelpString")

with flags.Subcommand("require", dest="action"):
    # Required flags used by FlagsTest.test_required.
    flags.DEFINE_string(
        "string_foo_required", "default_val", "HelpString", required=True)
    flags.DEFINE_integer("int_foo_required", 42, "HelpString", required=True)
    flags.DEFINE_float("float_foo_required", 42.0, "HelpString", required=True)

FLAGS = flags.FLAGS
class FlagsTest(unittest.TestCase):
    """Unit tests for flags library."""

    def setUp(self):
        """Reset FLAGS before each test with the default action."""
        # provide the default action.
        FLAGS._parse_flags(["dummy_action"]) # pylint: disable=protected-access

    def test_string(self):
        """Test DEFINE_string."""
        self.assertEqual("default_val", FLAGS.string_foo)
        FLAGS._parse_flags(["--string_foo", "bar", "dummy_action"]) # pylint: disable=protected-access
        self.assertEqual("bar", FLAGS.string_foo)

    def test_bool(self):
        """Test DEFINE_bool."""
        self.assertTrue(FLAGS.bool_foo)
        # The --no<flag> prefix forces the boolean to False.
        FLAGS._parse_flags(["--nobool_foo", "dummy_action"]) # pylint: disable=protected-access
        self.assertFalse(FLAGS.bool_foo)

    def test_bool_commandlines(self):
        """Test the various command-line spellings of boolean flags."""
        # Specified on command line with no args, sets to True,
        # even if default is False.
        FLAGS._parse_flags([ # pylint: disable=protected-access
            "--bool_a", "--nobool_negation", "--bool_c=True", "--bool_d=False", "dummy_action"
        ])
        self.assertEqual(True, FLAGS.bool_a)
        # --no before the flag forces it to False, even if the
        # default is True
        self.assertEqual(False, FLAGS.bool_negation)
        # --bool_flag=True sets to True
        self.assertEqual(True, FLAGS.bool_c)
        # --bool_flag=False sets to False
        self.assertEqual(False, FLAGS.bool_d)

    def test_int(self):
        """Test DEFINE_integer."""
        self.assertEqual(42, FLAGS.int_foo)
        FLAGS._parse_flags(["--int_foo", "-1", "dummy_action"]) # pylint: disable=protected-access
        self.assertEqual(-1, FLAGS.int_foo)

    def test_float(self):
        """Test DEFINE_float."""
        self.assertEqual(42.0, FLAGS.float_foo)
        FLAGS._parse_flags(["--float_foo", "-1.0", "dummy_action"]) # pylint: disable=protected-access
        self.assertEqual(-1.0, FLAGS.float_foo)

    def test_subcmd(self):
        """Test subcommand action and string."""
        # test default value.
        FLAGS._parse_flags(["move", "dummy_object"]) # pylint: disable=protected-access
        self.assertEqual("move", FLAGS.action)
        self.assertEqual("default", FLAGS.move_string)
        self.assertTrue(FLAGS.move_bool)
        # test changing the values via the command line.
        FLAGS._parse_flags( # pylint: disable=protected-access
            ["move", "--move_string", "up", "--nomove_bool", "dummy_object"])
        self.assertEqual("move", FLAGS.action)
        self.assertEqual("up", FLAGS.move_string)
        self.assertFalse(FLAGS.move_bool)

    def test_subsubcmd(self):
        """Test subsubcommand action and string."""
        # test default value.
        FLAGS._parse_flags(["move", "wa"]) # pylint: disable=protected-access
        self.assertEqual("move", FLAGS.action)
        self.assertEqual("wa", FLAGS.object)
        self.assertEqual("default", FLAGS.move_string)
        self.assertTrue(FLAGS.move_bool)
        self.assertEqual("default_wa", FLAGS.move_wa_string)
        self.assertFalse(FLAGS.move_wa_bool)
        # test changing the values via the command line; sub-flags come
        # after their own subcommand token.
        FLAGS._parse_flags([ # pylint: disable=protected-access
            "move", "--move_string", "up", "--nomove_bool", "wa",
            "--move_wa_string", "haha", "--move_wa_bool"
        ])
        self.assertEqual("move", FLAGS.action)
        self.assertEqual("up", FLAGS.move_string)
        self.assertFalse(FLAGS.move_bool)
        self.assertEqual("haha", FLAGS.move_wa_string)
        self.assertTrue(FLAGS.move_wa_bool)

    def test_with_domain(self):
        """Test parsing inside a Subcommand context manager."""
        with flags.Subcommand("dummy_action", dest="action"):
            # test default value.
            FLAGS._parse_flags(["move", "dummy_object"]) # pylint: disable=protected-access
            self.assertEqual("move", FLAGS.action)
            self.assertEqual("default", FLAGS.move_string)
            self.assertTrue(FLAGS.move_bool)

    def test_required(self):
        """Test required key flags."""
        # NOTE(review): required flags appear to be consumed positionally
        # here (no --flag prefixes) — confirm against the flags
        # implementation.
        FLAGS._parse_flags(["require", "str_foo", "1", "0.5"]) # pylint: disable=protected-access
        self.assertEqual("require", FLAGS.action)
        self.assertEqual("str_foo", FLAGS.string_foo_required)
        self.assertEqual(1, FLAGS.int_foo_required)
        self.assertEqual(0.5, FLAGS.float_foo_required)

    def test_absl_logging_flags(self):
        """Test that absl logging flags are registered and parseable."""
        self.assertFalse(FLAGS.logtostderr)
        self.assertFalse(FLAGS.alsologtostderr)
        FLAGS._parse_flags(["move", "wa", "--alsologtostderr"]) # pylint: disable=protected-access
        self.assertTrue(FLAGS.alsologtostderr)
|
tensorflow/tensorflow
|
refs/heads/master
|
tensorflow/python/autograph/pyct/errors.py
|
20
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code transformation exceptions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class PyCTError(Exception):
  """Root of the exception hierarchy for PyCT code-transformation errors."""
class UnsupportedLanguageElementError(PyCTError, NotImplementedError):
  """Raised for code patterns that AutoGraph does not support.

  Doubles as NotImplementedError so callers may catch it either way.
  """
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.