| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
camptocamp/odoo
|
refs/heads/master
|
addons/hr_payroll_account/__openerp__.py
|
120
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Payroll Accounting',
    'version': '1.0',
    'category': 'Human Resources',
    'description': """
Generic Payroll system Integrated with Accounting.
==================================================
    * Expense Encoding
    * Payment Encoding
    * Company Contribution Management
    """,
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    'images': ['images/hr_employee_payslip.jpeg'],
    'depends': [
        'hr_payroll',
        'account',
        'hr_expense',
    ],
    'data': ['hr_payroll_account_view.xml'],
    'demo': ['hr_payroll_account_demo.xml'],
    'test': ['test/hr_payroll_account.yml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
2ndy/RaspIM
|
refs/heads/master
|
usr/lib/python2.6/encodings/cp1257.py
|
593
|
""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]

class StreamWriter(Codec, codecs.StreamWriter):
    pass

class StreamReader(Codec, codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='cp1257',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\ufffe' # 0x83 -> UNDEFINED
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\ufffe' # 0x88 -> UNDEFINED
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\xa8' # 0x8D -> DIAERESIS
u'\u02c7' # 0x8E -> CARON
u'\xb8' # 0x8F -> CEDILLA
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\ufffe' # 0x9C -> UNDEFINED
u'\xaf' # 0x9D -> MACRON
u'\u02db' # 0x9E -> OGONEK
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\ufffe' # 0xA1 -> UNDEFINED
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\ufffe' # 0xA5 -> UNDEFINED
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xe6' # 0xBF -> LATIN SMALL LETTER AE
u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
u'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
u'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
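
# A minimal round-trip sketch (an assumption: the codec is registered under the
# name 'cp1257' on the default `encodings` search path, as in a stock CPython 2
# install); guarded so importing the module stays side-effect free:
if __name__ == '__main__':
    assert u'\u0105'.encode('cp1257') == '\xe0'   # LATIN SMALL LETTER A WITH OGONEK -> 0xE0
    assert '\xe0'.decode('cp1257') == u'\u0105'   # and back via decoding_table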
|
edmorley/django
|
refs/heads/master
|
tests/gis_tests/maps/__init__.py
|
12133432
| |
dutwfk/pytest
|
refs/heads/master
|
leetcode/py/100.py
|
12133432
| |
dims/neutron
|
refs/heads/master
|
neutron/tests/unit/db/quota/__init__.py
|
12133432
| |
reckbo/ppl
|
refs/heads/master
|
config/axis_align_nrrd.py
|
1
|
#!/usr/bin/env python
#
# How does this compare to `unu unorient`?
import sys
import os
from os.path import basename, splitext, abspath, exists
import argparse
import tempfile
from subprocess import Popen, PIPE
import re
import numpy
from numpy import matrix, identity, diag
from numpy import linalg
from numpy.linalg import inv
from numpy.testing import assert_almost_equal
import fileinput
from subprocess import check_call
def pushd(tmpdir):
    """
    Change the working directory of the decorated function.
    Makes the decorated function's code cleaner.
    """
    def wrap(f):
        def new_function(*args, **kw):
            orig_dir = os.getcwd()
            print '* Changing to working dir %s' % tmpdir
            os.chdir(tmpdir)
            output = f(*args, **kw)
            os.chdir(orig_dir)
            return output
        return new_function
    return wrap
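
# Usage sketch for the decorator above (the function name is hypothetical):
# the body of `list_scratch` runs inside the given directory, after which the
# original working directory is restored.
#
#   @pushd(tempfile.mkdtemp())
#   def list_scratch():
#       return os.listdir('.')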
def t(cmd):
    """
    >>> t(['ls', '-a', '>', '/dev/null'])
    ls -a > /dev/null
    >>> t('ls -a > /dev/null')
    ls -a > /dev/null
    """
    if isinstance(cmd, list):
        cmd = ' '.join(cmd)
    #print cmd
    print
    print "* " + cmd
    check_call(cmd, shell=True)

def replace_line_in_file(afile, match_string, replace_with):
    for line in fileinput.FileInput(afile, inplace=1):
        if match_string in line:
            line = replace_with
        print line,

def find_spc_dir(s):
    match = re.search(
        'space directions: \((?P<xvec>(.*))\) \((?P<yvec>(.*))\) \((?P<zvec>(.*))\)',
        s)
    xvec = [float(x) for x in match.group('xvec').split(',')]
    yvec = [float(x) for x in match.group('yvec').split(',')]
    zvec = [float(x) for x in match.group('zvec').split(',')]
    return [xvec, yvec, zvec]

def find_mf(s):
    match = re.search(
        'measurement frame: \((?P<xvec>(.*))\) \((?P<yvec>(.*))\) \((?P<zvec>(.*))\)',
        s)
    xvec = [float(x) for x in match.group('xvec').split(',')]
    yvec = [float(x) for x in match.group('yvec').split(',')]
    zvec = [float(x) for x in match.group('zvec').split(',')]
    return [xvec, yvec, zvec]

def get_hdr(nrrd):
    hdr, stderr = Popen(['unu', 'head', nrrd], stdout=PIPE,
                        stderr=PIPE).communicate()
    return hdr

@pushd(tempfile.mkdtemp())
def get_numpy_rotation(spcdir_orig):
    sizes = diag([linalg.norm(spcdir_orig[0, :]), linalg.norm(spcdir_orig[1, :]), linalg.norm(spcdir_orig[2, :])])
    spcON = linalg.inv(sizes) * spcdir_orig
    spcNN = numpy.zeros([3, 3])
    for i in range(0, 3):
        mi = numpy.argmax(abs(spcON[i, :]))
        spcNN[i, mi] = numpy.sign(spcON[i, mi])
    R = spcNN * spcON.I
    return R

def axis_align_dwi(dwi, outfile=None, precision=5):
    dwi_hdr = get_hdr(dwi)
    spcdir_orig = matrix(find_spc_dir(dwi_hdr))
    print(spcdir_orig)
    sizes = diag([linalg.norm(spcdir_orig[0, :]), linalg.norm(spcdir_orig[1, :]), linalg.norm(spcdir_orig[2, :])])
    print(sizes)
    R = get_numpy_rotation(spcdir_orig)
    print(R)
    spcdir_new = matrix.round(sizes * R * linalg.inv(sizes) * spcdir_orig, 4)
    print(spcdir_new)
    mf_orig = find_mf(dwi_hdr)
    print(mf_orig)
    mf_new = matrix.round(R * matrix(mf_orig), 4)
    print(mf_new)
    mf_new = [','.join(map(str, x)) for x in mf_new.tolist()]
    newline = 'measurement frame: (%s) (%s) (%s)\n' % (mf_new[0], mf_new[1],
                                                       mf_new[2])
    dwi_new = splitext(dwi)[0] + '_axisaligned.nhdr' if not outfile else \
        outfile
    t('unu save -f nrrd -e gzip -i "%s" -o "%s"' % (dwi, dwi_new))
    replace_line_in_file(dwi_new, 'measurement frame:', newline)
    newline = 'space directions: (%s) (%s) (%s) none\n' % (','.join(map(str, spcdir_new[0])),
                                                           ','.join(map(str, spcdir_new[1])),
                                                           ','.join(map(str, spcdir_new[2])))
    replace_line_in_file(dwi_new, 'space directions:', newline)

# Why not just ConvertBetweenFileFormats??
def axis_align_3d(image, outfile=None):
    img_hdr = get_hdr(image)
    spcdir_orig = matrix(find_spc_dir(img_hdr))
    print(spcdir_orig)
    sizes = diag([linalg.norm(spcdir_orig[0, :]), linalg.norm(spcdir_orig[1, :]), linalg.norm(spcdir_orig[2, :])])
    print(sizes)
    R = get_numpy_rotation(spcdir_orig)
    print(R)
    spcdir_new = matrix.round(sizes * R * linalg.inv(sizes) * spcdir_orig, 4)
    print(spcdir_new)
    image_new = splitext(image)[0] + '_axisaligned.nhdr' if not outfile else \
        outfile
    t('unu save -f nrrd -e gzip -i "%s" -o "%s"' % (image, image_new))
    newline = 'space directions: (%s) (%s) (%s)\n' % (','.join(map(str, spcdir_new[0])),
                                                      ','.join(map(str, spcdir_new[1])),
                                                      ','.join(map(str, spcdir_new[2])))
    replace_line_in_file(image_new, 'space directions:', newline)

def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-i', '--infile', help='a 3d or 4d nrrd image')
    argparser.add_argument('-o', '--outfile', help='a 3d or 4d nrrd image',
                           required=False)
    argparser.add_argument('-p', '--precision',
                           help='precision of computed rotation matrix for dwi gradients',
                           required=False, type=int, default=5)
    argparser.add_argument('--overwrite', action='store_true', default=False,
                           help='overwrite outfile if it exists')
    args = argparser.parse_args()
    image_in = abspath(args.infile)
    if not exists(image_in):
        print image_in + ' doesn\'t exist'
        return
    #if not nrrdlib.nrrd_is_valid(image_in):
    #    print image_in + ' is not a valid nrrd'
    #    return
    if exists(args.outfile) and not args.overwrite:
        print args.outfile + ' already exists.'
        print 'Delete it first.'
        sys.exit(1)
    match = re.search('dimension: (?P<dimension>\d)', get_hdr(image_in))
    dim = match.group('dimension')
    if dim == '4':
        axis_align_dwi(image_in, outfile=args.outfile,
                       precision=args.precision)
    elif dim == '3':
        axis_align_3d(image_in, outfile=args.outfile)
    else:
        print image_in + ' has dimension %s, needs to be 3 or 4' % dim

if __name__ == '__main__':
    main()
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/twisted/test/test_randbytes.py
|
18
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.python.randbytes}.
"""
import os
from twisted.trial import unittest
from twisted.python import randbytes
class SecureRandomTestCaseBase(object):
    """
    Base class for secureRandom test cases.
    """

    def _check(self, source):
        """
        The given random bytes source should return the number of bytes
        requested each time it is called and should probably not return the
        same bytes on two consecutive calls (although this is a perfectly
        legitimate occurrence and rejecting it may generate a spurious failure
        -- maybe we'll get lucky and the heat death will come first).
        """
        for nbytes in range(17, 25):
            s = source(nbytes)
            self.assertEquals(len(s), nbytes)
            s2 = source(nbytes)
            self.assertEquals(len(s2), nbytes)
            # This is crude but hey
            self.assertNotEquals(s2, s)


class SecureRandomTestCase(SecureRandomTestCaseBase, unittest.TestCase):
    """
    Test secureRandom under normal conditions.
    """

    def test_normal(self):
        """
        L{randbytes.secureRandom} should return a string of the requested
        length and make some effort to make its result otherwise unpredictable.
        """
        self._check(randbytes.secureRandom)


class ConditionalSecureRandomTestCase(SecureRandomTestCaseBase,
                                      unittest.TestCase):
    """
    Test each random source individually, then remove it to exercise the
    fallback behaviour.
    """

    def setUp(self):
        """
        Create a L{randbytes.RandomFactory} to use in the tests.
        """
        self.factory = randbytes.RandomFactory()

    def errorFactory(self, nbytes):
        """
        A factory raising an error when a source is not available.
        """
        raise randbytes.SourceNotAvailable()

    def test_osUrandom(self):
        """
        L{RandomFactory._osUrandom} should work as a random source whenever
        L{os.urandom} is available.
        """
        self._check(self.factory._osUrandom)

    def test_fileUrandom(self):
        """
        L{RandomFactory._fileUrandom} should work as a random source whenever
        C{/dev/urandom} is available.
        """
        try:
            self._check(self.factory._fileUrandom)
        except randbytes.SourceNotAvailable:
            # The test should only fail if /dev/urandom doesn't exist
            self.assertFalse(os.path.exists('/dev/urandom'))

    def test_withoutAnything(self):
        """
        Remove all secure sources and assert it raises a failure. Then try the
        fallback parameter.
        """
        self.factory._osUrandom = self.errorFactory
        self.factory._fileUrandom = self.errorFactory
        self.assertRaises(randbytes.SecureRandomNotAvailable,
                          self.factory.secureRandom, 18)
        def wrapper():
            return self.factory.secureRandom(18, fallback=True)
        s = self.assertWarns(
            RuntimeWarning,
            "urandom unavailable - "
            "proceeding with non-cryptographically secure random source",
            __file__,
            wrapper)
        self.assertEquals(len(s), 18)


class RandomTestCaseBase(SecureRandomTestCaseBase, unittest.TestCase):
    """
    'Normal' random test cases.
    """

    def test_normal(self):
        """
        Test basic case.
        """
        self._check(randbytes.insecureRandom)

    def test_withoutGetrandbits(self):
        """
        Test C{insecureRandom} without C{random.getrandbits}.
        """
        factory = randbytes.RandomFactory()
        factory.getrandbits = None
        self._check(factory.insecureRandom)
|
ypwalter/fxos-certsuite
|
refs/heads/master
|
mcts/webapi_tests/telephony/test_telephony_incoming_hold_outgoing.py
|
3
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import time
from marionette_driver.wait import Wait
from mcts.webapi_tests.semiauto import TestCase
from mcts.webapi_tests.telephony import TelephonyTestCommon
class TestTelephonyIncomingHoldOutgoing(TestCase, TelephonyTestCommon):
    """
    This is a test for the `WebTelephony API`_ which will:

    - Disable the default gaia dialer, so that the test app can handle calls
    - Setup a mozTelephony event listener for incoming calls
    - Ask the test user to phone the Firefox OS device from a second phone
    - Verify that the mozTelephony incoming call event is triggered
    - Answer the incoming call via the API, keep the call active for 5 seconds
    - Use the API to initiate an outgoing call from the Firefox OS device to a third phone
    - Verify that the first call's state is held and the corresponding events were triggered
    - Ask the test user to answer the call on the third phone
    - Verify that the first call is still held while the second call becomes active
    - Hang up the connected call via the API
    - Verify the held call is now resumed and the only active call
    - Hang up the remaining active call via the API
    - Verify that the corresponding mozTelephonyCall events were triggered
    - Re-enable the default gaia dialer

    .. _`WebTelephony API`: https://developer.mozilla.org/en-US/docs/Web/Guide/API/Telephony
    """

    def __init__(self, *args, **kwargs):
        TestCase.__init__(self, *args, **kwargs)
        TelephonyTestCommon.__init__(self)

    def setUp(self):
        self.addCleanup(self.clean_up)
        super(TestTelephonyIncomingHoldOutgoing, self).setUp()
        self.wait_for_obj("window.navigator.mozTelephony")
        # disable the default dialer manager so it doesn't grab our calls
        self.disable_dialer()

    def test_telephony_incoming_hold_outgoing(self):
        # ask user to call the device; answer and verify via webapi
        self.user_guided_incoming_call()
        self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
        self.assertEqual(self.calls['0'], self.incoming_call)
        self.answer_call()
        self.assertEqual(self.active_call_list[0]['state'], "connected", "Call state should be 'connected'")
        self.assertEqual(self.active_call_list[0]['number'], self.incoming_call['number'])
        self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
        self.assertEqual(self.calls['length'], 1, "There should be 1 active call")
        # keep call active for a while
        time.sleep(5)
        self.hold_active_call(user_initiate_hold=False)
        # use the webapi to make an outgoing call to user-specified number
        self.user_guided_outgoing_call()
        # setup the 'onheld' event handler
        wait = Wait(self.marionette, timeout=30, interval=0.5)
        try:
            wait.until(lambda x: x.execute_script("return window.wrappedJSObject.onheld_call_ok"))
            wait.until(lambda x: x.execute_script("return window.wrappedJSObject.received_statechange"))
        except:
            # failed to hold
            self.fail("Failed to put first active call on hold while second call becomes active")
        onholding = self.marionette.execute_script("return window.wrappedJSObject.onholding_call_ok")
        self.assertFalse(onholding, "Telephony.onholding event found, but should not have been "
                                    "since the phone user did not initiate holding the call")
        # verify that there are two calls, of which the first (incoming) is held while the second is outgoing
        self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
        self.assertEqual(self.calls['length'], 2, "There should be 2 calls")
        self.outgoing_call = self.marionette.execute_script("return window.wrappedJSObject.returnable_outgoing_call")
        self.assertEqual(self.calls['1'], self.outgoing_call)
        self.assertEqual(self.calls['0']['state'], "held", "Call state should be 'held'")
        # have user answer the call on target
        self.answer_call(incoming=False)
        # keep call active for a while
        time.sleep(5)
        # verify the active call
        self.assertEqual(self.active_call_list[1]['state'], "connected", "Call state should be 'connected'")
        self.assertEqual(self.active_call_list[1]['number'], self.outgoing_call['number'])
        self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
        self.assertEqual(self.calls['length'], 2, "There should be 2 active calls")
        # verify call state change
        self.assertEqual(self.calls['0']['state'], "held", "Call state should be 'held'")
        self.assertEqual(self.calls['1']['state'], "connected", "Call state should be 'connected'")
        # disconnect the two active calls
        self.hangup_call(active_call_selected=1)
        # verify number of remaining calls and their state
        wait = Wait(self.marionette, timeout=10, interval=0.5)
        try:
            wait.until(lambda x: x.execute_script("return (window.wrappedJSObject.calls.length == 1)"))
            self.resume()
            wait.until(lambda x: x.execute_script("return (window.wrappedJSObject.calls[0].state == \"connected\")"))
        except:
            self.fail("Failed to hang up the second call or change the state of the first call")
        # disconnect the active call
        self.hangup_call(active_call_selected=0)
        self.calls = self.marionette.execute_script("return window.wrappedJSObject.get_returnable_calls()")
        self.assertEqual(self.calls['length'], 0, "There should be 0 calls")

    def clean_up(self):
        # re-enable the default dialer manager
        self.enable_dialer()
        self.active_call_list = []
|
Rhizi/rhizi
|
refs/heads/master
|
rhizi/rz_feedback.py
|
1
|
# This file is part of rhizi, a collaborative knowledge graph editor.
# Copyright (C) 2014-2015 Rhizi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
from flask import current_app
from flask import json
from flask import request
from flask import session
import logging
from .rz_mail import send_email__flask_ctx
from .rz_req_handling import make_response__json, HTTP_STATUS__500_INTERNAL_SERVER_ERROR
log = logging.getLogger('rhizi')
class RZ_User_Feedback(object):

    def __init__(self, url=None,
                 note=None,
                 img=None,
                 html=None,
                 user_agent=None):
        self.url = url  # no trailing comma: a stray ',' here would make this a 1-tuple
        self.note = note
        self.img = img
        self.html = html
        self.user_agent = user_agent

def decode_base64_uri(base64_encoded_data_uri):
    start = base64_encoded_data_uri.find(',') + 1
    encoded = base64_encoded_data_uri[start:]
    return base64.decodestring(encoded)
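
# Example of the helper above on a (hypothetical) data URI: everything up to
# and including the first ',' is dropped, the remainder is base64-decoded.
#
#   decode_base64_uri('data:image/png;base64,AAAA')  # -> '\x00\x00\x00'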
def rest__send_user_feedback__email():
    """
    REST API endpoint: send user feedback by email along with screen capture attachments
    """
    def sanitize_input(req):
        req_dict = req.get_json()
        url = req_dict['url']
        note = req_dict['note']
        img = decode_base64_uri(req_dict['img'])
        html = req_dict['html']
        user_agent = req_dict['browser']['userAgent']
        return RZ_User_Feedback(url=url,
                                note=note,
                                img=img,
                                html=html,
                                user_agent=user_agent)

    try:
        u_feedback = sanitize_input(request)
    except:
        log.warn('failed to sanitize inputs. request: %s' % request)
        return make_response__json(status=400)  # Bad Request

    # FIXME: should be async via celery (or another method)
    session_user = session.get('username')
    msg_body = ['Feedback from user:',
                '',
                'user: %s' % (session_user if session_user else "<not-logged-in>"),
                'user-agent: %s' % (u_feedback.user_agent),
                'watching URL: %s' % (u_feedback.url),
                'user-note: %s' % (u_feedback.note),
                ''
                ]
    msg_body = '\n'.join(msg_body)
    try:
        send_email__flask_ctx(recipients=[current_app.rz_config.feedback_recipient],
                              subject="User Feedback",
                              body=msg_body,
                              attachments=[('feedback_screenshot.png', 'image/png', u_feedback.img),
                                           ('feedback_page.html', 'text/html', u_feedback.html.encode('utf-8')),
                                           ])
        return make_response__json()  # return empty json response
    except Exception:
        log.exception('send_user_feedback__email: exception while sending email')  # exception derived from stack
        return make_response__json(status=HTTP_STATUS__500_INTERNAL_SERVER_ERROR)
|
pombredanne/pythran
|
refs/heads/master
|
pythran/tests/cases/scrabble.py
|
4
|
#from http://stackoverflow.com/questions/18345202/functional-vs-imperative-style-in-python
#pythran export scrabble_fun_score(str, str: int dict)
#pythran export scrabble_imp_score(str, str: int dict)
#runas scrabble_fun_score('tralala', {'t': 1, 'r': 2, 'a': 3, 'l': 4})
#runas scrabble_fun_score('tralala', {'t': 1, 'r': 2, 'a': 3, 'l': 4})
#bench import string; import random; a = "".join([random.choice(string.letters) for i in xrange(12000000)]); v = dict(zip(string.letters, range(1000))); scrabble_fun_score(a, v)
def scrabble_fun_score(word, scoretable):
    return sum([scoretable.get(x, 0) for x in word])

def scrabble_imp_score(word, scoretable):
    score = 0
    for letter in word:
        if letter in scoretable:
            score += scoretable[letter]
    return score
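
# Sanity check mirroring the #runas lines above: with {'t': 1, 'r': 2, 'a': 3,
# 'l': 4}, 'tralala' scores 1+2+3+4+3+4+3 == 20 under both the functional and
# the imperative variant.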
|
natanlailari/PennApps2015-Heartmates
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/__init__.py
|
327
|
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2014 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.3.0'
__build__ = 0x020300
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2014 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
    from .packages.urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
logging.getLogger(__name__).addHandler(NullHandler())
|
gurneyalex/OpenUpgrade
|
refs/heads/master
|
addons/account_followup/report/account_followup_print.py
|
40
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from collections import defaultdict

from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools.translate import _  # needed for the translated error message below


class report_rappel(report_sxw.rml_parse):
    _name = "account_followup.report.rappel"

    def __init__(self, cr, uid, name, context=None):
        super(report_rappel, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'ids_to_objects': self._ids_to_objects,
            'getLines': self._lines_get,
            'get_text': self._get_text
        })

    def _ids_to_objects(self, ids):
        all_lines = []
        for line in self.pool['account_followup.stat.by.partner'].browse(self.cr, self.uid, ids):
            if line not in all_lines:
                all_lines.append(line)
        return all_lines

    def _lines_get(self, stat_by_partner_line):
        return self._lines_get_with_partner(stat_by_partner_line.partner_id, stat_by_partner_line.company_id.id)

    def _lines_get_with_partner(self, partner, company_id):
        moveline_obj = self.pool['account.move.line']
        moveline_ids = moveline_obj.search(self.cr, self.uid, [
            ('partner_id', '=', partner.id),
            ('account_id.type', '=', 'receivable'),
            ('reconcile_id', '=', False),
            ('state', '!=', 'draft'),
            ('company_id', '=', company_id),
        ])

        # lines_per_currency = {currency: [line data, ...], ...}
        lines_per_currency = defaultdict(list)
        for line in moveline_obj.browse(self.cr, self.uid, moveline_ids):
            currency = line.currency_id or line.company_id.currency_id
            line_data = {
                'name': line.move_id.name,
                'ref': line.ref,
                'date': line.date,
                'date_maturity': line.date_maturity,
                'balance': line.amount_currency if currency != line.company_id.currency_id else line.debit - line.credit,
                'blocked': line.blocked,
                'currency_id': currency,
            }
            lines_per_currency[currency].append(line_data)
        return [{'line': lines} for lines in lines_per_currency.values()]

    def _get_text(self, stat_line, followup_id, context=None):
        if context is None:
            context = {}
        context.update({'lang': stat_line.partner_id.lang})
        fp_obj = self.pool['account_followup.followup']
        fp_line = fp_obj.browse(self.cr, self.uid, followup_id, context=context).followup_line
        if not fp_line:
            raise osv.except_osv(_('Error!'), _("The followup plan defined for the current company does not have any followup action."))
        # the default text will be the first fp_line in the sequence with a description.
        default_text = ''
        li_delay = []
        for line in fp_line:
            if not default_text and line.description:
                default_text = line.description
            li_delay.append(line.delay)
        li_delay.sort(reverse=True)
        a = {}
        # look into the lines of the partner that already have a followup level,
        # and take the description of the highest level for which it is available
        partner_line_ids = self.pool['account.move.line'].search(self.cr, self.uid, [('partner_id', '=', stat_line.partner_id.id), ('reconcile_id', '=', False), ('company_id', '=', stat_line.company_id.id), ('blocked', '=', False), ('state', '!=', 'draft'), ('debit', '!=', False), ('account_id.type', '=', 'receivable'), ('followup_line_id', '!=', False)])
        partner_max_delay = 0
        partner_max_text = ''
        for i in self.pool['account.move.line'].browse(self.cr, self.uid, partner_line_ids, context=context):
            if i.followup_line_id.delay > partner_max_delay and i.followup_line_id.description:
                partner_max_delay = i.followup_line_id.delay
                partner_max_text = i.followup_line_id.description
        text = partner_max_delay and partner_max_text or default_text
        if text:
            text = text % {
                'partner_name': stat_line.partner_id.name,
                'date': time.strftime('%Y-%m-%d'),
                'company_name': stat_line.company_id.name,
                'user_signature': self.pool['res.users'].browse(self.cr, self.uid, self.uid, context).signature or '',
            }
        return text


class report_followup(osv.AbstractModel):
    _name = 'report.account_followup.report_followup'
    _inherit = 'report.abstract_report'
    _template = 'account_followup.report_followup'
    _wrapped_report_class = report_rappel
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
arnavd96/Cinemiezer
|
refs/heads/master
|
myvenv/lib/python3.4/site-packages/unidecode/x0ff.py
|
252
|
data = (
'[?]', # 0x00
'!', # 0x01
'"', # 0x02
'#', # 0x03
'$', # 0x04
'%', # 0x05
'&', # 0x06
'\'', # 0x07
'(', # 0x08
')', # 0x09
'*', # 0x0a
'+', # 0x0b
',', # 0x0c
'-', # 0x0d
'.', # 0x0e
'/', # 0x0f
'0', # 0x10
'1', # 0x11
'2', # 0x12
'3', # 0x13
'4', # 0x14
'5', # 0x15
'6', # 0x16
'7', # 0x17
'8', # 0x18
'9', # 0x19
':', # 0x1a
';', # 0x1b
'<', # 0x1c
'=', # 0x1d
'>', # 0x1e
'?', # 0x1f
'@', # 0x20
'A', # 0x21
'B', # 0x22
'C', # 0x23
'D', # 0x24
'E', # 0x25
'F', # 0x26
'G', # 0x27
'H', # 0x28
'I', # 0x29
'J', # 0x2a
'K', # 0x2b
'L', # 0x2c
'M', # 0x2d
'N', # 0x2e
'O', # 0x2f
'P', # 0x30
'Q', # 0x31
'R', # 0x32
'S', # 0x33
'T', # 0x34
'U', # 0x35
'V', # 0x36
'W', # 0x37
'X', # 0x38
'Y', # 0x39
'Z', # 0x3a
'[', # 0x3b
'\\', # 0x3c
']', # 0x3d
'^', # 0x3e
'_', # 0x3f
'`', # 0x40
'a', # 0x41
'b', # 0x42
'c', # 0x43
'd', # 0x44
'e', # 0x45
'f', # 0x46
'g', # 0x47
'h', # 0x48
'i', # 0x49
'j', # 0x4a
'k', # 0x4b
'l', # 0x4c
'm', # 0x4d
'n', # 0x4e
'o', # 0x4f
'p', # 0x50
'q', # 0x51
'r', # 0x52
's', # 0x53
't', # 0x54
'u', # 0x55
'v', # 0x56
'w', # 0x57
'x', # 0x58
'y', # 0x59
'z', # 0x5a
'{', # 0x5b
'|', # 0x5c
'}', # 0x5d
'~', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'.', # 0x61
'[', # 0x62
']', # 0x63
',', # 0x64
'*', # 0x65
'wo', # 0x66
'a', # 0x67
'i', # 0x68
'u', # 0x69
'e', # 0x6a
'o', # 0x6b
'ya', # 0x6c
'yu', # 0x6d
'yo', # 0x6e
'tu', # 0x6f
'+', # 0x70
'a', # 0x71
'i', # 0x72
'u', # 0x73
'e', # 0x74
'o', # 0x75
'ka', # 0x76
'ki', # 0x77
'ku', # 0x78
'ke', # 0x79
'ko', # 0x7a
'sa', # 0x7b
'si', # 0x7c
'su', # 0x7d
'se', # 0x7e
'so', # 0x7f
'ta', # 0x80
'ti', # 0x81
'tu', # 0x82
'te', # 0x83
'to', # 0x84
'na', # 0x85
'ni', # 0x86
'nu', # 0x87
'ne', # 0x88
'no', # 0x89
'ha', # 0x8a
'hi', # 0x8b
'hu', # 0x8c
'he', # 0x8d
'ho', # 0x8e
'ma', # 0x8f
'mi', # 0x90
'mu', # 0x91
'me', # 0x92
'mo', # 0x93
'ya', # 0x94
'yu', # 0x95
'yo', # 0x96
'ra', # 0x97
'ri', # 0x98
'ru', # 0x99
're', # 0x9a
'ro', # 0x9b
'wa', # 0x9c
'n', # 0x9d
':', # 0x9e
';', # 0x9f
'', # 0xa0
'g', # 0xa1
'gg', # 0xa2
'gs', # 0xa3
'n', # 0xa4
'nj', # 0xa5
'nh', # 0xa6
'd', # 0xa7
'dd', # 0xa8
'r', # 0xa9
'lg', # 0xaa
'lm', # 0xab
'lb', # 0xac
'ls', # 0xad
'lt', # 0xae
'lp', # 0xaf
'rh', # 0xb0
'm', # 0xb1
'b', # 0xb2
'bb', # 0xb3
'bs', # 0xb4
's', # 0xb5
'ss', # 0xb6
'', # 0xb7
'j', # 0xb8
'jj', # 0xb9
'c', # 0xba
'k', # 0xbb
't', # 0xbc
'p', # 0xbd
'h', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'a', # 0xc2
'ae', # 0xc3
'ya', # 0xc4
'yae', # 0xc5
'eo', # 0xc6
'e', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'yeo', # 0xca
'ye', # 0xcb
'o', # 0xcc
'wa', # 0xcd
'wae', # 0xce
'oe', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'yo', # 0xd2
'u', # 0xd3
'weo', # 0xd4
'we', # 0xd5
'wi', # 0xd6
'yu', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'eu', # 0xda
'yi', # 0xdb
'i', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'/C', # 0xe0
'PS', # 0xe1
'!', # 0xe2
'-', # 0xe3
'|', # 0xe4
'Y=', # 0xe5
'W=', # 0xe6
'[?]', # 0xe7
'|', # 0xe8
'-', # 0xe9
'|', # 0xea
'-', # 0xeb
'|', # 0xec
'#', # 0xed
'O', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'{', # 0xf9
'|', # 0xfa
'}', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
'', # 0xff
)
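
# Usage sketch via the package's public API: this table backs code points
# U+FF00-U+FFFF (halfwidth/fullwidth forms), so fullwidth Latin maps to ASCII.
#
#   from unidecode import unidecode
#   unidecode(u'\uff21\uff22\uff23')  # -> 'ABC'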
|
CuonDeveloper/cuon
|
refs/heads/master
|
cuon_server/src/cuon/CuonFuncs.py
|
3
|
import xmlrpclib
from twisted.web import xmlrpc
#from twisted.internet import defer
#from twisted.internet import reactor
#from twisted.web import server
import os
import sys
import time
import random


class CuonFuncs(xmlrpc.XMLRPC):
    #def __init__(self):
    #    pass

    def xmlrpc_test1(self, f, s):
        f = open(f, 'a')
        f.write(s)
        f.close()
        return 'Hallo'
|
eerimoq/cantools
|
refs/heads/master
|
tests/test_plot_without_mock.py
|
1
|
#!/usr/bin/env python3
import os
import sys
import unittest
from unittest import mock
from io import StringIO
import cantools
import matplotlib.pyplot as plt
class CanToolsPlotTest(unittest.TestCase):

    DBC_FILE = os.path.join(os.path.split(__file__)[0], 'files/dbc/abs.dbc')
    FN_OUT = "out.pdf"

    def test_plot_tz(self):
        self.assertFalse(os.path.exists(self.FN_OUT))
        argv = ['cantools', 'plot', '-o', self.FN_OUT, self.DBC_FILE]
        input_data = """\
(000.000000) vcan0 00000343 [8] C5 04 B7 04 9B 04 C5 04
(001.001787) vcan0 00000343 [8] 69 04 69 04 77 04 7E 04
(002.003592) vcan0 00000343 [8] 29 04 30 04 29 04 22 04
(003.005400) vcan0 00000343 [8] FC 03 20 04 20 04 FC 03
(004.006942) vcan0 00000343 [8] DE 03 D0 03 D0 03 C9 03
(005.008400) vcan0 00000343 [8] 7E 03 85 03 8C 03 77 03
(006.009926) vcan0 00000343 [8] 65 03 3B 03 50 03 65 03
(007.011457) vcan0 00000343 [8] 17 03 3B 03 34 03 10 03
(008.013215) vcan0 00000343 [8] 00 03 F2 02 15 03 F9 02
(009.014779) vcan0 00000343 [8] CB 02 BC 02 B5 02 D2 02
"""
        with mock.patch('sys.stdin', StringIO(input_data)):
            with mock.patch('sys.argv', argv):
                cantools._main()
        self.assertTrue(os.path.exists(self.FN_OUT))
        os.remove(self.FN_OUT)

    def test_plot_style(self):
        self.assertFalse(os.path.exists(self.FN_OUT))
        argv = ['cantools', 'plot', '--style', 'seaborn', '-o', self.FN_OUT, self.DBC_FILE]
        input_data = """\
(000.000000) vcan0 00000343 [8] C5 04 B7 04 9B 04 C5 04
(001.001787) vcan0 00000343 [8] 69 04 69 04 77 04 7E 04
(002.003592) vcan0 00000343 [8] 29 04 30 04 29 04 22 04
(003.005400) vcan0 00000343 [8] FC 03 20 04 20 04 FC 03
(004.006942) vcan0 00000343 [8] DE 03 D0 03 D0 03 C9 03
(005.008400) vcan0 00000343 [8] 7E 03 85 03 8C 03 77 03
(006.009926) vcan0 00000343 [8] 65 03 3B 03 50 03 65 03
(007.011457) vcan0 00000343 [8] 17 03 3B 03 34 03 10 03
(008.013215) vcan0 00000343 [8] 00 03 F2 02 15 03 F9 02
(009.014779) vcan0 00000343 [8] CB 02 BC 02 B5 02 D2 02
"""
        with mock.patch('sys.stdin', StringIO(input_data)):
            with mock.patch('sys.argv', argv):
                cantools._main()
        self.assertTrue(os.path.exists(self.FN_OUT))
        os.remove(self.FN_OUT)

    def test_plot_list_styles(self):
        self.assertFalse(os.path.exists(self.FN_OUT))
        argv = ['cantools', 'plot', '--list-styles', '']
        stdout = StringIO()
        expected = "available matplotlib styles:"
        expected += "".join("\n- %s" % s for s in plt.style.available)
        expected += "\n"
        with mock.patch('sys.stdout', stdout):
            with mock.patch('sys.argv', argv):
                cantools._main()
        self.assertEqual(stdout.getvalue(), expected)

if __name__ == '__main__':
    unittest.main()
|
kvesteri/sqlalchemy-json-api
|
refs/heads/master
|
tests/test_select_one.py
|
2
|
import json
import pytest
@pytest.mark.usefixtures('table_creator', 'dataset')
class TestSelectOne(object):
    def test_with_from_obj(self, query_builder, session, user_cls):
        query = query_builder.select_one(
            user_cls,
            1,
            fields={'users': ['all_friends']},
            from_obj=session.query(user_cls)
        )
        assert session.execute(query).scalar() == {
            'data': {
                'relationships': {
                    'all_friends': {'data': [{'id': '2', 'type': 'users'}]}
                },
                'id': '1',
                'type': 'users'
            }
        }

    def test_without_from_obj(self, query_builder, session, user_cls):
        query = query_builder.select_one(
            user_cls,
            1,
            fields={'users': ['all_friends']},
        )
        assert session.execute(query).scalar() == {
            'data': {
                'relationships': {
                    'all_friends': {'data': [{'id': '2', 'type': 'users'}]}
                },
                'id': '1',
                'type': 'users'
            }
        }

    def test_empty_result(self, query_builder, session, user_cls):
        query = query_builder.select_one(
            user_cls,
            99,
        )
        assert session.execute(query).scalar() is None

    def test_as_text_parameter(self, query_builder, session, article_cls):
        query = query_builder.select_one(
            article_cls,
            1,
            fields={'articles': ['name']},
            as_text=True
        )
        assert json.loads(session.execute(query).scalar()) == {
            'data': {
                'type': 'articles',
                'id': '1',
                'attributes': {
                    'name': 'Some article'
                }
            }
        }
|
calfonso/ansible
|
refs/heads/devel
|
test/integration/targets/setup_rpm_repo/files/create-repo.py
|
67
|
#!/usr/bin/env python
import sys
from collections import namedtuple
import rpmfluff
RPM = namedtuple('RPM', ['name', 'version', 'release', 'epoch'])

SPECS = [
    RPM('foo', '1.0', '1', None),
    RPM('foo', '1.0', '2', '1'),
    RPM('foo', '1.1', '1', '1'),
]


def main():
    try:
        arch = sys.argv[1]
    except IndexError:
        arch = 'x86_64'

    pkgs = []
    for spec in SPECS:
        pkg = rpmfluff.SimpleRpmBuild(spec.name, spec.version, spec.release, [arch])
        pkg.epoch = spec.epoch
        pkgs.append(pkg)

    repo = rpmfluff.YumRepoBuild(pkgs)
    repo.make(arch)

    for pkg in pkgs:
        pkg.clean()

    print(repo.repoDir)


if __name__ == "__main__":
    main()
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
cpython/211_test_compare.py
|
5
|
import sys
from test_support import *
class Empty:
    def __repr__(self):
        return '<Empty>'

class Coerce:
    def __init__(self, arg):
        self.arg = arg

    def __repr__(self):
        return '<Coerce %s>' % self.arg

    def __coerce__(self, other):
        if isinstance(other, Coerce):
            return self.arg, other.arg
        else:
            return self.arg, other

class Cmp:
    def __init__(self, arg):
        self.arg = arg

    def __repr__(self):
        return '<Cmp %s>' % self.arg

    def __cmp__(self, other):
        return cmp(self.arg, other)

candidates = [2, 2.0, 2L, 2+0j, [1], (3,), None, Empty(), Coerce(2), Cmp(2.0)]

def test():
    for a in candidates:
        for b in candidates:
            try:
                x = a == b
            except:
                print 'cmp(%s, %s) => %s' % (a, b, sys.exc_info()[0])
            else:
                if x:
                    print "%s == %s" % (a, b)
                else:
                    print "%s != %s" % (a, b)

test()
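
# Two of the Python 2 comparison paths exercised above, for reference:
# Coerce(2) == 2.0 goes through __coerce__, which returns (2, 2.0), so the
# pair compares equal; Cmp(2.0) == 2 goes through __cmp__, where
# cmp(2.0, 2) == 0.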
|
webmasterraj/GaSiProMo
|
refs/heads/master
|
flask/lib/python2.7/site-packages/boto/ec2/spotdatafeedsubscription.py
|
18
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Spot Instance Datafeed Subscription
"""
from boto.ec2.ec2object import EC2Object
from boto.ec2.spotinstancerequest import SpotInstanceStateFault
class SpotDatafeedSubscription(EC2Object):
    def __init__(self, connection=None, owner_id=None,
                 bucket=None, prefix=None, state=None, fault=None):
        super(SpotDatafeedSubscription, self).__init__(connection)
        self.owner_id = owner_id
        self.bucket = bucket
        self.prefix = prefix
        self.state = state
        self.fault = fault

    def __repr__(self):
        return 'SpotDatafeedSubscription:%s' % self.bucket

    def startElement(self, name, attrs, connection):
        if name == 'fault':
            self.fault = SpotInstanceStateFault()
            return self.fault
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'ownerId':
            self.owner_id = value
        elif name == 'bucket':
            self.bucket = value
        elif name == 'prefix':
            self.prefix = value
        elif name == 'state':
            self.state = value
        else:
            setattr(self, name, value)

    def delete(self, dry_run=False):
        return self.connection.delete_spot_datafeed_subscription(
            dry_run=dry_run
        )
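
# Usage sketch (assuming a standard boto EC2 connection; the region name is
# illustrative):
#
#   import boto.ec2
#   conn = boto.ec2.connect_to_region('us-east-1')
#   sub = conn.get_spot_datafeed_subscription()   # -> SpotDatafeedSubscription
#   print sub.bucket, sub.state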
|
40223249-1/0622W17
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/multiprocessing/dummy/__init__.py
|
693
|
#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False
        self._parent = current_process()

    def start(self):
        assert self._parent is current_process()
        self._start_called = True
        if hasattr(self._parent, '_children'):
            self._parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        if self._start_called and not self.is_alive():
            return 0
        else:
            return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
    children = current_process()._children
    for p in list(children):
        if not p.is_alive():
            children.pop(p, None)
    return list(children)

def freeze_support():
    pass
#
#
#
class Namespace(object):
    def __init__(self, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        items = list(self.__dict__.items())
        temp = []
        for name, value in items:
            if not name.startswith('_'):
                temp.append('%s=%r' % (name, value))
        temp.sort()
        return 'Namespace(%s)' % str.join(', ', temp)

dict = dict
list = list

#brython fix me
#def Array(typecode, sequence, lock=True):
#    return array.array(typecode, sequence)

class Value(object):
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    def _get(self):
        return self._value

    def _set(self, value):
        self._value = value

    value = property(_get, _set)

    def __repr__(self):
        return '<%r(%r, %r)>' % (type(self).__name__, self._typecode, self._value)

def Manager():
    return sys.modules[__name__]

def shutdown():
    pass

def Pool(processes=None, initializer=None, initargs=()):
    from multiprocessing.pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
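
# Minimal usage sketch: the dummy API mirrors multiprocessing's, but Pool()
# above just returns a multiprocessing.pool.ThreadPool, so the "workers" are
# threads in the current process.
#
#   from multiprocessing.dummy import Pool
#   p = Pool(4)
#   print(p.map(len, ['a', 'bb', 'ccc']))   # -> [1, 2, 3]
#   p.close(); p.join()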
|
eedf/jeito
|
refs/heads/master
|
accounting/migrations/0016_transaction_reconciliation.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-27 15:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('accounting', '0015_bankstatement_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='transaction',
            name='reconciliation',
            field=models.DateField(blank=True, null=True, verbose_name='Rapprochement'),
        ),
    ]
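
# For reference, on PostgreSQL this AddField corresponds roughly to the
# following DDL (table/column names assume Django's default naming):
#
#   ALTER TABLE accounting_transaction ADD COLUMN reconciliation date NULL;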
|
juangj/selenium
|
refs/heads/master
|
py/test/selenium/webdriver/chrome/__init__.py
|
2454
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
googleapis/sphinx-docfx-yaml
|
refs/heads/master
|
docfx_yaml/writer.py
|
1
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
This is a forked version of the Sphinx text writer.
It outputs DocFX Markdown from the docutils doctree,
allowing us to transform transformed RST in memory to markdown.
It is certainly **not** complete,
and only implements as much logic as would be expected in normal docstring usage.
It is not intended to be a generic rst->markdown converter,
because rst contains myriad structures that markdown can't represent.
"""
import json
import os
import re
import sys
import textwrap
from itertools import groupby
from docutils import nodes, writers
from docutils.utils import column_width
from docutils.nodes import TextElement, Text, Node
from sphinx import addnodes
from sphinx.locale import admonitionlabels
from .nodes import remarks
class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class TextWrapper(textwrap.TextWrapper):
"""Custom subclass that uses a different word separator regex."""
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'(?<=\s)(?::[a-z-]+:)?`\S+|' # interpreted text start
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
The original _wrap_chunks uses len() to calculate width.
This method respects wide/fullwidth characters for width adjustment.
"""
drop_whitespace = getattr(self, 'drop_whitespace', True) # py25 compat
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = self.width - column_width(indent)
if drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = column_width(chunks[-1])
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
else:
break
if chunks and column_width(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
if drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
def _break_word(self, word, space_left):
"""_break_word(word : string, space_left : int) -> (string, string)
Break line by unicode width instead of len(word).
"""
total = 0
for i, c in enumerate(word):
total += column_width(c)
if total > space_left:
return word[:i-1], word[i-1:]
return word, ''
def _split(self, text):
"""_split(text : string) -> [string]
        Override the original method, which only splits by 'wordsep_re'.
        This '_split' splits wide characters into chunks of one character each.
"""
def split(t):
return textwrap.TextWrapper._split(self, t)
chunks = []
for chunk in split(text):
for w, g in groupby(chunk, column_width):
if w == 1:
chunks.extend(split(''.join(g)))
else:
chunks.extend(list(g))
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
        Override the original method to use self._break_word() instead of slicing.
"""
space_left = max(width - cur_len, 1)
if self.break_long_words:
l, r = self._break_word(reversed_chunks[-1], space_left)
cur_line.append(l)
reversed_chunks[-1] = r
elif not cur_line:
cur_line.append(reversed_chunks.pop())
MAXWIDTH = 999
STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
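# A minimal usage sketch for the helpers above (illustrative, not part of the
# original module): my_wrap() wraps by display columns rather than len(), so
# fullwidth (CJK) characters count as two columns each. For ASCII input it
# behaves like textwrap.wrap():
#
#     >>> my_wrap('spam ham eggs', width=5)
#     ['spam', 'ham', 'eggs']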
class MarkdownWriter(writers.Writer):
"""
This writer is used to produce the markdown
written in yaml files (summaries), it is distinct from the
markdown outputter which process the whole documentation.
"""
supported = ('text',)
settings_spec = ('No options here.', '', ())
settings_defaults = {}
output = None
def __init__(self, builder):
writers.Writer.__init__(self)
self.builder = builder
self.translator_class = MarkdownTranslator
def translate(self):
visitor = self.translator_class(self.document, self.builder)
self.document.walkabout(visitor)
self.output = visitor.body
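# How this writer is typically driven (a sketch of the standard docutils flow,
# not code from this module): the builder constructs MarkdownWriter(builder)
# and calls writer.write(document, destination), which invokes translate()
# above and leaves the generated markdown string in writer.output.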
class MarkdownTranslator(nodes.NodeVisitor):
sectionchars = '*=-~"+`'
xref_template = "<xref:{0}>"
def __init__(self, document, builder):
self.invdata = []
nodes.NodeVisitor.__init__(self, document)
self.builder = builder
newlines = builder.config.text_newlines
if newlines == 'windows':
self.nl = '\r\n'
elif newlines == 'native':
self.nl = os.linesep
else:
self.nl = '\n'
self.sectionchars = builder.config.text_sectionchars
self.states = [[]]
self.stateindent = [0]
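        # self.states is a stack of output buffers: each buffer is a list of
        # (indent, text) tuples, where indent == -1 marks raw text that
        # end_state() still needs to wrap, and any other value marks
        # already-formatted nested lines. self.stateindent tracks the
        # indentation of each buffer.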
self.list_counter = []
self.sectionlevel = 0
self.lineblocklevel = 0
self.table = None
@staticmethod
def resolve_reference_in_node(node):
if node.tagname == 'reference':
ref_string = MarkdownTranslator._resolve_reference(node)
            if node.parent is not None:
for i, n in enumerate(node.parent):
if n is node: # Replace the reference node.
node.parent.children[i] = Text(ref_string)
break
            else:  # If the reference node has no parent, replace its content.
node.clear()
node.children.append(Text(ref_string))
else:
for child in node:
if isinstance(child, Node):
MarkdownTranslator.resolve_reference_in_node(child)
def add_text(self, text):
self.states[-1].append((-1, text))
def new_state(self, indent=STDINDENT):
self.states.append([])
self.stateindent.append(indent)
def clear_last_state(self):
content = self.states.pop()
maxindent = sum(self.stateindent)
indent = self.stateindent.pop()
return content, maxindent, indent
def end_state(self, wrap=False, end=[''], first=None):
content, maxindent, indent = self.clear_last_state()
result = []
toformat = []
def do_format():
if not toformat:
return
if wrap:
res = my_wrap(''.join(toformat), width=MAXWIDTH-maxindent)
else:
res = ''.join(toformat).splitlines()
if end:
res += end
result.append((indent, res))
for itemindent, item in content:
if itemindent == -1:
toformat.append(item)
else:
do_format()
result.append((indent + itemindent, item))
toformat = []
do_format()
if first is not None and result:
itemindent, item = result[0]
result_rest, result = result[1:], []
if item:
toformat = [first + ' '.join(item)]
do_format() # re-create `result` from `toformat`
_dummy, new_item = result[0]
result.insert(0, (itemindent - indent, [new_item[0]]))
result[1] = (itemindent, new_item[1:])
result.extend(result_rest)
self.states[-1].extend(result)
def visit_document(self, node):
self.new_state(0)
def depart_document(self, node):
self.end_state()
self.body = self.nl.join(line and (' '*indent + line)
for indent, lines in self.states[0]
for line in lines)
# XXX header/footer?
def visit_highlightlang(self, node):
raise nodes.SkipNode
def visit_section(self, node):
self._title_char = self.sectionchars[self.sectionlevel]
self.sectionlevel += 1
def depart_section(self, node):
self.sectionlevel -= 1
def visit_topic(self, node):
# Skip TOC in the articles
raise nodes.SkipNode
def depart_topic(self, node):
pass
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_rubric(self, node):
self.new_state(0)
self.add_text('-[ ')
def depart_rubric(self, node):
self.add_text(' ]-')
self.end_state()
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_title(self, node):
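        # Map how deeply this title's section is nested in the doctree to a
        # Markdown heading level, emitting one '#' per level.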
depth = -1
element = node.parent
while (element is not None):
depth += 1
element = element.parent
self.add_text(self.nl * 2 + (depth * '#') + ' ')
def depart_title(self, node):
pass
def visit_subtitle(self, node):
pass
def depart_subtitle(self, node):
pass
def visit_attribution(self, node):
self.add_text('-- ')
def depart_attribution(self, node):
pass
def visit_desc(self, node):
pass
def depart_desc(self, node):
pass
def visit_desc_signature(self, node):
self.new_state(0)
def depart_desc_signature(self, node):
# XXX: wrap signatures in a way that makes sense
self.end_state(wrap=False, end=None)
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.add_text(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_parameterlist(self, node):
self.add_text('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.add_text(')')
def visit_desc_parameter(self, node):
if not self.first_param:
self.add_text(', ')
else:
self.first_param = 0
self.add_text(node.astext())
raise nodes.SkipNode
def visit_desc_optional(self, node):
self.add_text('[')
def depart_desc_optional(self, node):
self.add_text(']')
def visit_desc_annotation(self, node):
pass
def depart_desc_annotation(self, node):
pass
def visit_desc_content(self, node):
self.new_state()
self.add_text(self.nl)
def depart_desc_content(self, node):
self.end_state()
def visit_figure(self, node):
self.new_state()
def depart_figure(self, node):
self.end_state()
def visit_caption(self, node):
pass
def depart_caption(self, node):
pass
def visit_productionlist(self, node):
self.new_state()
names = []
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in node:
if production['tokenname']:
self.add_text(production['tokenname'].ljust(maxlen) + ' ::=')
lastname = production['tokenname']
elif lastname is not None:
self.add_text('%s ' % (' '*len(lastname)))
self.add_text(production.astext() + self.nl)
self.end_state(wrap=False)
raise nodes.SkipNode
def visit_footnote(self, node):
self._footnote = node.children[0].astext().strip()
self.new_state(len(self._footnote) + 3)
def depart_footnote(self, node):
self.end_state(first='[%s] ' % self._footnote)
def visit_citation(self, node):
if len(node) and isinstance(node[0], nodes.label):
self._citlabel = node[0].astext()
else:
self._citlabel = ''
self.new_state(len(self._citlabel) + 3)
def depart_citation(self, node):
self.end_state(first='[%s] ' % self._citlabel)
def visit_label(self, node):
raise nodes.SkipNode
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
# XXX: option list could use some better styling
def visit_option_list(self, node):
pass
def depart_option_list(self, node):
pass
def visit_option_list_item(self, node):
self.new_state(0)
def depart_option_list_item(self, node):
self.end_state()
def visit_option_group(self, node):
self._firstoption = True
def depart_option_group(self, node):
self.add_text(' ')
def visit_option(self, node):
if self._firstoption:
self._firstoption = False
else:
self.add_text(', ')
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.add_text(node['delimiter'])
def depart_option_argument(self, node):
pass
def visit_description(self, node):
pass
def depart_description(self, node):
pass
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_colspec(self, node):
self.table[0].append(node['colwidth'])
raise nodes.SkipNode
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
pass
def depart_thead(self, node):
pass
def visit_tbody(self, node):
self.table.append('sep')
def depart_tbody(self, node):
pass
def visit_row(self, node):
self.table.append([])
def depart_row(self, node):
pass
def visit_entry(self, node):
if 'morerows' in node or 'morecols' in node:
raise NotImplementedError('Column or row spanning cells are '
'not implemented.')
self.new_state(0)
def depart_entry(self, node):
text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop())
self.stateindent.pop()
self.table[-1].append(text)
def visit_table(self, node):
if self.table:
raise NotImplementedError('Nested tables are not supported.')
self.new_state(0)
self.table = [[]]
def depart_table(self, node):
lines = self.table[1:]
fmted_rows = []
colwidths = self.table[0]
realwidths = colwidths[:]
separator = 0
self.add_text('<!-- {} -->'.format(node.tagname))
# self.add_text('<!-- {} -->'.format(json.dumps(self.table)))
# don't allow paragraphs in table cells for now
# for line in lines:
# if line == 'sep':
# separator = len(fmted_rows)
# else:
# cells = []
# for i, cell in enumerate(line):
# par = my_wrap(cell, width=colwidths[i])
# if par:
# maxwidth = max(column_width(x) for x in par)
# else:
# maxwidth = 0
# realwidths[i] = max(realwidths[i], maxwidth)
# cells.append(par)
# fmted_rows.append(cells)
# def writesep(char='-'):
# out = ['+']
# for width in realwidths:
# out.append(char * (width+2))
# out.append('+')
# self.add_text(''.join(out) + self.nl)
# def writerow(row):
# lines = zip_longest(*row)
# for line in lines:
# out = ['|']
# for i, cell in enumerate(line):
# if cell:
# adjust_len = len(cell) - column_width(cell)
# out.append(' ' + cell.ljust(
# realwidths[i] + 1 + adjust_len))
# else:
# out.append(' ' * (realwidths[i] + 2))
# out.append('|')
# self.add_text(''.join(out) + self.nl)
# for i, row in enumerate(fmted_rows):
# if separator and i == separator:
# writesep('=')
# else:
# writesep('-')
# writerow(row)
# writesep('-')
self.table = None
self.end_state(wrap=False)
def visit_acks(self, node):
self.new_state(0)
self.add_text(', '.join(n.astext() for n in node.children[0].children) +
'.')
self.end_state()
raise nodes.SkipNode
def visit_image(self, node):
try:
            uri_parts = node.attributes['uri'].split('/')
            image_name = '/'.join(uri_parts[uri_parts.index('_static') - 1:])
        except ValueError:
            print("Image not found where expected: {}".format(node.attributes['uri']))
            raise nodes.SkipNode
image_name = ''.join(image_name.split())
self.new_state(0)
        if 'alt' in node.attributes:
            self.add_text('![{}]({})'.format(node['alt'], image_name) + self.nl)
        self.add_text('![]({})'.format(image_name) + self.nl)
self.end_state(False)
raise nodes.SkipNode
def visit_transition(self, node):
indent = sum(self.stateindent)
self.new_state(0)
self.add_text('=' * (MAXWIDTH - indent))
self.end_state()
raise nodes.SkipNode
def visit_bullet_list(self, node):
self.list_counter.append(-1)
def depart_bullet_list(self, node):
self.list_counter.pop()
def visit_enumerated_list(self, node):
self.list_counter.append(node.get('start', 1) - 1)
def depart_enumerated_list(self, node):
self.list_counter.pop()
def visit_definition_list(self, node):
self.list_counter.append(-2)
def depart_definition_list(self, node):
self.list_counter.pop()
def visit_list_item(self, node):
if self.list_counter[-1] == -1:
# bullet list
self.new_state(2)
elif self.list_counter[-1] == -2:
# definition list
pass
else:
# enumerated list
self.list_counter[-1] += 1
self.new_state(len(str(self.list_counter[-1])) + 2)
def depart_list_item(self, node):
if self.list_counter[-1] == -1:
self.end_state(first='* ')
elif self.list_counter[-1] == -2:
pass
else:
self.end_state(first='%s. ' % self.list_counter[-1])
def visit_definition_list_item(self, node):
self._classifier_count_in_li = len(node.traverse(nodes.classifier))
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.new_state(0)
def depart_term(self, node):
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_termsep(self, node):
self.add_text(', ')
raise nodes.SkipNode
def visit_classifier(self, node):
self.add_text(' : ')
def depart_classifier(self, node):
self._classifier_count_in_li -= 1
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_definition(self, node):
self.new_state()
def depart_definition(self, node):
self.end_state()
def visit_field_list(self, node):
pass
def depart_field_list(self, node):
pass
def visit_field(self, node):
pass
def depart_field(self, node):
pass
def visit_field_name(self, node):
self.new_state(0)
def depart_field_name(self, node):
self.add_text(':')
self.end_state(end=None)
def visit_field_body(self, node):
self.new_state()
def depart_field_body(self, node):
self.end_state()
def visit_centered(self, node):
pass
def depart_centered(self, node):
pass
def visit_hlist(self, node):
pass
def depart_hlist(self, node):
pass
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_admonition(self, node):
self.new_state(0)
def depart_admonition(self, node):
self.end_state()
def _visit_admonition(self, node):
self.new_state(2)
if isinstance(node.children[0], nodes.Sequential):
self.add_text(self.nl)
def _make_depart_admonition(name):
def depart_admonition(self, node):
self.end_state(first=admonitionlabels[name] + ': ')
return depart_admonition
def _make_depart_alert_box(name):
def depart_alert_box(self, node):
self.clear_last_state()
MarkdownTranslator.resolve_reference_in_node(node)
lines = node.astext().split('\n')
quoteLines = ['> {0}\n>'.format(line) for line in lines]
mdStr = '\n> [!{0}]\n{1}'.format(name, '\n'.join(quoteLines))
self.add_text(mdStr)
return depart_alert_box
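    # For reference (illustrative, derived from the template above): a note
    # admonition whose body is the single line "Be careful" departs as the
    # DocFX alert block:
    #
    #   > [!NOTE]
    #   > Be careful
    #   >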
visit_attention = _visit_admonition
depart_attention = _make_depart_admonition('attention')
visit_caution = _visit_admonition
depart_caution = _make_depart_alert_box('CAUTION')
visit_danger = _visit_admonition
depart_danger = _make_depart_admonition('danger')
visit_error = _visit_admonition
depart_error = _make_depart_admonition('error')
visit_hint = _visit_admonition
depart_hint = _make_depart_admonition('hint')
visit_important = _visit_admonition
depart_important = _make_depart_alert_box('IMPORTANT')
visit_note = _visit_admonition
depart_note = _make_depart_alert_box('NOTE')
visit_tip = _visit_admonition
depart_tip = _make_depart_alert_box('TIP')
visit_warning = _visit_admonition
depart_warning = _make_depart_alert_box('WARNING')
visit_seealso = _visit_admonition
def depart_seealso(self, node):
self.end_state()
def visit_versionmodified(self, node):
self.new_state(0)
def depart_versionmodified(self, node):
self.end_state()
def visit_literal_block(self, node):
try:
include_language = None
include_lines = None
include_highlight = None
include_caption = None
path = self.builder.confdir
relative_path = node.attributes['source'][len(path)+1:]
if 'language' in node.attributes:
include_language = node.attributes['language']
if 'caption' in node.attributes:
include_caption = node.attributes['caption']
include_language = (('-' + include_language) if (include_language is not None) else '')
include_caption = (('"' + include_caption + '"') if (include_caption is not None) else '')
self.add_text('<!--[!code{}[Main]({} {})]-->'.format(include_language, relative_path, include_caption))
        except (KeyError, ValueError):
            pass
self.new_state(0)
self.add_text('<!-- {} {} -->'.format(node.tagname, json.dumps(node.attributes)))
self.end_state(wrap=False)
if 'language' in node.attributes:
self.add_text('````{}'.format(node.attributes['language']))
else:
self.add_text('````')
self.new_state()
def depart_literal_block(self, node):
self.add_text(self.nl + '````')
self.end_state(wrap=False)
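    # Taken together, visit/depart_literal_block above emit roughly (a sketch;
    # attribute contents vary):
    #
    #   <!-- literal_block {"language": "python", ...} -->
    #   ````python
    #   print("hello")
    #   ````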
def visit_doctest_block(self, node):
self.add_text(self.nl + '```')
self.new_state(0)
def depart_doctest_block(self, node):
self.add_text(self.nl + '```')
self.end_state(wrap=False)
def visit_line_block(self, node):
self.new_state()
self.lineblocklevel += 1
def depart_line_block(self, node):
self.lineblocklevel -= 1
self.end_state(wrap=False, end=None)
if not self.lineblocklevel:
self.add_text('\n')
def visit_line(self, node):
pass
def depart_line(self, node):
self.add_text('\n')
def visit_block_quote(self, node):
self.new_state()
def depart_block_quote(self, node):
self.end_state()
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_paragraph(self, node):
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.new_state(0)
def depart_paragraph(self, node):
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.end_state()
def visit_target(self, node):
if node.hasattr('refid'):
self.new_state(0)
self.add_text('<a name={}></a>'.format(node.attributes['refid']))
self.end_state()
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_toctree(self, node):
raise nodes.SkipNode
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_pending_xref(self, node):
if 'refdomain' in node.attributes and node.attributes['refdomain'] == 'py':
self.add_text('<xref:{}>'.format(node.attributes['reftarget']))
raise nodes.SkipNode
def depart_pending_xref(self, node):
pass
@classmethod
def _resolve_reference(cls, node):
ref_string = None
raw_ref_tilde_template = ":class:`~{0}`"
raw_ref_template = ":class:`{0}`"
if 'refid' in node.attributes:
ref_string = cls.xref_template.format(node.attributes['refid'])
elif 'refuri' in node.attributes:
if 'http' in node.attributes['refuri'] or node.attributes['refuri'][0] == '/':
ref_string = '[{}]({})'.format(node.astext(), node.attributes['refuri'])
else:
# only use id in class and func refuri if its id exists
# otherwise, remove '.html#' in refuri
# uri_fields[1] is class or function uid. e.g:
# case 0 - [module]#[class-uid] (go to if block to use class-uid instead)
# case 1 - [module]#module-[module] (go to else block to remove '.html#' in refuri)
# case 2 - [class]# (go to else block to remove path and '.html#' in refuri)
uri_fields = node.attributes['refuri'].split('#')
if len(uri_fields) > 1 and uri_fields[1] and not uri_fields[1].startswith('module'):
node.attributes['refuri'] = uri_fields[1]
else:
fname = os.path.split(node.attributes['refuri'])[-1]
pos = fname.find('.html')
if pos != -1:
node.attributes['refuri'] = fname[0: pos]
if node.parent.rawsource == raw_ref_tilde_template.format(node.attributes['refuri']) or node.parent.rawsource == raw_ref_template.format(node.attributes['refuri']) or node.parent.tagname == 'document':
ref_string = node.attributes['refuri']
else:
ref_string = cls.xref_template.format(node.attributes['refuri'])
else:
ref_string = '{}<!-- {} -->'.format(node.tagname, json.dumps(node.attributes))
return ref_string
def visit_reference(self, node):
ref_string = MarkdownTranslator._resolve_reference(node)
self.add_text(ref_string)
raise nodes.SkipNode
def depart_reference(self, node):
pass
def visit_number_reference(self, node):
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_emphasis(self, node):
self.add_text('*')
def depart_emphasis(self, node):
self.add_text('*')
def visit_literal_emphasis(self, node):
self.add_text('*')
def depart_literal_emphasis(self, node):
self.add_text('*')
def visit_strong(self, node):
self.add_text('**')
def depart_strong(self, node):
self.add_text('**')
def visit_literal_strong(self, node):
self.add_text('**')
def depart_literal_strong(self, node):
self.add_text('**')
def visit_abbreviation(self, node):
self.add_text('')
def depart_abbreviation(self, node):
if node.hasattr('explanation'):
self.add_text(' (%s)' % node['explanation'])
def visit_title_reference(self, node):
self.add_text('*')
def depart_title_reference(self, node):
self.add_text('*')
def visit_literal(self, node):
self.add_text('`')
def depart_literal(self, node):
self.add_text('`')
def visit_subscript(self, node):
self.add_text('_')
def depart_subscript(self, node):
pass
def visit_superscript(self, node):
self.add_text('^')
def depart_superscript(self, node):
pass
def visit_footnote_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_citation_reference(self, node):
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_Text(self, node):
self.add_text(node.astext())
def depart_Text(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_inline(self, node):
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def depart_inline(self, node):
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_problematic(self, node):
self.add_text('>>')
def depart_problematic(self, node):
self.add_text('<<')
def visit_system_message(self, node):
print(bcolors.WARNING + "System message warnings: %s" % node.astext() + bcolors.ENDC)
raise nodes.SkipNode
def visit_comment(self, node):
raise nodes.SkipNode
def visit_meta(self, node):
# only valid for HTML
raise nodes.SkipNode
def visit_raw(self, node):
if 'text' in node.get('format', '').split():
self.new_state(0)
self.add_text(node.astext())
            self.end_state(wrap=False)
raise nodes.SkipNode
def visit_math(self, node):
self.builder.warn('using "math" markup without a Sphinx math extension '
'active, please use one of the math extensions '
'described at http://sphinx-doc.org/ext/math.html',
(self.builder.env.docname, node.line))
raise nodes.SkipNode
visit_math_block = visit_math
def visit_substitution_reference(self, node):
pass
def depart_substitution_reference(self, node):
pass
visit_remarks = remarks.visit_remarks
depart_remarks = remarks.depart_remarks
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
|
rebost/django
|
refs/heads/master
|
tests/regressiontests/generic_inline_admin/tests.py
|
8
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.contenttypes.generic import (
generic_inlineformset_factory, GenericTabularInline)
from django.forms.models import ModelForm
from django.test import TestCase
from django.test.utils import override_settings
# local test models
from .admin import MediaInline, MediaPermanentInline
from .models import (Episode, EpisodeExtra, EpisodeMaxNum, Media,
EpisodePermanent, Category)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericAdminViewTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
# set TEMPLATE_DEBUG to True to ensure {% include %} will raise
# exceptions since that is how inlines are rendered and #9498 will
# bubble up if it is an issue.
self.original_template_debug = settings.TEMPLATE_DEBUG
settings.TEMPLATE_DEBUG = True
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def tearDown(self):
self.client.logout()
settings.TEMPLATE_DEBUG = self.original_template_debug
def testBasicAddGet(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/add/')
self.assertEqual(response.status_code, 200)
def testBasicEditGet(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk)
self.assertEqual(response.status_code, 200)
def testBasicAddPost(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/episode/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testBasicEditPost(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = '/generic_inline_admin/admin/generic_inline_admin/episode/%d/' % self.episode_pk
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testGenericInlineFormset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="text" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="text" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="text" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def testGenericInlineFormsetFactory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminParametersTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def testNoParam(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episode/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def testExtraParam(self):
"""
With extra=0, there should be one form.
"""
e = self._create_object(EpisodeExtra)
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodeextra/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
def testMaxNumParam(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
e = self._create_object(EpisodeMaxNum)
inline_form_data = '<input type="hidden" name="generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" value="2" id="id_generic_inline_admin-media-content_type-object_id-TOTAL_FORMS" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" value="1" id="id_generic_inline_admin-media-content_type-object_id-INITIAL_FORMS" />'
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/episodemaxnum/%s/' % e.pk)
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class GenericInlineAdminWithUniqueTogetherTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
fixtures = ['users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def testAdd(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get('/generic_inline_admin/admin/generic_inline_admin/contact/add/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/generic_inline_admin/admin/generic_inline_admin/contact/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
class NoInlineDeletionTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
def test_no_deletion(self):
fake_site = object()
inline = MediaPermanentInline(EpisodePermanent, fake_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class GenericInlineModelAdminTest(TestCase):
urls = "regressiontests.generic_inline_admin.urls"
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, None)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['description', 'keywords', 'id', 'DELETE'])
|
rlugojr/rekall
|
refs/heads/master
|
rekall-agent/rekall_agent/policies/files.py
|
1
|
#!/usr/bin/env python2
# Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Author: Michael Cohen scudette@google.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__author__ = "Michael Cohen <scudette@google.com>"
"""A Files based policy.
This uses local files on the same system to run the client/server. It can not be
used across different systems but it is a nice way to test local installations.
Main policy:
- The jobs queues are implemented as files in the following directory:
{root_path}/{client_id}/jobs
"""
import os
from rekall_agent.config import agent
from rekall_agent.locations import files
class FileBasedServerPolicy(agent.ServerPolicy):
"""This server deployment policy is centered around files.
It can only work locally on the same machine since this produces Location
objects that refer to the local filesystem. It is sufficient, however, to
test the system end to end.
"""
schema = [
dict(name="root_path",
doc="The root path we use to store the entire installation.")
]
def jobs_queue_for_client(self, client_id):
"""Returns a Location for the client's job queue."""
return files.FileLocation.from_keywords(
session=self._session, path=os.path.join(
self.root_path, client_id, "jobs"))
def get_client_vfs_path(self, client_id, path):
"""Returns a Location for storing the path in the client's VFS area."""
return files.FileLocation.from_keywords(
session=self._session, path=os.path.join(
self.root_path, client_id, "vfs", path.lstrip(os.path.sep)))
def get_client_vfs_prefix(self, client_id, path):
"""Returns a Location suitable for storing a path using the prefix."""
return files.FileLocation.from_keywords(
session=self._session, path=os.path.join(
self.root_path, client_id, "vfs", path.lstrip(os.path.sep)))
def get_ticket_location(self, client_id, flow_id):
"""Returns a Location for the client to write flow tickets.
        When we issue requests to the client, we need to allow the client to
        report the progress of the flow requests running on it. We do this by
        instructing the client to write a "Flow Ticket" to the ticket
        location.
"""
return files.FileLocation.from_keywords(
session=self._session, path=os.path.join(
self.root_path, client_id, "flows",
flow_id + "." + "ticket"))
def get_flow_metadata_collection(self, client_id):
return files.FileLocation.from_keywords(
session=self._session, path=os.path.join(
self.root_path, client_id, "flows.sqlite"))
class FileBasedAgentPolicy(agent.ClientPolicy):
"""A policy controller for a simple file based agent."""
schema = [
dict(name="root_path",
doc="The root path for the entire installation."),
]
def get_jobs_queue(self):
"""Returns a Location object for reading the jobs queue."""
return files.FileLocation.from_keywords(
session=self._session, path=os.path.join(
self.root_path, self.client_id, "jobs"))
|
FireWRT/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/host/lib/python2.7/test/test_unary.py
|
137
|
"""Test compiler changes for unary ops (+, -, ~) introduced in Python 2.2"""
import unittest
from test.test_support import run_unittest, have_unicode
class UnaryOpTestCase(unittest.TestCase):
def test_negative(self):
self.assertTrue(-2 == 0 - 2)
self.assertTrue(-0 == 0)
self.assertTrue(--2 == 2)
self.assertTrue(-2L == 0 - 2L)
self.assertTrue(-2.0 == 0 - 2.0)
self.assertTrue(-2j == 0 - 2j)
def test_positive(self):
self.assertTrue(+2 == 2)
self.assertTrue(+0 == 0)
self.assertTrue(++2 == 2)
self.assertTrue(+2L == 2L)
self.assertTrue(+2.0 == 2.0)
self.assertTrue(+2j == 2j)
def test_invert(self):
        self.assertTrue(~2 == -(2 + 1))
        self.assertTrue(~0 == -1)
        self.assertTrue(~~2 == 2)
        self.assertTrue(~2L == -(2L + 1))
def test_no_overflow(self):
nines = "9" * 32
self.assertTrue(eval("+" + nines) == eval("+" + nines + "L"))
self.assertTrue(eval("-" + nines) == eval("-" + nines + "L"))
self.assertTrue(eval("~" + nines) == eval("~" + nines + "L"))
def test_negation_of_exponentiation(self):
# Make sure '**' does the right thing; these form a
# regression test for SourceForge bug #456756.
self.assertEqual(-2 ** 3, -8)
self.assertEqual((-2) ** 3, -8)
self.assertEqual(-2 ** 4, -16)
self.assertEqual((-2) ** 4, 16)
def test_bad_types(self):
for op in '+', '-', '~':
self.assertRaises(TypeError, eval, op + "'a'")
if have_unicode:
self.assertRaises(TypeError, eval, op + "u'a'")
self.assertRaises(TypeError, eval, "~2j")
self.assertRaises(TypeError, eval, "~2.0")
def test_main():
run_unittest(UnaryOpTestCase)
if __name__ == "__main__":
test_main()
|
h3biomed/ansible
|
refs/heads/h3
|
lib/ansible/modules/network/fortios/fortios_vpn_ipsec_forticlient.py
|
21
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The fortiosapi library uses Python logging; you can capture its output by
# enabling logging in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_vpn_ipsec_forticlient
short_description: Configure FortiClient policy realm in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS device by allowing the
      user to set and modify the vpn_ipsec feature and forticlient category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
vpn_ipsec_forticlient:
description:
- Configure FortiClient policy realm.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
phase2name:
description:
            - Phase 2 tunnel name that you defined in the FortiClient dialup configuration. Source vpn.ipsec.phase2.name
              vpn.ipsec.phase2-interface.name.
realm:
description:
- FortiClient realm name.
required: true
status:
description:
- Enable/disable this FortiClient configuration.
choices:
- enable
- disable
usergroupname:
description:
- User group name for FortiClient users. Source user.group.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure FortiClient policy realm.
fortios_vpn_ipsec_forticlient:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
vpn_ipsec_forticlient:
state: "present"
phase2name: "<your_own_value> (source vpn.ipsec.phase2.name vpn.ipsec.phase2-interface.name)"
realm: "<your_own_value>"
status: "enable"
usergroupname: "<your_own_value> (source user.group.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_vpn_ipsec_forticlient_data(json):
option_list = ['phase2name', 'realm', 'status',
'usergroupname']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
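# Illustrative call (hypothetical payload):
#   filter_vpn_ipsec_forticlient_data(
#       {'state': 'present', 'realm': 'r1', 'status': 'enable'})
# returns {'realm': 'r1', 'status': 'enable'}; 'state' is control data for
# this module, not a FortiOS API field, so it is dropped.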
def flatten_multilists_attributes(data):
multilist_attrs = []
for attr in multilist_attrs:
try:
path = "data['" + "']['".join(elem for elem in attr) + "']"
current_val = eval(path)
flattened_val = ' '.join(elem for elem in current_val)
exec(path + '= flattened_val')
except BaseException:
pass
return data
def vpn_ipsec_forticlient(data, fos):
vdom = data['vdom']
vpn_ipsec_forticlient_data = data['vpn_ipsec_forticlient']
flattened_data = flatten_multilists_attributes(vpn_ipsec_forticlient_data)
filtered_data = filter_vpn_ipsec_forticlient_data(flattened_data)
if vpn_ipsec_forticlient_data['state'] == "present":
return fos.set('vpn.ipsec',
'forticlient',
data=filtered_data,
vdom=vdom)
elif vpn_ipsec_forticlient_data['state'] == "absent":
return fos.delete('vpn.ipsec',
'forticlient',
mkey=filtered_data['realm'],
vdom=vdom)
def fortios_vpn_ipsec(data, fos):
login(data)
if data['vpn_ipsec_forticlient']:
resp = vpn_ipsec_forticlient(data, fos)
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"vpn_ipsec_forticlient": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"phase2name": {"required": False, "type": "str"},
"realm": {"required": True, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"usergroupname": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
alexdzul/myPage
|
refs/heads/master
|
myPage/apps/social/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
mdanielwork/intellij-community
|
refs/heads/master
|
python/testData/resolve/AttributeAssignedNearby.py
|
83
|
def foo(bar):
bar.xyzzy = 1
print bar.xyzzy
# <ref>
|
HealthAPI/helmet
|
refs/heads/develop
|
node_modules/grunt-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/input_test.py
|
604
|
#!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['a'])],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['a'])],
self.nodes['a'].FindCycles())
self.assertEquals([(self.nodes['b'], self.nodes['a'], self.nodes['b'])],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
(self.nodes['a'], self.nodes['b'], self.nodes['a']) in cycles)
self.assertTrue(
(self.nodes['b'], self.nodes['c'], self.nodes['b']) in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([(self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a'])],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
|
kamni/nodonuts
|
refs/heads/master
|
nodonuts/urls.py
|
1
|
from constance import config
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView
from organizations.forms import NoDonutsAuthForm
admin.autodiscover()
urlpatterns = patterns('',
url(r'', include('recipes.urls')),
url(r'^about/$', TemplateView.as_view(template_name="about.html"), name="about"),
url(r'^auth/login/$', 'django.contrib.auth.views.login', {'authentication_form': NoDonutsAuthForm}, 'login'),
url(r'^auth/', include('django.contrib.auth.urls')),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, 'logout'),
url(r'^members/', include('organizations.urls')),
url(r'^site-manager/django/', include(admin.site.urls)),
url(r'^site-manager/', include('site_manager.urls')),
url(r'^social/', include('social.apps.django_app.urls', namespace='social')),
url(r'^textedit/', include('scribbler.urls')),
url(r'^tinymce/', include('tinymce.urls')),
)
if settings.INCLUDE_DOC_URLS:
urlpatterns += (url(r'^docs/', include('sphinxdoc.urls')),)
if config.DISPLAY_TERMS_AND_CONDITIONS:
urlpatterns += (url(r'^terms-and-conditions/$',
TemplateView.as_view(template_name="tos.html"),
name="terms"),)
if config.DISPLAY_PRIVACY_POLICY:
urlpatterns += (url(r'^privacy-policy/$',
TemplateView.as_view(template_name="privacy.html"),
name="privacy"),)
|
ACJTeam/enigma2
|
refs/heads/master
|
lib/python/Tools/Notifications.py
|
12
|
notifications = [ ]
notificationAdded = [ ]
# notifications which are currently on screen (and might be closed by similar notifications)
current_notifications = [ ]
def __AddNotification(fnc, screen, id, *args, **kwargs):
if ".MessageBox'>" in `screen`:
kwargs["simple"] = True
if ".Standby'>" in `screen`:
removeCIdialog()
notifications.append((fnc, screen, args, kwargs, id))
for x in notificationAdded:
x()
def AddNotification(screen, *args, **kwargs):
AddNotificationWithCallback(None, screen, *args, **kwargs)
def AddNotificationWithCallback(fnc, screen, *args, **kwargs):
__AddNotification(fnc, screen, None, *args, **kwargs)
def AddNotificationParentalControl(fnc, screen, *args, **kwargs):
RemovePopup("Parental control")
__AddNotification(fnc, screen, "Parental control", *args, **kwargs)
def AddNotificationWithID(id, screen, *args, **kwargs):
__AddNotification(None, screen, id, *args, **kwargs)
# we don't support notifications with callback and ID as this
# would require manually calling the callback on cancelled popups.
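# A minimal usage sketch (illustrative only; "my_id" is a hypothetical id):
#
#     from Screens.MessageBox import MessageBox
#     AddNotificationWithID("my_id", MessageBox, text="Hello", type=MessageBox.TYPE_INFO)
#     # later, dismiss it whether it is still queued or already on screen:
#     RemovePopup("my_id")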
def RemovePopup(id):
# remove similar notifications
print "RemovePopup, id =", id
for x in notifications:
if x[4] and x[4] == id:
print "(found in notifications)"
notifications.remove(x)
for x in current_notifications:
if x[0] == id:
print "(found in current notifications)"
x[1].close()
from Screens.MessageBox import MessageBox
def AddPopup(text, type, timeout, id = None):
if id is not None:
RemovePopup(id)
print "AddPopup, id =", id
AddNotificationWithID(id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = True)
def removeCIdialog():
import NavigationInstance
if NavigationInstance.instance and NavigationInstance.instance.wasTimerWakeup():
import Screens.Ci
for slot in Screens.Ci.CiHandler.dlgs:
if hasattr(Screens.Ci.CiHandler.dlgs[slot], "forceExit"):
Screens.Ci.CiHandler.dlgs[slot].tag = "WAIT"
Screens.Ci.CiHandler.dlgs[slot].forceExit()
|
escattone/kuma
|
refs/heads/master
|
kuma/core/migrations/0006_create_l10n_survey_banner_flag.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_l10n_survey_banner_flag(apps, schema_editor):
Flag = apps.get_model("waffle", "Flag")
if not Flag.objects.filter(name="l10n_survey").exists():
Flag.objects.create(
name="l10n_survey", staff=True, note="Shows the l10n survey banner"
)
class Migration(migrations.Migration):
dependencies = [
("core", "0005_auto_20200409_1312"),
# This is needed otherwise `apps.get_model('waffle', 'Flag')`
# will raise a Django app LookupError.
("waffle", "0001_initial"),
]
operations = [migrations.RunPython(create_l10n_survey_banner_flag)]
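# The RunPython above has no reverse function, so this migration is
# irreversible as written. A minimal reverse sketch (an assumption, not part
# of the original migration) could delete the flag on unapply:
#
#     def remove_l10n_survey_banner_flag(apps, schema_editor):
#         Flag = apps.get_model("waffle", "Flag")
#         Flag.objects.filter(name="l10n_survey").delete()
#
#     operations = [
#         migrations.RunPython(create_l10n_survey_banner_flag,
#                              remove_l10n_survey_banner_flag)
#     ]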
|
Southpaw-TACTIC/TACTIC
|
refs/heads/4.7
|
src/tactic/ui/app/diagnostics_wdg.py
|
1
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = ['DiagnosticsWdg', 'DiagnosticsHandoffDirTestCmd']
import os
from pyasm.common import Environment, Config, TacticException
from pyasm.command import Command
from pyasm.web import DivWdg, WebContainer
from pyasm.widget import IconButtonWdg, IconWdg, CheckboxWdg
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.container import RoundedCornerDivWdg
class DiagnosticsWdg(BaseRefreshWdg):
ARGS_KEYS = {
}
def get_display(self):
top = DivWdg()
self.set_as_panel(top)
title_div = DivWdg()
title_div.add_class("maq_search_bar")
title_div.add("Diagnostics")
top.add(title_div)
tool_div = DivWdg()
top.add(tool_div)
refresh = IconButtonWdg("Refresh", IconWdg.REFRESH)
refresh.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_panel");
spt.panel.refresh(top);
'''
} )
tool_div.add(refresh)
content = RoundedCornerDivWdg(hex_color_code="2F2F2F",corner_size="10")
content.set_dimensions( width_str='300px', content_height_str=None )
top.add(content)
server_title_div = DivWdg()
server_title_div.add_class("maq_search_bar")
content.add(server_title_div)
server_title_div.add("Server")
server_content_div = DivWdg()
server_content_div.add_style("padding: 10px")
server_content_div.add(self.get_ping_wdg())
server_content_div.add(self.get_load_balance_wdg())
content.add(server_content_div)
database_title_div = DivWdg()
database_title_div.add_class("maq_search_bar")
content.add(database_title_div)
database_title_div.add("Database")
database_content_div = DivWdg()
database_content_div.add_style("padding: 10px")
database_content_div.add(self.get_database_wdg())
content.add(database_content_div)
checkin_title_div = DivWdg()
checkin_title_div.add_class("maq_search_bar")
content.add(checkin_title_div)
checkin_title_div.add("Database")
checkin_content_div = DivWdg()
checkin_content_div.add_style("padding: 10px")
checkin_content_div.add(self.get_asset_dir_wdg() )
checkin_content_div.add(self.get_asset_management_wdg())
content.add(checkin_content_div)
return top
def get_ping_wdg(self):
div = DivWdg()
div.add_class("spt_diagnostics_ping")
ping_div = DivWdg()
div.add(ping_div)
ping_div.add( CheckboxWdg() )
ping_div.add_behavior( {
'type': 'load',
'cbjs_action': '''
var server = TacticServerStub.get();
var result = server.ping();
var msg = 'wow';
var status_el = spt.get_cousin(bvr.src_el, ".spt_diagnostics_ping",".spt_diagnostics_ping_status");
status_el.innerHTML = result;
'''
} )
# Test database connection
ping_div.add("Test Server Ping")
status_div = DivWdg()
status_div.add_class("spt_diagnostics_ping_status")
status_div.add("Checking ...")
div.add(status_div)
return div
def get_asset_dir_wdg(self):
div = DivWdg()
asset_dir_div = DivWdg()
div.add(asset_dir_div)
asset_dir_div.add( CheckboxWdg() )
asset_dir_div.add("Test Asset Directory")
status = self.test_asset_dir()
status_div = DivWdg()
status_div.add_class("spt_diagnostics_asset_dir")
status_div.add(status)
div.add(status_div)
return div
def test_asset_dir(self):
asset_dir = "/home/apache/assets"
status = 'OK'
exists = os.path.exists(asset_dir)
if not exists:
status = "Error: asset_dir [%s] does not exist" % asset_dir
# test writing a file
file_name = ".test.txt"
path = "%s/%s" % (asset_dir, file_name)
try:
f = open(path, 'w')
f.write("test")
f.close()
except Exception as e:
status = "Error: can't write to asset folder"
return status
def get_database_wdg(self):
div = DivWdg()
database_div = DivWdg()
div.add(database_div)
database_div.add( CheckboxWdg() )
# Test database connection
database_div.add("Test Database Connection")
status_div = DivWdg()
status_div.add_class("spt_diagnostics_database")
status_div.add("Checking ...")
div.add(status_div)
return div
def get_load_balance_wdg(self):
div = DivWdg()
div.add_class("spt_diagnostics_load_balance")
load_div = DivWdg()
div.add(load_div)
load_div.add_behavior( {
'type': 'load',
'cbjs_action': '''
var server = TacticServerStub.get();
var ports = {};
var num_ports = 0;
for (var i=0; i<10; i++) {
var info = server.get_connection_info();
var port = info.port;
if (typeof(ports[port]) == 'undefined') {
ports[port] = 0;
num_ports += 1;
}
ports[port] += 1
}
var msg = "Number of ports: "+num_ports;
var status_el = spt.get_cousin(bvr.src_el, ".spt_diagnostics_load_balance",".spt_diagnostics_load_status");
status_el.innerHTML = "OK - "+msg;
'''
} )
# Test load balancing
load_div.add( CheckboxWdg() )
load_div.add("Test Load Balancing")
load_status_div = DivWdg()
load_status_div.add_class("spt_diagnostics_load_status")
load_status_div.add("Checking ...")
div.add(load_status_div)
return div
def get_asset_management_wdg(self):
div = DivWdg()
div.add_class("spt_diagnostics_dam")
handoff_div = DivWdg()
handoff_div.add_behavior( {
'type': 'load',
'cbjs_action': '''
var server = TacticServerStub.get();
var handoff_dir = server.get_handoff_dir();
var applet = spt.Applet.get();
applet.makedirs(handoff_dir);
var random_number=Math.floor(Math.random()*100)
var filename = 'test'+random_number+'.txt';
applet.create_file(handoff_dir+'/'+filename, 'test');
var cmd = 'tactic.ui.app.DiagnosticsHandoffDirTestCmd';
var args = {
handoff_dir: handoff_dir,
filename: filename
};
server.execute_cmd(cmd, args);
var status_el = spt.get_cousin(bvr.src_el, ".spt_diagnostics_dam",".spt_diagnostics_handoff_status");
status_el.innerHTML = "OK";
'''
} )
# Test handoff directory
div.add(handoff_div)
handoff_div.add( CheckboxWdg() )
handoff_div.add("Test Handoff Directory")
handoff_status_div = DivWdg()
handoff_status_div.add_class("spt_diagnostics_handoff_status")
handoff_status_div.add("Checking ...")
div.add(handoff_status_div)
return div
class DiagnosticsHandoffDirTestCmd(Command):
def execute(self):
handoff_dir = self.kwargs.get("handoff_dir")
filename = self.kwargs.get("filename")
client_path = "%s/%s" % (handoff_dir, filename)
web = WebContainer.get_web()
server_handoff_dir = web.get_server_handoff_dir()
# look for a "test.txt" file
if not os.path.exists(server_handoff_dir):
raise TacticException("Server cannot find handoff dir [%s]" % server_handoff_dir)
path = "%s/%s" % (server_handoff_dir, filename)
if not os.path.exists(path):
raise TacticException("Server cannot find test.txt [%s]" % path)
f = open(path)
line = f.readline()
f.close()
if line != 'test':
raise TacticException("File [%s] is not correct" % client_path)
|
cabrer7/PyWignerCUDA
|
refs/heads/master
|
GPU_Wigner2D_GPitaevskii.py
|
1
|
#!/usr/local/epd/bin/python
#-----------------------------------------------------------------------------
# Time-independent quantum propagator by FFT split-operator method
#
#-----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot
import scipy.fftpack as fftpack
import h5py
from scipy.special import laguerre
from scipy.special import hyp1f1
from scipy.special import legendre
import time
import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
import cufft_wrapper as cuda_fft
# ===========================================================================
expPotential_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
{CUDA_constants}
__device__ double Heaviside(double x)
{{
if( x < 0. ) return 0.;
else return 1.;
}}
__device__ double Potential(double t, double x)
{{
return {potentialString};
}}
__device__ double Potential_XTheta(double t, double x, double theta)
{{
return (Potential(t, x - hBar*theta/2.) - Potential(t, x + hBar*theta/2.))/hBar;
}}
__device__ double Bloch_Potential_XTheta(double t, double x, double theta)
{{
return Potential(t, x - hBar*theta/2.) + Potential(t, x + hBar*theta/2.);
}}
__global__ void Kernel( double t_GPU, pycuda::complex<double> *B )
{{
// x runs on thread-blocks and p runs on the grid
double t = t_GPU;
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) % P_DIM ;
const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) % X_DIM ;
const double x = dx*( j - 0.5*X_DIM );
const double theta = dtheta*( i - 0.5*P_DIM );
double phase = 0.5*dt*Potential_XTheta(t, x, theta);
double r = exp( - 0.5*D_Theta*theta*theta*dt );
B[ indexTotal ] *= pycuda::complex<double>( r*cos(phase) , -r*sin(phase) );
double x_max = dx*(X_DIM-1.)/2.;
B[indexTotal] *= 1. - exp( - pow(x-x_max,2)/pow(10.*dx,2) );
B[indexTotal] *= 1. - exp( - pow(x+x_max,2)/pow(10.*dx,2) );
}}
__global__ void Kernel_Bloch( double dt_GPU, double t_GPU, pycuda::complex<double> *B )
{{
// x runs on thread-blocks and p runs on the grid
double t = t_GPU;
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) % P_DIM ;
const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) % X_DIM ;
const double x = dx*( j - 0.5*X_DIM );
const double theta = dtheta*( i - 0.5*P_DIM );
B[ indexTotal ] *= exp( -0.25*dt_GPU*Bloch_Potential_XTheta(t, x, theta) );
//double x_max = dx*(X_DIM-1.)/2.;
//B[indexTotal] *= 1. - exp( - pow(x-x_max,2)/pow(10.*dx,2) );
//B[indexTotal] *= 1. - exp( - pow(x+x_max,2)/pow(10.*dx,2) );
}}
"""
#------------------------------------------------------------
expPLambdaKinetic_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
{CUDA_constants}
__device__ double KineticEnergy( double p )
{{
return {kinematicString};
}}
__device__ double KineticEnergy_plambda( double p, double lambda)
{{
return ( KineticEnergy(p + hBar*lambda/2.) - KineticEnergy( p - hBar*lambda/2.) )/hBar;
}}
__device__ double Bloch_KineticEnergy_plambda( double p, double lambda)
{{
return KineticEnergy(p + hBar*lambda/2.) + KineticEnergy( p - hBar*lambda/2.) ;
}}
__global__ void Kernel( pycuda::complex<double> *B )
{{
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) % P_DIM ;
const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) % X_DIM ;
double lambda = dlambda*(j - 0.5*X_DIM );
double p = dp*(i - 0.5*P_DIM );
double phase = dt*KineticEnergy_plambda( p , lambda );
double r = exp( - D_Lambda*lambda*lambda );
B[ indexTotal ] *= pycuda::complex<double>( r*cos(phase), -r*sin(phase) );
}}
__global__ void Kernel_Bloch( double dt_GPU, pycuda::complex<double> *B )
{{
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) % P_DIM ;
const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) % X_DIM ;
double lambda = dlambda*(j - 0.5*X_DIM );
double p = dp*(i - 0.5*P_DIM );
B[ indexTotal ] *= exp( - 0.5*dt_GPU*Bloch_KineticEnergy_plambda( p , lambda ) );
}}
"""
#------------------------------------------------------------
zero_negative_source = """
#include <pycuda-complex.hpp>
#include<math.h>
__global__ void Kernel( pycuda::complex<double> *Bout, pycuda::complex<double> *B )
{
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
if( B[indexTotal].real() < 0. )
Bout[ indexTotal ] = B[ indexTotal ];
else
Bout[ indexTotal ] = 0.;
}
"""
fft_shift_source = """
#include <pycuda-complex.hpp>
#include<math.h>
__global__ void Kernel( pycuda::complex<double> *Bout, pycuda::complex<double> *B )
{
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) %% P_DIM ;
const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) %% X_DIM ;
const int jj = blockIdx.y + gridDim.x*blockIdx.x;
const int ii = threadIdx.x;
Bout[ jj + ii*X_DIM ] = B[ j + i*X_DIM ];
}
"""
#---------------------------------------------------------------
dampingLaxWendorf_source = """
#include <pycuda-complex.hpp>
#include <math.h>
%s
//
// Caldeira-Leggett damping by finite differences
//
__global__ void Kernel( pycuda::complex<double> *B0 )
//
// Caldeira-Leggett damping
{
pycuda::complex<double> I(0., 1.);
pycuda::complex<double> B_plus;
pycuda::complex<double> B_minus;
pycuda::complex<double> B_;
pycuda::complex<double> B_plus_half;
pycuda::complex<double> B_minus_half;
int X_DIM = blockDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) %% P_DIM ;
const int j = (threadIdx.x + X_DIM/2) %% X_DIM ;
int ip=i+1, im=i-1;
int jp=j+1, jm=j-1;
const double theta = dtheta*(j - 0.5*P_DIM );
const double x = dx*(i - 0.5*X_DIM );
const double theta_plus = dtheta*(jp - 0.5*P_DIM );
const double theta_minus = dtheta*(jm - 0.5*P_DIM );
const double x_plus = dx*(ip - 0.5*X_DIM );
const double x_minus = dx*(im - 0.5*X_DIM );
// -\gamma \theta \partial_{ \theta}
if( j>0 && j < X_DIM-1 ){
B_plus = B0[jp + i*X_DIM];
B_minus = B0[jm + i*X_DIM];
B_ = B0[j + i*X_DIM];
B_plus_half = 0.5*(B_plus + B_) - 0.5*gammaDamping*(dt/2.)*(theta_plus +theta)/2.*(B_plus - B_)/dtheta;
B_minus_half = 0.5*(B_minus + B_) - 0.5*gammaDamping*(dt/2.)*(theta_minus+theta)/2.*(B_ - B_minus)/dtheta;
B0[ j + i*X_DIM ] = B_ - 0.5*gammaDamping*dt*theta*( B_plus_half - B_minus_half )/dtheta;
}
//B1[0 + i*X_DIM ] = pycuda::complex<double>(0.,0.);
//B1[P_DIM-1 + i*X_DIM ] = pycuda::complex<double>(0.,0.);
}
"""
copy_gpuarray_source = """
#include <pycuda-complex.hpp>
#include<math.h>
__global__ void Kernel(pycuda::complex<double> *W_new , pycuda::complex<double> *W)
{
int X_DIM = blockDim.x*gridDim.x;
//int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
W_new[indexTotal] = W[indexTotal];
}
"""
square_gpuarray_source = """
#include <pycuda-complex.hpp>
#include<math.h>
__global__ void Kernel(pycuda::complex<double> *W_new , pycuda::complex<double> *W)
{
int X_DIM = blockDim.x*gridDim.x;
//int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
W_new[indexTotal] = pow(W[indexTotal],2);
}
"""
sum_stride_source = """
#include <pycuda-complex.hpp>
#include<math.h>
__global__ void Kernel(pycuda::complex<double> *W, pycuda::complex<double> *W_sum, int m)
{
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int k = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM; // indexTotal
int q = int( pow(2., m) );
if( k%(2*q) == 0 ){
W[ k ] += W[ k + q ];
W_sum[0] = W[0];
}
}
"""
gpuarray_copy_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
__global__ void Kernel( pycuda::complex<double> *B_out , pycuda::complex<double> *B_in )
{
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
B_out[ indexTotal ] = B_in[ indexTotal ];
}
"""
# F = - sign(p)f(p)
theta_fp_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__device__ double f( double p)
{
return %s;
}
__global__ void Kernel( pycuda::complex<double> *B )
{
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) %% P_DIM ;
//const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) %% X_DIM ;
double p = dp*(i - 0.5*P_DIM );
if( p >= 0. )
B[ indexTotal ] *= f(p);
else
B[ indexTotal ] *= -f(p);
}
"""
# Non-linear phase space
gpu_sum_axis0_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel( pycuda::complex<double> *Probability_x , pycuda::complex<double> *W , int P_DIM)
{
int X_DIM = blockDim.x*gridDim.x;
const int index_x = threadIdx.x + blockDim.x*blockIdx.x ;
pycuda::complex<double> sum=0.;
for(int i=0; i<P_DIM; i++ )
sum += W[ index_x + i*X_DIM ];
Probability_x[ index_x ] = pycuda::real(sum);
}
"""
gpu_sum_axis1_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel( pycuda::complex<double> *Probability_p , pycuda::complex<double> *W , int X_DIM)
{
int P_DIM = blockDim.x*gridDim.x;
const int index_p = (threadIdx.x + blockDim.x*blockIdx.x)*X_DIM ;
pycuda::complex<double> sum=0.;
for(int j=0; j<X_DIM; j++ )
sum += W[ index_p + j ];
Probability_p[ index_p ] = pycuda::real(sum);
}
"""
roll_FirstRowCopy_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
%s
__global__ void Kernel( pycuda::complex<double> *W, pycuda::complex<double> *Probability_X , int P_DIM)
{
int X_DIM = blockDim.x*gridDim.x;
const int index_x = threadIdx.x + blockDim.x*blockIdx.x ;
pycuda::complex<double> firstRow = Probability_X[index_x];
for(int i=0; i<P_DIM; i++ ) W[ index_x + i*X_DIM ] = firstRow;
}
"""
expPotential_GrossPitaevskii_source = """
#include <pycuda-complex.hpp>
#include<math.h>
#define _USE_MATH_DEFINES
{CUDA_constants}
__global__ void Kernel( double t_GPU,
pycuda::complex<double> *B, pycuda::complex<double> *ProbMinus, pycuda::complex<double> *ProbPlus)
{{
double t = t_GPU;
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) % P_DIM ;
const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) % X_DIM ;
const double x = dx*( j - 0.5*X_DIM );
const double theta = dtheta*( i - 0.5*P_DIM );
double phase = -0.5*dt*a_GP * pycuda::real<double>( ProbMinus[indexTotal] - ProbPlus[indexTotal] )/hBar;
B[ indexTotal ] *= pycuda::complex<double>( cos(phase) , sin(phase) );
}}
__global__ void Kernel_Bloch(
double dt_GPU, pycuda::complex<double> *B, pycuda::complex<double> *ProbMinus, pycuda::complex<double> *ProbPlus)
{{
//double t = t_GPU;
int X_DIM = blockDim.x*gridDim.x;
int P_DIM = gridDim.y;
const int indexTotal = threadIdx.x + blockDim.x*blockIdx.x + blockIdx.y*X_DIM;
const int i = (blockIdx.y + P_DIM/2) % P_DIM ;
const int j = (threadIdx.x + blockDim.x*blockIdx.x + X_DIM/2) % X_DIM ;
const double x = dx*( j - 0.5*X_DIM );
const double theta = dtheta*( i - 0.5*P_DIM );
B[ indexTotal ] *= exp( -0.25*dt_GPU*a_GP * ( ProbMinus[indexTotal] + ProbPlus[indexTotal] ) );
}}
"""
#=====================================================================================================
class Propagator_Base :
def SetTimeTrack(self, dt, timeSteps=128, skipFrames=1, fileName='', compression=None):
self.runTime = float(dt)*timeSteps
self.timeSteps = timeSteps
self.dt = dt
self.skipFrames = skipFrames
self.fileName = fileName
self.compression = compression
#self.timeRange = range(1, self.timeSteps+1)
self.__x_p_representation__ = 'x_p'
self.zero_negative_Function = SourceModule( zero_negative_source,arch="sm_20").get_function("Kernel")
self.sum_stride_Function = SourceModule(sum_stride_source).get_function( "Kernel" )
self.copy_gpuarray = SourceModule(copy_gpuarray_source).get_function( "Kernel" )
self.square_gpuarray_GPU = SourceModule(square_gpuarray_source).get_function( "Kernel" )
def SetPhaseSpaceBox2D(self, X_gridDIM, P_gridDIM, X_amplitude, P_amplitude ):
"""
X_gridDIM: discretization of the x grid.
P_gridDIM: discretization of the p grid. This number is restricted to be always less than 1024
"""
self.X_gridDIM = X_gridDIM
self.P_gridDIM = P_gridDIM
self.X_amplitude = X_amplitude
self.P_amplitude = P_amplitude
self.dX = 2.*X_amplitude/float(X_gridDIM)
self.dP = 2.*P_amplitude/float(P_gridDIM)
self.dTheta = 2.*np.pi/(2.*P_amplitude)
self.Theta_amplitude = self.dTheta*P_gridDIM/2.
self.dLambda = 2.*np.pi/(2.*X_amplitude)
self.Lambda_amplitude = self.dLambda*X_gridDIM/2.
self.SetPhaseSpaceGrid2D()
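# Sizing sketch (illustrative assumptions): both grid dimensions should be
# powers of two (the FFT plans and sum_gpu_array assume this), X_gridDIM a
# multiple of 512 to match the hard-coded launch block (512,1,1), and
# P_gridDIM at most 1024 as noted above. For example:
#
#     # self.SetPhaseSpaceBox2D(X_gridDIM=512, P_gridDIM=512,
#     #                         X_amplitude=10., P_amplitude=10.)
#     # gives dX = 2*10./512 and dTheta = np.pi/10.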
def SetPhaseSpaceGrid2D(self):
self.X_range = np.linspace(-self.X_amplitude , self.X_amplitude -self.dX , self.X_gridDIM )
self.Lambda_range = np.linspace(-self.Lambda_amplitude , self.Lambda_amplitude-self.dLambda ,self.X_gridDIM)
self.Theta_range = np.linspace(-self.Theta_amplitude , self.Theta_amplitude - self.dTheta , self.P_gridDIM)
self.P_range = np.linspace(-self.P_amplitude , self.P_amplitude-self.dP , self.P_gridDIM)
self.X = fftpack.fftshift(self.X_range)[np.newaxis,:]
self.Theta = fftpack.fftshift(self.Theta_range)[:,np.newaxis]
self.Lambda = fftpack.fftshift(self.Lambda_range)[np.newaxis,:]
self.P = fftpack.fftshift(self.P_range)[:,np.newaxis]
# The operators in GPU are fft shifted
self.X_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.X + 0.*self.P, dtype = np.complex128) )
self.P_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.P + 0.*self.X, dtype = np.complex128) )
self.Theta_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Theta + 0.*self.X, dtype = np.complex128) )
self.X2_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.X**2 + 0.*self.P, dtype=np.complex128))
self.P2_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.P**2 + 0.*self.X, dtype=np.complex128))
self.XP_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.P*self.X ,dtype=np.complex128))
self.phase_LambdaTheta_GPU = gpuarray.to_gpu(
np.ascontiguousarray( np.exp( 0.5*1j*self.Lambda*self.Theta ) , dtype=np.complex128) )
def symplecticExpK(self,mass, dt, c , Z ):
x = Z[0]
p = Z[1]
return Z + np.array([ dt*c*p/mass, 0 ])
def symplecticExpV(self,mass, dt, d , dVdx, Z ):
x = Z[0]
p = Z[1]
return Z + np.array([ 0, -dt*d*dVdx(0.,x) ])
def symplecticExpKExpV2(self, mass, dt , dVdx, Z0 ):
"""
Second order CLASSICAL symplectic propagator step
Parameters:
mass
dt
dVdx function
Z0 : initial state in phase space
"""
c1 = 0.5
c2 = 0.5
d1 = 1.0
d2 = 0.0
Z = self.symplecticExpK(mass,dt,c1,self.symplecticExpV(mass, dt, d1 , dVdx , Z0 ))
return self.symplecticExpK(mass,dt,c2, self.symplecticExpV(mass, dt, d2 , dVdx, Z ) )
def SymplecticPropagator(self, dt, n_iterations, Z0, gammaDamping ):
"""
Second order classical symplectic propagator.
Parameters:
dt
n_iterations
Z0 : initial state in phase space
gammaDamping : damping coefficient
(mass and dPotential are taken from self; a usage sketch follows this method)
"""
Z = Z0
trajectory = []
for i in range(n_iterations):
trajectory.append( np.append(Z,dt*(i+1) ) ) #Add time as third element
Z = self.symplecticExpKExpV2( self.mass, dt , self.dPotential ,Z )
Z[1] = np.exp(-2.*gammaDamping*dt)*Z[1]
return np.array(trajectory)
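# A usage sketch (illustrative values; mass and dPotentialString must already
# be configured on the instance):
#
#     # Z0 = np.array([1.0, 0.0])            # (x0, p0)
#     # traj = self.SymplecticPropagator(dt=0.01, n_iterations=1000,
#     #                                  Z0=Z0, gammaDamping=0.05)
#     # traj[:, 0] = x(t), traj[:, 1] = p(t), traj[:, 2] = t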
def SymplecticPropagator_SmoothedP(self, dt, n_iterations, Z0, gammaDamping ):
"""
Second order classical symplectic propagator with smoothed momentum damping.
Parameters:
dt
n_iterations
Z0 : initial state in phase space
gammaDamping : damping coefficient
(mass, dPotential and fp_Damping are taken from self)
"""
Z = Z0
trajectory = []
for i in range(n_iterations):
trajectory.append( np.append(Z,dt*(i+1) ) ) #Add time as third element
Z = self.symplecticExpKExpV2( self.mass, dt , self.dPotential ,Z )
Z[1] = Z[1] - 2.*gammaDamping*dt*np.sign(Z[1])*self.fp_Damping( Z[1] )
return np.array(trajectory)
def save_Frame(self,t,Psi0_GPU):
print ' progress ', 100*t/(self.timeSteps+1), '%'
PsiTemp = Psi0_GPU.get()
self.file.create_dataset(str(t), data = PsiTemp )
def gpuarray_copy(self, dest_GPU, source_GPU):
"""
device-to-device copy of a gpuarray
"""
# copy the whole buffer: byte count = itemsize * number of elements
cuda.memcpy_dtod(dest_GPU.ptr, source_GPU.ptr, dest_GPU.dtype.itemsize*dest_GPU.size)
def heaviside(self,x):
x = np.array(x)
if x.shape != ():
y = np.zeros(x.shape)
y[x > 0.0] = 1
y[x == 0.0] = 0.5
else: # special case for 0d array (a number)
if x > 0: y = 1
elif x == 0: y = 0.5
else: y = 0
return y
def Potential(self,t,x):
"""
Potential used to draw the energy level sets
"""
pow = np.power
atan = np.arctan
M_E = np.e
sqrt = np.sqrt
exp = np.exp
Heaviside = self.heaviside
return eval ( self.potentialString, np.__dict__, locals() )
def dPotential(self,t,x):
"""
derivative of Potential
"""
pow = np.power
atan = np.arctan
M_E = np.e
return eval ( self.dPotentialString, np.__dict__, locals() )
def KineticEnergy(self, p ):
pow = np.power
atan = np.arctan
sqrt = np.sqrt
cosh = np.cosh
return eval ( self.kinematicString , np.__dict__, locals() )
def Fourier_XTheta_to_XP(self,M):
return fftpack.ifft( M ,axis=0 )
def Morse_EnergyLevels(self,n):
nu = self.morse_a/(2*np.pi)*np.sqrt(2*self.morse_Depth/self.mass)
return nu*(n + 0.5) - (nu*(n + 0.5))**2/(4*self.morse_Depth) - self.morse_Depth
def MorsePsi(self, n , morse_a, morse_Depth, mass, r0, r):
x = (r-r0)*morse_a
LAMBDA = np.sqrt(2*mass*morse_Depth )/morse_a
z = 2.*LAMBDA*np.exp(-x)
return z**( LAMBDA - n - 0.5 )*np.exp( -0.5*z )*hyp1f1( -n , 2*LAMBDA - 2*n , z )
def fft_shift2D(self, X ):
"""
double fft shift of a 2D array along both axes
"""
return fftpack.fftshift(fftpack.fftshift(X,axes=1),axes=0)
def Wigner_HarmonicOscillator(self,n,omega,x,p):
"""
Wigner function of the harmonic oscillator
Parameters
n : quantum number
omega : oscillator frequency
x,p : center of the packet
"""
self.representation = 'x_p'
r2 = self.mass*omega**2*((self.X - x))**2 + ((self.P - p ))**2/self.mass
W = (-1)**(n)*laguerre(n)( 2*r2 )*np.exp(-r2 )
norm = np.sum( W )*self.dX*self.dP
return W/norm
def Heaviside(self,x):
"""
Heaviside step function
"""
return 0.5*(np.sign(x) + 1.)
def Psi_Half_HarmonicOscillator(self,n,omega,x0,p0,X):
#return np.exp( - 0.5*X**2 )
k = np.sqrt( self.mass*omega/self.hBar )
return np.exp( 1j*p0 )*np.exp( -0.5*k**2*(X-x0)**2 )*legendre(n)( k*(X-x0) )*self.Heaviside( -(X-x0) )
def Wigner_Half_HarmonicOscillator(self,n,omega,x0,p0):
"""
Wigner function of the half harmonic oscillator
Parameters:
x0,p0 : center of packet
n : Quantum number that must be odd
"""
ncols = self.X_range.shape[0]
X_minus = self.X_range[np.newaxis,:] - 0.5*self.hBar*self.Theta_range[ :, np.newaxis ]
X_plus = self.X_range[np.newaxis,:] + 0.5*self.hBar*self.Theta_range[ :, np.newaxis ]
psi_minus = self.Psi_Half_HarmonicOscillator( n,omega,x0,p0, X_minus )
psi_plus = self.Psi_Half_HarmonicOscillator( n,omega,x0,p0, X_plus )
W = psi_minus * psi_plus.conj()
#W = fftpack.fftshift( np.exp( -X_minus**2 )*np.exp( -X_plus**2 ) )+0j
W = self.Fourier_Theta_To_P_CPU( fftpack.fftshift(W) )
norm = np.sum( W )*self.dX*self.dP
W = W/norm
print ' norm W = ', np.sum( W )*self.dX*self.dP
print ' '
return W
def Fourier_Theta_To_P_CPU(self, W ):
return fftpack.ifft( W , axis = 0)
def Fourier_P_To_Theta_CPU(self, W ):
return fftpack.fft( W , axis = 0)
def Fourier_X_To_Lambda_CPU(self, W ):
return fftpack.fft( W , axis = 1)
def Fourier_Lambda_To_X_CPU(self, W ):
return fftpack.ifft( W , axis = 1)
# GPU
def Fourier_X_To_Lambda_GPU(self, W_GPU):
cuda_fft.fft_Z2Z( W_GPU , W_GPU , self.plan_Z2Z_2D_Axes1 )
def Fourier_Lambda_To_X_GPU(self, W_GPU):
cuda_fft.ifft_Z2Z( W_GPU, W_GPU, self.plan_Z2Z_2D_Axes1 )
W_GPU *= 1./float(self.X_gridDIM)
def Fourier_P_To_Theta_GPU(self, W_GPU):
cuda_fft.fft_Z2Z( W_GPU, W_GPU, self.plan_Z2Z_2D_Axes0 )
def Fourier_Theta_To_P_GPU(self, W_GPU ):
cuda_fft.ifft_Z2Z( W_GPU, W_GPU, self.plan_Z2Z_2D_Axes0 )
W_GPU *= 1./float(self.P_gridDIM)
def Fourier_XTheta_To_LambdaP_GPU(self, W_GPU ):
self.Fourier_X_To_Lambda_GPU( W_GPU)
self.Fourier_Theta_To_P_GPU( W_GPU )
def Fourier_LambdaP_To_XTheta_GPU(self, W_GPU):
self.Fourier_Lambda_To_X_GPU( W_GPU)
self.Fourier_P_To_Theta_GPU( W_GPU)
def sum_gpu_array(self, W_GPU, W_sum_GPU ):
"""
Calculates the sum of the GPU array W_GPU by stride-doubling reduction.
The size must be a power of two; see the CPU reference sketch below.
"""
N = W_GPU.size
#print ' N = ', N
n = int( np.log2(N) )
#print ' n = ', n
m = np.int32(0)
#blockCUDA = (512,1,1)
#gridCUDA = (self.X_gridDIM/512, self.P_gridDIM)
for s in range(n):
self.sum_stride_Function( W_GPU, W_sum_GPU, m , block=self.blockCUDA, grid=self.gridCUDA)
m = np.int32( m + 1 )
#print ' m = ', m , ' sum = ', np.real( W_sum_GPU.get()[0] )
return np.real( W_sum_GPU.get()[0] )
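# The loop above performs a log2(N)-step stride-doubling reduction on the
# GPU. A CPU reference of the same scheme (assuming N is a power of two):
#
#     # def pairwise_sum(w):
#     #     w = w.copy()
#     #     q = 1
#     #     while q < w.size:
#     #         w[::2*q] += w[q::2*q]
#     #         q *= 2
#     #     return w[0]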
def SetCUDA_Constants(self):
self.CUDA_constants = '__constant__ double dt=%f;'%(self.dt)
self.CUDA_constants += '__constant__ double dx=%f;'%(self.dX)
self.CUDA_constants += '__constant__ double dp=%f;'%(self.dP)
self.CUDA_constants += '__constant__ double dtheta=%f;'%(self.dTheta)
self.CUDA_constants += '__constant__ double dlambda=%f;'%(self.dLambda)
self.CUDA_constants += '__constant__ double mass=%f;'%(self.mass)
self.CUDA_constants += '__constant__ double hBar=%f;'%(self.hBar)
self.CUDA_constants += '__constant__ double D_Theta =%f; '%(self.D_Theta)
self.CUDA_constants += '__constant__ double D_Lambda =%f; '%(self.D_Lambda)
def WriteHDF5_variables(self):
self.file['dx'] = self.dX
self.file['dtheta'] = self.dTheta
self.file['dp'] = self.dP
self.file['dlambda'] = self.dLambda
self.file['x_gridDIM'] = self.X_gridDIM
self.file['p_gridDIM'] = self.P_gridDIM
self.file['x_min'] = -self.X_amplitude; self.file['x_max'] = self.X_amplitude - self.dX
self.file['p_min'] = -self.P_amplitude; self.file['p_max'] = self.P_amplitude - self.dP
self.file['lambda_min'] = self.Lambda_range.min(); self.file['lambda_max'] = self.Lambda_range.max()
self.file['dt'] = self.dt;
self.file['timeSteps'] = self.timeSteps
self.file['skipFrames'] = self.skipFrames
def SetFFT_Plans(self):
self.plan_Z2Z_2D = cuda_fft.Plan_Z2Z( (self.P_gridDIM, self.X_gridDIM) , batch=1 )
self.plan_Z2Z_2D_Axes0 = cuda_fft.Plan_Z2Z_2D_Axis0( (self.P_gridDIM,self.X_gridDIM) )
self.plan_Z2Z_2D_Axes1 = cuda_fft.Plan_Z2Z_2D_Axis1( (self.P_gridDIM,self.X_gridDIM) )
self.plan_Z2Z_1D = cuda_fft.Plan_Z2Z( (self.X_gridDIM,) , batch=1 )
def SetCUDA_Functions(self):
self.expPotential_GPU = SourceModule(\
expPotential_source.format(CUDA_constants=self.CUDA_constants,
potentialString=self.potentialString)
).get_function( "Kernel" )
self.expPotential_Bloch_GPU = SourceModule(\
expPotential_source.format(CUDA_constants=self.CUDA_constants,
potentialString=self.potentialString)
).get_function( "Kernel_Bloch" )
self.expPLambdaKinetic_GPU = SourceModule(
expPLambdaKinetic_source.format(
CUDA_constants=self.CUDA_constants,kinematicString=self.kinematicString)
).get_function("Kernel")
self.expPLambdaKinetic_Bloch_GPU = SourceModule(
expPLambdaKinetic_source.format(
CUDA_constants=self.CUDA_constants,kinematicString=self.kinematicString)
).get_function("Kernel_Bloch")
self.theta_fp_Damping_Function = SourceModule(\
theta_fp_source%(self.CUDA_constants,self.fp_Damping_String),
arch="sm_20").get_function("Kernel")
self.gpu_array_copy_Function = SourceModule( gpuarray_copy_source, arch="sm_20").get_function( "Kernel" )
self.roll_FirstRowCopy_Function = SourceModule( roll_FirstRowCopy_source%self.CUDA_constants, arch="sm_20").get_function( "Kernel" )
self.gpu_sum_axis0_Function = SourceModule( gpu_sum_axis0_source%self.CUDA_constants ).get_function( "Kernel" )
self.gpu_sum_axis1_Function = SourceModule( gpu_sum_axis1_source%self.CUDA_constants ).get_function( "Kernel" )
if self.GPitaevskiiCoeff != 0. :
self.expPotential_GrossPitaevskii_GPU = SourceModule(\
expPotential_GrossPitaevskii_source.format(
CUDA_constants=self.CUDA_constants)
).get_function( "Kernel" )
self.expPotential_GrossPitaevskii_Bloch_GPU = SourceModule(\
expPotential_GrossPitaevskii_source.format(
CUDA_constants=self.CUDA_constants)
).get_function( "Kernel_Bloch" )
def WignerFunctionFromFile(self,n, fileName=None):
if fileName==None:
FILE = h5py.File(self.fileName)
else :
FILE = h5py.File(fileName)
W = FILE['/'+str(n)][...]
FILE.close()
return W
def LoadEhrenfestFromFile(self):
FILE = h5py.File( self.fileName ,'r')
timeRange = FILE['timeRange'][...]
timeRangeIndexSaved = FILE['timeRangeIndexSaved'][...]
self.X_average = FILE['/Ehrenfest/X_Ehrenfest'][...]
self.X2_average = FILE['/Ehrenfest/X2_Ehrenfest'][...]
self.P_average = FILE['/Ehrenfest/P_Ehrenfest'][...]
self.P2_average = FILE['/Ehrenfest/P2_Ehrenfest'][...]
self.XP_average = FILE['/Ehrenfest/XP_Ehrenfest'][...]
self.dPotentialdX_average = FILE['/Ehrenfest/dPotentialdX_Ehrenfest'][...]
self.PdPotentialdX_average = FILE['/Ehrenfest/PdPotentialdX_average'][...]
self.XdPotentialdX_average = FILE['/Ehrenfest/XdPotentialdX_average'][...]
self.Hamiltonian_average = FILE['/Ehrenfest/Hamiltonian_average'][...]
self.W_init = FILE['W_init'][...] +0j
self.W_end = FILE['W_end'][...] +0j
FILE.close()
def Ehrenfest_X_FromFile(self, fileName=None):
if fileName==None:
FILE = h5py.File(self.fileName)
else :
FILE = h5py.File(fileName)
X_average = FILE['/Ehrenfest/X_average'][...]
dt = FILE['/dt'][...]
timeSteps = FILE['/timeSteps'][...]
timeRange = np.array( range(0, timeSteps+1) )*dt
FILE.close()
return timeRange , X_average
def Ehrenfest_P_FromFile(self, fileName=None):
if fileName==None:
FILE = h5py.File(self.fileName)
else :
FILE = h5py.File(fileName)
out = FILE['/Ehrenfest/P_average'][...]
dt = FILE['/dt'][...]
timeSteps = FILE['/timeSteps'][...]
timeRange = np.array( range(0, timeSteps+1) )*dt
FILE.close()
return timeRange , out
def Ehrenfest_X2_FromFile(self, fileName=None):
if fileName==None:
FILE = h5py.File(self.fileName)
else :
FILE = h5py.File(fileName)
out = FILE['/Ehrenfest/X2_average'][...]
dt = FILE['/dt'][...]
timeSteps = FILE['/timeSteps'][...]
timeRange = np.array( range(0, timeSteps+1) )*dt
FILE.close()
return timeRange , out
def Ehrenfest_P2_FromFile(self, fileName=None):
if fileName==None:
FILE = h5py.File(self.fileName)
else :
FILE = h5py.File(fileName)
out = FILE['/Ehrenfest/P2_average'][...]
dt = FILE['/dt'][...]
timeSteps = FILE['/timeSteps'][...]
timeRange = np.array( range(0, timeSteps+1) )*dt
FILE.close()
return timeRange , out
def Ehrenfest_XP_FromFile(self, fileName=None):
if fileName==None:
FILE = h5py.File(self.fileName)
else :
FILE = h5py.File(fileName)
out = FILE['/Ehrenfest/XP_average'][...]
dt = FILE['/dt'][...]
timeSteps = FILE['/timeSteps'][...]
timeRange = np.array( range(0, timeSteps+1) )*dt
FILE.close()
return timeRange , out
def Ehrenfest_Hamiltonian_FromFile(self, fileName=None):
if fileName==None:
FILE = h5py.File(self.fileName)
else :
FILE = h5py.File(fileName)
out = FILE['/Ehrenfest/Hamiltonian_average'][...]
dt = FILE['/dt'][...]
timeSteps = FILE['/timeSteps'][...]
timeRange = np.array( range(0, timeSteps+1) )*dt
FILE.close()
return timeRange , out
def WignerMarginal_Probability_x(self,W):
return np.sum( W , axis=0 )*self.dP
def WignerMarginal_Probability_p(self,W):
return np.sum( W , axis=1 )*self.dX
def PlotWignerFrame(self, W_input , plotRange , global_color , energy_Levels ,aspectRatio=1):
x_plotRange,p_plotRange = plotRange
global_min, global_max = global_color
W = W_input.copy()
W = fftpack.fftshift(W.real)
dp = self.dP
p_min = -self.P_amplitude
p_max = self.P_amplitude - dp
x_min = -self.X_amplitude
x_max = self.X_amplitude - self.dX
print 'min = ', np.min( W ), ' max = ', np.max( W )
#print 'final time =', self.timeRange[-1] ,'a.u. =',\
print 'normalization = ', np.sum( W )*self.dX*dp
zero_position = abs( global_min +1e-3) / (abs( global_max) + abs(global_min))
wigner_cdict = {'red' : ((0., 0., 0.),
(zero_position, 1., 1.),
(1., 1., 1.)),
'green' : ((0., 0., 0.),
(zero_position, 1., 1.),
(1., 0., 0.)),
'blue' : ((0., 1., 1.),
(zero_position, 1., 1.),
(1., 0., 0.)) }
wigner_cmap = matplotlib.colors.LinearSegmentedColormap('wigner_colormap', wigner_cdict, 512)
fig, ax = matplotlib.pyplot.subplots(figsize=(12, 10))
cax = ax.imshow( W ,origin='lower',interpolation='none',\
extent=[ x_min , x_max, p_min, p_max], vmin= global_min, vmax=global_max, cmap=wigner_cmap)
min_energy, max_energy, delta_energy = energy_Levels
ax.contour(self.Hamiltonian ,
np.arange( min_energy, max_energy, delta_energy ),
origin='lower',extent=[x_min,x_max,p_min,p_max],
linewidths=0.25,colors='k')
axis_font = {'size':'24'}
ax.set_xlabel(r'$x$',**axis_font)
ax.set_ylabel(r'$p$',**axis_font)
ax.set_xlim( (x_plotRange[0] , x_plotRange[1] ) )
ax.set_ylim( (p_plotRange[0] , p_plotRange[1] ) )
ax.set_aspect(aspectRatio)
#ax.grid('on')
cbar = fig.colorbar(cax, ticks=[-0.3, -0.2,-0.1, 0, 0.1, 0.2 , 0.3])
matplotlib.rcParams.update({'font.size': 18})
return fig
#..................................................................................................
def Product_ThetaP(self, LW_GPU, W_GPU):
"""
Caldeira-Leggett dissipator
"""
self.gpu_array_copy_Function( LW_GPU , W_GPU , block=self.blockCUDA , grid=self.gridCUDA )
LW_GPU *= self.P_GPU
# x p -> x theta
self.Fourier_P_To_Theta_GPU( LW_GPU )
LW_GPU *= self.Theta_GPU
self.Fourier_Theta_To_P_GPU( LW_GPU )
def CaldeiraDissipatorOrder2(self, LW_GPU, LW_temp_GPU, W_GPU):
LW_GPU *= 0j
LW_GPU += W_GPU
self.Product_ThetaP( LW_temp_GPU , W_GPU )
LW_GPU += 1j * self.dt *self.gammaDamping * LW_temp_GPU
self.Product_ThetaP( LW_temp_GPU , LW_GPU )
W_GPU += 2. * 1j * self.dt *self.gammaDamping * LW_temp_GPU
def Theta_fp_Damping(self, LW_GPU, W_GPU):
self.gpu_array_copy_Function( LW_GPU , W_GPU , block=self.blockCUDA , grid=self.gridCUDA )
self.theta_fp_Damping_Function( LW_GPU , block=self.blockCUDA , grid=self.gridCUDA )
# x p -> theta p
self.Fourier_P_To_Theta_GPU( LW_GPU )
LW_GPU *= self.Theta_GPU
self.Fourier_Theta_To_P_GPU( LW_GPU )
def CaldeiraDissipatorOrder3(self, LW_GPU, LW_temp_GPU, W_GPU, dampingFunction):
# dampingFunction is a function of momentum such as Theta_fp_Damping
LW_GPU *= 0j
LW_GPU += W_GPU
dampingFunction( LW_temp_GPU , W_GPU )
LW_GPU += 2./3. * 1j * self.dt *self.gammaDamping * LW_temp_GPU
dampingFunction( LW_temp_GPU , LW_GPU )
LW_GPU += 2./2. * 1j * self.dt *self.gammaDamping * LW_temp_GPU
dampingFunction( LW_temp_GPU , LW_GPU )
W_GPU += 2. * 1j * self.dt *self.gammaDamping * LW_temp_GPU
def MomentumAuxFun(self,n,m,gridDIM):
pi = np.pi
if(n==m):
return 0. #np.sum( self.P_Range()/self.gridDIM )
else:
return (np.sin( pi*(n-m) )*np.cos( pi*(n-m)/gridDIM) - gridDIM*np.cos( pi*(n-m) )*\
np.sin( pi*(n-m)/gridDIM ))/( np.sin( pi*(n-m)/gridDIM)**2)
def OperatorP_XBasis(self):
"""
Operator P in the X basis
gridDIM: grid dimension
dX: discretization step in X
"""
gridDIM = self.X_gridDIM
P = np.zeros( (gridDIM,gridDIM) )
indexRange = range(gridDIM)
for n in indexRange:
for m in indexRange:
jn = (2*n - gridDIM + 1.)/2.
jm = (2*m - gridDIM + 1.)/2.
P[n,m] = self.MomentumAuxFun(jn,jm, gridDIM)
return np.pi*1j/(self.dX*gridDIM**2)*P
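# Sanity-check sketch: the matrix returned above is an antisymmetric real
# kernel times 1j, so it should come out Hermitian:
#
#     # P_op = self.OperatorP_XBasis()
#     # assert np.allclose(P_op, P_op.conj().T)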
def OperatorX_XBasis(self):
"""
Operator X in the X basis
gridDIM: grid dimension
dX: discretization step in X
"""
return np.diag( self.X_range ).astype(np.complex128)
def g_ODM(self,p,epsilon):
return np.sqrt( self.L_material*self.fp_Damping( np.abs(p) + self.hBar/(2.*self.L_material) ) )
def g_sign_ODM(self,p,epsilon):
return self.g_ODM(p,epsilon)*np.sign(p)
def to_gpu(self, x ):
return gpuarray.to_gpu( np.ascontiguousarray( x , dtype= np.complex128) )
def SetA_ODM(self,epsilon):
X_plus_Theta = (self.X + 0.5*self.hBar*self.Theta)
X_minus_Theta = (self.X - 0.5*self.hBar*self.Theta)
X_plus_ZeroTheta = (self.X + 0.*self.Theta)
X_plus_2Theta = (self.X + self.hBar*self.Theta)
cos_plus = np.cos( X_plus_Theta )
cos_minus = np.cos( X_minus_Theta )
sin_plus = np.sin( X_plus_Theta )
sin_minus = np.sin( X_minus_Theta )
g_plus = self.g_ODM( self.P + 0.5*self.hBar*self.Lambda , epsilon)
g_minus = self.g_ODM( self.P - 0.5*self.hBar*self.Lambda , epsilon)
g_sign_plus = self.g_sign_ODM( self.P + 0.5*self.hBar*self.Lambda ,epsilon)
g_sign_minus = self.g_sign_ODM( self.P - 0.5*self.hBar*self.Lambda ,epsilon)
self.cos_GPU = self.to_gpu( np.cos( X_plus_ZeroTheta ) )
self.cos_plus2_GPU = self.to_gpu( np.cos( X_plus_2Theta ) )
self.cos_plus_GPU = gpuarray.to_gpu( np.ascontiguousarray( cos_plus , dtype= np.complex128) )
self.cos_minus_GPU = gpuarray.to_gpu( np.ascontiguousarray( cos_minus , dtype= np.complex128) )
self.cos_plus_minus_GPU = gpuarray.to_gpu( np.ascontiguousarray( cos_plus*cos_minus, dtype= np.complex128) )
self.sin_GPU = self.to_gpu( np.sin( X_plus_ZeroTheta ) )
self.sin_plus2_GPU = self.to_gpu( np.sin(X_plus_2Theta) )
self.sin_plus_GPU = gpuarray.to_gpu( np.ascontiguousarray( sin_plus , dtype= np.complex128) )
self.sin_minus_GPU = gpuarray.to_gpu( np.ascontiguousarray( sin_minus , dtype= np.complex128) )
self.sin_plus_minus_GPU = gpuarray.to_gpu( np.ascontiguousarray( sin_plus*sin_minus, dtype= np.complex128) )
self.g_ODM_plus_GPU = gpuarray.to_gpu( np.ascontiguousarray( g_plus , dtype= np.complex128) )
self.g_ODM_minus_GPU = gpuarray.to_gpu( np.ascontiguousarray( g_minus , dtype= np.complex128) )
self.gg_ODM_plus_GPU = gpuarray.to_gpu( np.ascontiguousarray( -0.5*g_plus**2, dtype= np.complex128) )
self.gg_ODM_minus_GPU = gpuarray.to_gpu( np.ascontiguousarray( -0.5*g_minus**2, dtype= np.complex128) )
self.g_sign_ODM_plus_GPU = gpuarray.to_gpu( np.ascontiguousarray( g_sign_plus , dtype= np.complex128) )
self.g_sign_ODM_minus_GPU = gpuarray.to_gpu( np.ascontiguousarray( g_sign_minus , dtype= np.complex128) )
def Lindbladian_ODM(self, LW_GPU, LW_temp_GPU, n , W_GPU, sign):
LW_GPU *= 0j
# ---------------------------------
# A W A 1
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_plus2_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_GPU
LW_GPU += LW_temp_GPU # theta x
# ---------------------------------
# A W A 2
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_plus2_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_GPU
LW_temp_GPU *= 1j
LW_GPU += LW_temp_GPU # theta x
# ---------------------------------
# A W A 3
#
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_plus2_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_GPU
LW_temp_GPU *= -1j
LW_GPU += LW_temp_GPU # theta x
# ---------------------------------
# A W A 4
#
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_plus2_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_GPU
LW_GPU += LW_temp_GPU # theta x
#----------------------------------
# ---------------------------------
# AA W 1
#
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_GPU
LW_GPU += LW_temp_GPU # theta x
#----------------------------------
# ---------------------------------
# AA W 2
#
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_GPU
LW_temp_GPU *= -1j
LW_GPU += LW_temp_GPU # theta x
#----------------------------------
# ---------------------------------
# AA W 3
#
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_GPU
LW_temp_GPU *= 1j
LW_GPU += LW_temp_GPU # theta x
#----------------------------------
# ---------------------------------
# AA W 4
#
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_GPU
# theta x -> p lambda
self.Fourier_X_To_Lambda_GPU(LW_temp_GPU )
self.Fourier_Theta_To_P_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_GPU
LW_GPU += LW_temp_GPU # theta x
#-------------------------------------
self.Fourier_Theta_To_P_GPU( LW_GPU )
LW_GPU *= self.dt*2*self.gammaDamping/n
def _Lindbladian_ODM(self, LW_GPU, LW_temp_GPU, n, weight , sign , W_GPU):
#
# Ignoring non-commutativity
#
LW_GPU *= 0j
# ---------------------------------
# A W A
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
LW_temp_GPU *= self.g_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_plus_minus_GPU
LW_GPU += LW_temp_GPU # theta x
# ---------------------------------
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_ODM_plus_GPU
LW_temp_GPU *= self.g_sign_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.cos_minus_GPU
LW_temp_GPU *= self.sin_plus_GPU
LW_temp_GPU *= 1j*sign
LW_GPU += LW_temp_GPU
# ---------------------------------
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
LW_temp_GPU *= self.g_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_minus_GPU
LW_temp_GPU *= self.cos_plus_GPU
LW_temp_GPU *= -1j*sign
LW_GPU += LW_temp_GPU
# ---------------------------------
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.g_sign_ODM_plus_GPU
LW_temp_GPU *= self.g_sign_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_temp_GPU *= self.sin_plus_minus_GPU
LW_GPU += LW_temp_GPU
#-------------------------------------
# ---------------------------------
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.gg_ODM_plus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_GPU += LW_temp_GPU
# ---------------------------------
LW_temp_GPU *= 0j
LW_temp_GPU += W_GPU
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( LW_temp_GPU )
LW_temp_GPU *= self.gg_ODM_minus_GPU
# p lambda -> theta x
self.Fourier_Lambda_To_X_GPU(LW_temp_GPU )
self.Fourier_P_To_Theta_GPU( LW_temp_GPU )
LW_GPU += LW_temp_GPU
#--------------------------------------
self.Fourier_Theta_To_P_GPU( LW_GPU )
LW_GPU *= self.dt*2*self.gammaDamping*weight/n
def Lindbladian_ODM_Order1 (self, W2_GPU, LW_temp_GPU, LW_temp2_GPU, weight , sign, W_GPU):
#sign = 1
#weight = 1.
self._Lindbladian_ODM( LW_temp_GPU , LW_temp2_GPU , 1., weight, sign, W_GPU )
W_GPU += LW_temp_GPU
def Lindbladian_ODM_Order2 (self, W2_GPU, LW_temp_GPU, LW_temp2_GPU, weight , sign, W_GPU):
#sign = 1
#weight = 1.
self._Lindbladian_ODM( LW_temp_GPU , LW_temp2_GPU , 2., weight, sign, W_GPU)
W2_GPU = W_GPU + LW_temp_GPU
self._Lindbladian_ODM( LW_temp_GPU , LW_temp2_GPU , 1., weight, sign, W2_GPU)
W_GPU += LW_temp_GPU
def Lindbladian_ODM_Order3 (self, W2_GPU, LW_temp_GPU, LW_temp2_GPU, weight , sign, W_GPU):
#sign = 1
#weight = 1.
self._Lindbladian_ODM( LW_temp_GPU , LW_temp2_GPU , 3. ,weight, sign, W_GPU)
W2_GPU = W_GPU + LW_temp_GPU
self._Lindbladian_ODM( LW_temp_GPU , LW_temp2_GPU , 2. ,weight, sign, W2_GPU)
W2_GPU = W_GPU + LW_temp_GPU
self._Lindbladian_ODM( LW_temp_GPU , LW_temp2_GPU , 1. ,weight, sign, W2_GPU)
W_GPU += LW_temp_GPU
def fp_Damping(self,p):
# Force = gamma sign(p) f(p)
pow = np.power
atan = np.arctan
sqrt = np.sqrt
M_E = np.e
return eval ( self.fp_Damping_String , np.__dict__, locals() )
def MakeGrossPitaevskiiTerms(self, B_minus_GPU, B_plus_GPU, Prob_X_GPU ):
"""
Builds the non-linear terms that characterize the Gross-Pitaevskii
equation (see the note after this method).
"""
P_gridDIM_32 = np.int32(self.P_gridDIM)
cuda_fft.fft_Z2Z( Prob_X_GPU, Prob_X_GPU, self.plan_Z2Z_1D )
self.roll_FirstRowCopy_Function( B_minus_GPU, Prob_X_GPU, P_gridDIM_32,
block=self.blockCUDA, grid=(self.X_gridDIM/512,1) )
self.roll_FirstRowCopy_Function( B_plus_GPU, Prob_X_GPU, P_gridDIM_32,
block=self.blockCUDA, grid=(self.X_gridDIM/512,1) )
B_minus_GPU /= self.phase_LambdaTheta_GPU
B_plus_GPU *= self.phase_LambdaTheta_GPU
self.Fourier_Lambda_To_X_GPU( B_minus_GPU )
self.Fourier_Lambda_To_X_GPU( B_plus_GPU )
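# In CPU terms, the routine above Fourier-transforms the marginal P(x) to
# lambda space, broadcasts it over all theta rows, and multiplies by the
# conjugate phases exp(-/+ 0.5j*Lambda*Theta); after the inverse transform
# this yields the two theta-shifted copies of the density that feed
# expPotential_GrossPitaevskii_source as ProbMinus and ProbPlus.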
def Purity_GPU(self, W_GPU , W_temp_GPU):
self.square_gpuarray_GPU( W_temp_GPU , W_GPU , block=self.blockCUDA, grid=self.gridCUDA )
return 2*np.pi * self.hBar * gpuarray.sum( W_temp_GPU ).get()*self.dX*self.dP
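# Purity = 2*pi*hBar * Integral of W(x,p)^2 dx dp: it equals 1 for a pure
# state and falls below 1 as decoherence and damping mix the state.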
def ProbabilityX(self, Prob_X_GPU, W_GPU, P_gridDIM_32):
self.gpu_sum_axis0_Function( Prob_X_GPU, W_GPU, P_gridDIM_32,
block=(512,1,1), grid=(self.X_gridDIM/512,1) )
Prob_X_GPU *= self.dP
return Prob_X_GPU.get().real
def ProbabilityP(self, Prob_P_GPU, W_GPU, X_gridDIM_32):
self.gpu_sum_axis1_Function( Prob_P_GPU, W_GPU, X_gridDIM_32,
block=(512,1,1), grid=(self.P_gridDIM/512,1) )
Prob_P_GPU *= self.dX
return Prob_P_GPU.get().real
def NonLinearEnergy(self,ProbabilityX):
return self.dX*0.5*self.GPitaevskiiCoeff * np.sum( ProbabilityX**2 )
#=====================================================================================================
#
# Propagation Wigner
#
#=====================================================================================================
class GPU_Wigner2D_GPitaevskii(Propagator_Base):
"""
Wigner propagator in 2D phase space with diffusion and amplitude damping
"""
def __init__(self,X_gridDIM,P_gridDIM,X_amplitude,P_amplitude, hBar ,mass,
D_Theta, D_Lambda, gammaDamping, potentialString, dPotentialString,kinematicString,
normalization = 'Wigner'):
"""
"""
self.normalization = normalization
self.D_Theta = D_Theta
self.D_Lambda = D_Lambda
self.gammaDamping = gammaDamping
self.potentialString = potentialString
self.dPotentialString = dPotentialString
self.kinematicString = kinematicString
self.SetPhaseSpaceBox2D(X_gridDIM, P_gridDIM, X_amplitude, P_amplitude)
self.hBar = hBar
self.mass = mass
self.SetCUDA_Constants()
if self.GPitaevskiiCoeff != 0. :
self.CUDA_constants += '__constant__ double a_GP = %f; '%( self.GPitaevskiiCoeff )
##################
self.SetFFT_Plans()
self.SetCUDA_Functions()
self.Hamiltonian = self.P**2 / (2.*self.mass) + self.Potential(0,self.X)
self.Hamiltonian_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Hamiltonian.astype(np.complex128) ) )
#self.f.create_dataset('Hamiltonian', data = self.Hamiltonian.real )
self.Hamiltonian = self.fft_shift2D( self.Hamiltonian )
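# End-to-end usage sketch. Everything below is an illustrative assumption;
# GPitaevskiiCoeff and fp_Damping_String are read during __init__, and
# W_init, fileName, dampingFunction (and epsilon for 'ODM') are read by
# Run(), so the caller must provide them:
#
#     # GPU_Wigner2D_GPitaevskii.GPitaevskiiCoeff = 0.
#     # GPU_Wigner2D_GPitaevskii.fp_Damping_String = 'p'
#     # prop = GPU_Wigner2D_GPitaevskii(
#     #     X_gridDIM=512, P_gridDIM=512, X_amplitude=10., P_amplitude=10.,
#     #     hBar=1., mass=1., D_Theta=0.01, D_Lambda=0.01, gammaDamping=0.,
#     #     potentialString='0.5*x*x', dPotentialString='x',
#     #     kinematicString='p*p/(2.*mass)')
#     # prop.SetTimeTrack(dt=0.01, timeSteps=100, fileName='wigner.hdf5')
#     # prop.W_init = prop.Wigner_HarmonicOscillator(0, 1., 0., 0.)
#     # prop.dampingFunction = 'CaldeiraLeggett'
#     # prop.Run()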
def Run(self ):
try :
import os
os.remove (self.fileName)
except OSError:
pass
self.file = h5py.File(self.fileName)
self.WriteHDF5_variables()
self.file.create_dataset('Hamiltonian', data = self.Hamiltonian.real )
print " X_gridDIM = ", self.X_gridDIM, " P_gridDIM = ", self.P_gridDIM
print " dx = ", self.dX, " dp = ", self.dP
print " dLambda = ", self.dLambda, " dTheta = ", self.dTheta
print ' '
print ' GPU memory Total ', pycuda.driver.mem_get_info()[1]/float(2**30) , 'GB'
print ' GPU memory Free ', pycuda.driver.mem_get_info()[0]/float(2**30) , 'GB'
timeRangeIndex = range(0, self.timeSteps+1)
W_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
norm = gpuarray.sum( W_GPU ).get()*self.dX*self.dP
W_GPU /= norm
print 'Initial W Norm = ', gpuarray.sum( W_GPU ).get()*self.dX*self.dP
if self.dampingFunction == 'ODM':
self.SetA_ODM(self.epsilon)
dPotentialdX = self.dPotential(0. , self.X) + 0.*self.P
self.dPotentialdX_GPU = gpuarray.to_gpu( np.ascontiguousarray( dPotentialdX.astype(np.complex128) ) )
PdV = self.P*self.dPotential(0. , self.X)
self.PdPotentialdX_GPU = gpuarray.to_gpu( np.ascontiguousarray( PdV.astype(np.complex128) ) )
XdV = self.dPotential(0. , self.X)*self.X + 0.*self.P
self.XdPotentialdX_GPU = gpuarray.to_gpu( np.ascontiguousarray( XdV.astype(np.complex128) ) )
LW_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
LW_temp_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
LW_temp2_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
print ' GPU memory Free post gpu loading ', pycuda.driver.mem_get_info()[0]/float(2**30) , 'GB'
print ' ------------------------------------------------------------------------------- '
print ' Split Operator Propagator GPU with damping '
print ' ------------------------------------------------------------------------------- '
if self.GPitaevskiiCoeff != 0. :
B_GP_minus_GPU = gpuarray.empty_like( W_GPU )
B_GP_plus_GPU = gpuarray.empty_like( W_GPU )
Prob_X_GPU = gpuarray.empty( (self.X_gridDIM,) , dtype = np.complex128 )
timeRange = []
timeRangeIndexSaved = []
X_average = []
X2_average = []
dPotentialdX_average = []
PdPotentialdX_average = []
XdPotentialdX_average = []
P_average = []
P2_average = []
XP_average = []
Overlap = []
Hamiltonian_average = []
ProbabilitySquare_average = []
negativeArea = []
dXdP = self.dX * self.dP
self.blockCUDA = (512,1,1)
self.gridCUDA = (self.X_gridDIM/512, self.P_gridDIM)
P_gridDIM_32 = np.int32(self.P_gridDIM)
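# Second-order (Strang) split-operator step, a sketch of the scheme used below:
#   exp(dt L) ~= exp(dt/2 L_V) exp(dt L_K) exp(dt/2 L_V),
# where the potential part L_V is diagonal in the (x, theta) representation
# and the kinetic part L_K is diagonal in (lambda, p); the Fourier transforms
# p <-> theta and x <-> lambda move W between these representations.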
for tIndex in timeRangeIndex:
t = (tIndex)*self.dt
t_GPU = np.float64(t)
timeRange.append(t)
if self.GPitaevskiiCoeff != 0. :
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32 )
self.MakeGrossPitaevskiiTerms( B_GP_minus_GPU, B_GP_plus_GPU, Prob_X_GPU )
# p x -> theta x
self.Fourier_P_To_Theta_GPU( W_GPU )
self.expPotential_GPU( t_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
if self.GPitaevskiiCoeff != 0. :
self.expPotential_GrossPitaevskii_GPU( t_GPU, W_GPU, B_GP_minus_GPU, B_GP_plus_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
######################## Kinetic Term #########################
# theta x -> p x
self.Fourier_Theta_To_P_GPU( W_GPU )
# p x -> p lambda
self.Fourier_X_To_Lambda_GPU( W_GPU )
self.expPLambdaKinetic_GPU( W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
# p lambda -> p x
self.Fourier_Lambda_To_X_GPU( W_GPU )
################################################################
if self.GPitaevskiiCoeff != 0. :
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
ProbabilitySquare_average.append( np.sum( Prob_X_GPU.get()**2 )*self.dX )
self.MakeGrossPitaevskiiTerms( B_GP_minus_GPU, B_GP_plus_GPU, Prob_X_GPU )
###################### p x -> theta x #########################
self.Fourier_P_To_Theta_GPU( W_GPU )
self.expPotential_GPU( t_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
if self.GPitaevskiiCoeff != 0. :
self.expPotential_GrossPitaevskii_GPU( t_GPU, W_GPU, B_GP_minus_GPU, B_GP_plus_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
#
# theta x -> p x
#
self.Fourier_Theta_To_P_GPU( W_GPU )
if self.gammaDamping != 0.:
if self.dampingFunction == 'CaldeiraLeggett':
self.CaldeiraDissipatorOrder3( LW_GPU, LW_temp_GPU, W_GPU, self.Theta_fp_Damping )
if self.dampingFunction == 'ODM':
weight = 1.
sign = 1. # 1 for damping and -1 for pumping
self.Lindbladian_ODM_Order1 ( LW_GPU, LW_temp_GPU, LW_temp2_GPU, weight, sign, W_GPU)
norm = gpuarray.sum( W_GPU ).get()*(self.dX*self.dP)
W_GPU /= norm
#....................... Saving ............................
X_average.append( dXdP*gpuarray.dot(W_GPU,self.X_GPU ).get() )
X2_average.append( dXdP*gpuarray.dot(W_GPU,self.X2_GPU ).get() )
P_average.append( dXdP*gpuarray.dot(W_GPU,self.P_GPU ).get() )
P2_average.append( dXdP*gpuarray.dot(W_GPU,self.P2_GPU ).get() )
XP_average.append( dXdP*gpuarray.dot(W_GPU,self.XP_GPU ).get() )
dPotentialdX_average.append(
dXdP*gpuarray.dot(W_GPU,self.dPotentialdX_GPU).get() )
PdPotentialdX_average.append(
dXdP*gpuarray.dot(W_GPU,self.PdPotentialdX_GPU).get() )
XdPotentialdX_average.append(
dXdP*gpuarray.dot(W_GPU,self.XdPotentialdX_GPU).get() )
Hamiltonian_average.append(
dXdP*gpuarray.dot(W_GPU,self.Hamiltonian_GPU).get() )
self.zero_negative_Function( LW_temp_GPU , W_GPU, block=self.blockCUDA, grid=self.gridCUDA)
negativeArea.append( gpuarray.sum(LW_temp_GPU).get()*dXdP )
if tIndex%self.skipFrames == 0:
timeRangeIndexSaved.append(tIndex)
self.save_Frame(tIndex,W_GPU)
self.timeRange = np.array(timeRange)
self.X_average = np.array(X_average).real
self.X2_average = np.array(X2_average).real
self.P_average = np.array(P_average).real
self.P2_average = np.array(P2_average).real
self.XP_average = np.array(XP_average).real
self.dPotentialdX_average = np.array( dPotentialdX_average ).real
self.PdPotentialdX_average = np.array( PdPotentialdX_average ).real
self.XdPotentialdX_average = np.array( XdPotentialdX_average ).real
self.Hamiltonian_average = np.array( Hamiltonian_average ).real
self.ProbabilitySquare_average = np.array(ProbabilitySquare_average).real
self.negativeArea = np.array(negativeArea).real
self.file['timeRange'] = timeRange
self.file['timeRangeIndexSaved'] = timeRangeIndexSaved
self.file['/Ehrenfest/X_Ehrenfest'] = self.X_average
self.file['/Ehrenfest/X2_Ehrenfest'] = self.X2_average
self.file['/Ehrenfest/P_Ehrenfest'] = self.P_average
self.file['/Ehrenfest/P2_Ehrenfest'] = self.P2_average
self.file['/Ehrenfest/XP_Ehrenfest'] = self.XP_average
self.file['/Ehrenfest/dPotentialdX_Ehrenfest'] = self.dPotentialdX_average
self.file['/Ehrenfest/PdPotentialdX_average'] = self.PdPotentialdX_average
self.file['/Ehrenfest/XdPotentialdX_average'] = self.XdPotentialdX_average
self.file['/Ehrenfest/Hamiltonian_average'] = self.Hamiltonian_average
self.file['/Ehrenfest/ProbabilitySquare_average'] = self.ProbabilitySquare_average
self.file['W_init'] = self.W_init.real
# theta x -> p x
#self.Fourier_Theta_To_P_GPU( W_GPU )
self.W_end = W_GPU.get().real
self.file['W_end'] = self.W_end.real
#norm = gpuarray.sum( W_GPU ).get()*(self.dX*self.dP)
#print '******* norm = ', norm.real
self.file['negativeArea'] = self.negativeArea
self.file.close()
cuda_fft.cufftDestroy( self.plan_Z2Z_2D.handle )
cuda_fft.cufftDestroy( self.plan_Z2Z_2D_Axes0.handle )
cuda_fft.cufftDestroy( self.plan_Z2Z_2D_Axes1.handle )
return 0
#=====================================================================================================
#
# Propagation Bloch:
#
#=====================================================================================================
class GPU_Wigner2D_GPitaevskii_Bloch(Propagator_Base):
"""
Wigner Bloch (imaginary-time) propagator in 2D phase space, used for
ground- and excited-state preparation
"""
def __init__(self,X_gridDIM,P_gridDIM,X_amplitude,P_amplitude, hBar ,mass,
potentialString, kinematicString):
"""
"""
self.D_Theta = 0.
self.D_Lambda = 0.
self.fp_Damping_String = '0.'
self.potentialString = potentialString
self.dPotentialString = '0.'
self.kinematicString = kinematicString
self.SetPhaseSpaceBox2D(X_gridDIM, P_gridDIM, X_amplitude, P_amplitude)
self.hBar = hBar
self.mass = mass
self.SetCUDA_Constants()
if self.GPitaevskiiCoeff != 0. :
self.CUDA_constants += '__constant__ double a_GP = %f; '%( self.GPitaevskiiCoeff )
##################
self.SetFFT_Plans()
self.SetCUDA_Functions()
self.Hamiltonian = self.KineticEnergy(self.P) + self.Potential(0,self.X)
self.Hamiltonian_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.Hamiltonian.astype(np.complex128) ) )
#self.f.create_dataset('Hamiltonian', data = self.Hamiltonian.real )
self.Hamiltonian = self.fft_shift2D( self.Hamiltonian )
def Run(self ):
try :
import os
os.remove (self.fileName)
except OSError:
pass
self.file = h5py.File(self.fileName)
self.WriteHDF5_variables()
self.file.create_dataset('Hamiltonian', data = self.Hamiltonian.real )
print " X_gridDIM = ", self.X_gridDIM, " P_gridDIM = ", self.P_gridDIM
print " dx = ", self.dX, " dp = ", self.dP
print " dLambda = ", self.dLambda, " dTheta = ", self.dTheta
print ' '
print ' GPU memory Total ', pycuda.driver.mem_get_info()[1]/float(2**30) , 'GB'
print ' GPU memory Free ', pycuda.driver.mem_get_info()[0]/float(2**30) , 'GB'
timeRangeIndex = range(0, self.timeSteps+1)
W_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
norm = gpuarray.sum( W_GPU ).get()*self.dX*self.dP
W_GPU /= norm
print 'Initial W Norm = ', gpuarray.sum( W_GPU ).get()*self.dX*self.dP
W_step_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
W_temp_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
print ' GPU memory Free post gpu loading ', pycuda.driver.mem_get_info()[0]/float(2**30) , 'GB'
print ' ------------------------------------------------------------------------------- '
print ' Split Operator Propagator GPU with damping '
print ' ------------------------------------------------------------------------------- '
if self.GPitaevskiiCoeff != 0. :
B_GP_minus_GPU = gpuarray.empty_like( W_GPU )
B_GP_plus_GPU = gpuarray.empty_like( W_GPU )
Prob_X_GPU = gpuarray.empty( (self.X_gridDIM) , dtype = np.complex128 )
timeRange = []
timeRangeIndexSaved = []
X_average = []
X2_average = []
P_average = []
P2_average = []
Hamiltonian_average = []
TotalEnergyHistory = []
NonLinearEnergyHistory = []
purity = []
dXdP = self.dX * self.dP
self.blockCUDA = (512,1,1)
self.gridCUDA = (self.X_gridDIM/512, self.P_gridDIM)
P_gridDIM_32 = np.int32(self.P_gridDIM)
dt_GPU = np.float64(self.dt)
TotalEnergy = dXdP*gpuarray.dot(W_GPU,self.Hamiltonian_GPU).get()
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
TotalEnergy += self.NonLinearEnergy( Prob_X_GPU.get() )
purity_t = self.Purity_GPU( W_GPU , W_temp_GPU )
#...................................................................
print ' '
print ' '
for tIndex in timeRangeIndex:
#print ' '
t = (tIndex)*self.dt
t_GPU = np.float64(t)
timeRange.append(t)
if self.GPitaevskiiCoeff != 0. :
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
self.MakeGrossPitaevskiiTerms( B_GP_minus_GPU, B_GP_plus_GPU, Prob_X_GPU )
#___________________ p x -> theta x ______________________________________
self.Fourier_P_To_Theta_GPU( W_GPU )
self.expPotential_Bloch_GPU( dt_GPU, t_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
if self.GPitaevskiiCoeff != 0. :
self.expPotential_GrossPitaevskii_Bloch_GPU(
dt_GPU, W_GPU, B_GP_minus_GPU, B_GP_plus_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
#__________________ theta x -> p lambda ________________________________
self.Fourier_Theta_To_P_GPU( W_GPU ); self.Fourier_X_To_Lambda_GPU( W_GPU )
######################## Kinetic Term #####################################################
self.expPLambdaKinetic_Bloch_GPU( dt_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
###########################################################################################
#__________________ p lambda -> p x _______________________________________
self.Fourier_Lambda_To_X_GPU( W_GPU )
if self.GPitaevskiiCoeff != 0. :
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
self.MakeGrossPitaevskiiTerms( B_GP_minus_GPU, B_GP_plus_GPU, Prob_X_GPU )
#__________________ p x -> theta x_________________________________________
self.Fourier_P_To_Theta_GPU( W_GPU )
self.expPotential_Bloch_GPU( dt_GPU, t_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
if self.GPitaevskiiCoeff != 0. :
self.expPotential_GrossPitaevskii_Bloch_GPU(
dt_GPU, W_GPU, B_GP_minus_GPU, B_GP_plus_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
#__________________ theta x -> p x _____________________________________________
self.Fourier_Theta_To_P_GPU( W_GPU )
norm = gpuarray.sum( W_GPU ).get()*(self.dX*self.dP)
W_GPU /= norm
#....................... Saving ............................
X_average.append( dXdP*gpuarray.dot(W_GPU,self.X_GPU ).get() )
X2_average.append( dXdP*gpuarray.dot(W_GPU,self.X2_GPU ).get() )
P_average.append( dXdP*gpuarray.dot(W_GPU,self.P_GPU ).get() )
P2_average.append( dXdP*gpuarray.dot(W_GPU,self.P2_GPU ).get() )
Hamiltonian_average.append(
dXdP*gpuarray.dot(W_GPU,self.Hamiltonian_GPU).get() )
# .........................................................................................
TotalEnergy_step = dXdP*gpuarray.dot(W_GPU,self.Hamiltonian_GPU).get()
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
NonLinearEnergy_step = self.NonLinearEnergy( Prob_X_GPU.get() )
TotalEnergy_step += NonLinearEnergy_step
#...........................................................................................
purity_step = self.Purity_GPU( W_GPU , W_temp_GPU )
#...........................................................................................
if tIndex%self.skipFrames == 0:
print 'step ', tIndex
#timeRangeIndexSaved.append(tIndex)
#self.save_Frame(tIndex,W_GPU)
#print ' Energy_step = ', TotalEnergy_step, 'Energy = ', TotalEnergy
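# Imaginary-time (Bloch) step control: a step is accepted only if the total
# energy decreased and |purity| stays below 1 (a physical state); otherwise
# dt is halved and the step is retried from the last accepted Wigner function.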
if tIndex > 0:
if (TotalEnergy_step < TotalEnergy) and ( np.abs( purity_step ) < 1. ):
#print 'Physical; dt =', dt_GPU
self.copy_gpuarray( W_step_GPU, W_GPU, block=self.blockCUDA,grid=self.gridCUDA)
purity.append( purity_step )
TotalEnergyHistory.append( TotalEnergy_step )
TotalEnergy = TotalEnergy_step
NonLinearEnergyHistory.append( NonLinearEnergy_step )
else:
print 'dt = ', dt_GPU
dt_GPU = np.float64(dt_GPU/2.)
self.copy_gpuarray( W_GPU, W_step_GPU, block=self.blockCUDA,grid=self.gridCUDA)
self.timeRange = np.array(timeRange)
self.X_average = np.array(X_average).real
self.X2_average = np.array(X2_average).real
self.P_average = np.array(P_average).real
self.P2_average = np.array(P2_average).real
self.Hamiltonian_average = np.array( Hamiltonian_average ).real
self.TotalEnergyHistory = np.array( TotalEnergyHistory ).real
if self.GPitaevskiiCoeff != 0. :
self.NonLinearEnergyHistory = np.array(NonLinearEnergyHistory).real
self.purity = np.array( purity ).real
self.file['timeRange'] = timeRange
self.file['timeRangeIndexSaved'] = timeRangeIndexSaved
self.file['/Ehrenfest/X_Ehrenfest'] = self.X_average
self.file['/Ehrenfest/X2_Ehrenfest'] = self.X2_average
self.file['/Ehrenfest/P_Ehrenfest'] = self.P_average
self.file['/Ehrenfest/P2_Ehrenfest'] = self.P2_average
self.file['/Ehrenfest/Hamiltonian_average'] = self.Hamiltonian_average
self.file['/Potential'] = self.Potential(0, self.X_range)
self.file['/PotentialString'] = self.potentialString
self.file['W_init'] = self.W_init.real
self.W_0_GPU = W_GPU.copy()
self.W_0 = W_GPU.get().real
self.file['W_0'] = self.W_0
#self.file.close()
#cuda_fft.cufftDestroy( self.plan_Z2Z_2D.handle )
#cuda_fft.cufftDestroy( self.plan_Z2Z_2D_Axes0.handle )
#cuda_fft.cufftDestroy( self.plan_Z2Z_2D_Axes1.handle )
return 0
def Run_ExitedState1(self, timeSteps=None ):
print ' GPU memory Total ', pycuda.driver.mem_get_info()[1]/float(2**30) , 'GB'
print ' GPU memory Free ', pycuda.driver.mem_get_info()[0]/float(2**30) , 'GB'
if timeSteps is None:
timeRangeIndex = range(0, self.timeSteps+1)
else:
timeRangeIndex = range(0, timeSteps+1)
W_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
norm = gpuarray.sum( W_GPU ).get()*self.dX*self.dP
W_GPU /= norm
print 'Initial W Norm = ', gpuarray.sum( W_GPU ).get()*self.dX*self.dP
cW_GPU = W_GPU.copy()
W_step_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
W_temp_GPU = gpuarray.to_gpu( np.ascontiguousarray( self.W_init , dtype = np.complex128) )
print ' GPU memory Free post gpu loading ', pycuda.driver.mem_get_info()[0]/float(2**30) , 'GB'
print ' ------------------------------------------------------------------------------- '
print ' Split Operator Propagator GPU with damping '
print ' ------------------------------------------------------------------------------- '
if self.GPitaevskiiCoeff != 0. :
B_GP_minus_GPU = gpuarray.empty_like( W_GPU )
B_GP_plus_GPU = gpuarray.empty_like( W_GPU )
Prob_X_GPU = gpuarray.empty( (self.X_gridDIM) , dtype = np.complex128 )
Prob_P_GPU = gpuarray.empty( (self.P_gridDIM) , dtype = np.complex128 )
timeRange = []
timeRangeIndexSaved = []
X_average = []
X2_average = []
P_average = []
P2_average = []
Hamiltonian_average = []
TotalEnergyHistory = []
NonLinearEnergyHistory = []
purity = []
dXdP = self.dX * self.dP
self.blockCUDA = (512,1,1)
self.gridCUDA = (self.X_gridDIM/512, self.P_gridDIM)
P_gridDIM_32 = np.int32(self.P_gridDIM)
X_gridDIM_32 = np.int32(self.X_gridDIM)
dt_GPU = np.float64(self.dt)
TotalEnergy = dXdP*gpuarray.dot(W_GPU,self.Hamiltonian_GPU).get()
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
TotalEnergy += self.NonLinearEnergy( Prob_X_GPU.get() )
purity_t = self.Purity_GPU( W_GPU , W_temp_GPU )
#...................................................................
print ' '
print ' '
for tIndex in timeRangeIndex:
#print ' '
t = (tIndex)*self.dt
t_GPU = np.float64(t)
timeRange.append(t)
if self.GPitaevskiiCoeff != 0. :
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
self.MakeGrossPitaevskiiTerms( B_GP_minus_GPU, B_GP_plus_GPU, Prob_X_GPU )
#___________________ p x -> theta x ______________________________________
self.Fourier_P_To_Theta_GPU( W_GPU )
self.expPotential_Bloch_GPU( dt_GPU, t_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
if self.GPitaevskiiCoeff != 0. :
self.expPotential_GrossPitaevskii_Bloch_GPU(
dt_GPU, W_GPU, B_GP_minus_GPU, B_GP_plus_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
#__________________ theta x -> p lambda ________________________________
self.Fourier_Theta_To_P_GPU( W_GPU ); self.Fourier_X_To_Lambda_GPU( W_GPU )
######################## Kinetic Term #####################################################
self.expPLambdaKinetic_Bloch_GPU( dt_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
###########################################################################################
#__________________ p lambda -> p x _______________________________________
self.Fourier_Lambda_To_X_GPU( W_GPU )
if self.GPitaevskiiCoeff != 0. :
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
self.MakeGrossPitaevskiiTerms( B_GP_minus_GPU, B_GP_plus_GPU, Prob_X_GPU )
#__________________ p x -> theta x_________________________________________
self.Fourier_P_To_Theta_GPU( W_GPU )
self.expPotential_Bloch_GPU( dt_GPU, t_GPU, W_GPU, block=self.blockCUDA, grid=self.gridCUDA )
if self.GPitaevskiiCoeff != 0. :
self.expPotential_GrossPitaevskii_Bloch_GPU(
dt_GPU, W_GPU, B_GP_minus_GPU, B_GP_plus_GPU,
block=self.blockCUDA, grid=self.gridCUDA )
#__________________ theta x -> p x _____________________________________________
self.Fourier_Theta_To_P_GPU( W_GPU )
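# Deflation step: project out the converged ground state W_0 so the Bloch
# relaxation converges to the first excited state. The overlap coefficient
# c = 2*pi*hBar * Tr(W W_0) uses the Wigner inner product, and the component
# along W_0 is subtracted from W before renormalization.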
c = 2*np.pi*self.hBar*dXdP*gpuarray.dot(W_GPU,self.W_0_GPU ).get()
cW_GPU *= 0.
cW_GPU += c*self.W_0_GPU
W_GPU -= cW_GPU
norm = gpuarray.sum( W_GPU ).get()*(self.dX*self.dP)
W_GPU /= norm
#....................... Saving ............................
X_average.append( dXdP*gpuarray.dot(W_GPU,self.X_GPU ).get() )
X2_average.append( dXdP*gpuarray.dot(W_GPU,self.X2_GPU ).get() )
P_average.append( dXdP*gpuarray.dot(W_GPU,self.P_GPU ).get() )
P2_average.append( dXdP*gpuarray.dot(W_GPU,self.P2_GPU ).get() )
Hamiltonian_average.append(
dXdP*gpuarray.dot(W_GPU,self.Hamiltonian_GPU).get() )
# .........................................................................................
TotalEnergy_step = dXdP*gpuarray.dot(W_GPU,self.Hamiltonian_GPU).get()
self.ProbabilityX( Prob_X_GPU, W_GPU, P_gridDIM_32)
NonLinearEnergy_step = self.NonLinearEnergy( Prob_X_GPU.get() )
TotalEnergy_step += NonLinearEnergy_step
self.ProbabilityP( Prob_P_GPU, W_GPU, X_gridDIM_32)
negativeProb_p_Q = any( x.real < 0 for x in Prob_P_GPU.get() )
#print ' neg Prob P ', negativeProb_p_Q
#...........................................................................................
purity_step = self.Purity_GPU( W_GPU , W_temp_GPU )
#...........................................................................................
if tIndex%self.skipFrames == 0:
print 'step ', tIndex
if tIndex > 0:
if (TotalEnergy_step < TotalEnergy) and ( np.abs( purity_step ) < 1. ) and not negativeProb_p_Q:
self.copy_gpuarray( W_step_GPU, W_GPU, block=self.blockCUDA,grid=self.gridCUDA)
purity.append( purity_step )
TotalEnergyHistory.append( TotalEnergy_step )
TotalEnergy = TotalEnergy_step
NonLinearEnergyHistory.append( NonLinearEnergy_step )
else:
print 'dt = ', dt_GPU
dt_GPU = np.float64(dt_GPU/2.)
self.copy_gpuarray( W_GPU, W_step_GPU, block=self.blockCUDA,grid=self.gridCUDA)
self.timeRange = np.array(timeRange)
self.X_average = np.array(X_average).real
self.X2_average = np.array(X2_average).real
self.P_average = np.array(P_average).real
self.P2_average = np.array(P2_average).real
self.Hamiltonian_average = np.array( Hamiltonian_average ).real
self.TotalEnergyHistory = np.array( TotalEnergyHistory ).real
if self.GPitaevskiiCoeff != 0. :
self.NonLinearEnergyHistory = np.array(NonLinearEnergyHistory).real
self.purity = np.array( purity ).real
self.W_1 = W_GPU.get().real
self.file['W_1'] = self.W_1
self.file.close()
#cuda_fft.cufftDestroy( self.plan_Z2Z_2D.handle )
#cuda_fft.cufftDestroy( self.plan_Z2Z_2D_Axes0.handle )
#cuda_fft.cufftDestroy( self.plan_Z2Z_2D_Axes1.handle )
return 0
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/sphinx/__init__.py
|
3
|
# -*- coding: utf-8 -*-
"""
Sphinx
~~~~~~
The Sphinx documentation toolchain.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Keep this file executable as-is in Python 3!
# (Otherwise getting the version out of it from setup.py is impossible.)
from __future__ import absolute_import
import os
import sys
import warnings
from os import path
from .deprecation import RemovedInNextVersionWarning
# By default, all DeprecationWarnings under the sphinx package will be emitted.
# Users can suppress them via the environment variable PYTHONWARNINGS.
if 'PYTHONWARNINGS' not in os.environ:
warnings.filterwarnings('default',
category=RemovedInNextVersionWarning, module='sphinx')
# docutils.io uses mode='rU' for open()
warnings.filterwarnings('ignore', "'U' mode is deprecated",
DeprecationWarning, module='docutils.io')
__version__ = '1.5.1'
__released__ = '1.5.1' # used when Sphinx builds its own docs
# version info for better programmatic use
# possible values for 3rd element: 'alpha', 'beta', 'rc', 'final'
# 'final' has 0 as the last element
version_info = (1, 5, 1, 'final', 0)
package_dir = path.abspath(path.dirname(__file__))
__display_version__ = __version__ # used for command line version
if __version__.endswith('+'):
# try to find out the changeset hash if checked out from hg, and append
# it to __version__ (since we use this value from setup.py, it gets
# automatically propagated to an installed copy as well)
__display_version__ = __version__
__version__ = __version__[:-1] # remove '+' for PEP-440 version spec.
try:
import subprocess
p = subprocess.Popen(['git', 'show', '-s', '--pretty=format:%h',
path.join(package_dir, '..')],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if out:
__display_version__ += '/' + out.decode().strip()
except Exception:
pass
def main(argv=sys.argv):
if sys.argv[1:2] == ['-M']:
sys.exit(make_main(argv))
else:
sys.exit(build_main(argv))
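# Dispatch sketch: ``sphinx-build -M html SOURCEDIR BUILDDIR`` goes through
# make_main (make mode), while a plain ``sphinx-build -b html SOURCEDIR
# BUILDDIR`` invocation goes through build_main.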
def build_main(argv=sys.argv):
"""Sphinx build "main" command-line entry."""
if (sys.version_info[:3] < (2, 7, 0) or
(3, 0, 0) <= sys.version_info[:3] < (3, 4, 0)):
sys.stderr.write('Error: Sphinx requires at least Python 2.7 or 3.4 to run.\n')
return 1
try:
from sphinx import cmdline
except ImportError:
err = sys.exc_info()[1]
errstr = str(err)
if errstr.lower().startswith('no module named'):
whichmod = errstr[16:]
hint = ''
if whichmod.startswith('docutils'):
whichmod = 'Docutils library'
elif whichmod.startswith('jinja'):
whichmod = 'Jinja2 library'
elif whichmod == 'roman':
whichmod = 'roman module (which is distributed with Docutils)'
hint = ('This can happen if you upgraded docutils using\n'
'easy_install without uninstalling the old version '
'first.\n')
else:
whichmod += ' module'
sys.stderr.write('Error: The %s cannot be found. '
'Did you install Sphinx and its dependencies '
'correctly?\n' % whichmod)
if hint:
sys.stderr.write(hint)
return 1
raise
from sphinx.util.compat import docutils_version
if docutils_version < (0, 10):
sys.stderr.write('Error: Sphinx requires at least Docutils 0.10 to '
'run.\n')
return 1
return cmdline.main(argv)
def make_main(argv=sys.argv):
"""Sphinx build "make mode" entry."""
from sphinx import make_mode
return make_mode.run_make_mode(argv[2:])
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
arjunaari/tweater
|
refs/heads/master
|
py/yaml/serializer.py
|
561
|
__all__ = ['Serializer', 'SerializerError']
from error import YAMLError
from events import *
from nodes import *
class SerializerError(YAMLError):
pass
class Serializer(object):
ANCHOR_TEMPLATE = u'id%03d'
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
self.use_version = version
self.use_tags = tags
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
self.closed = None
def open(self):
if self.closed is None:
self.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError("serializer is closed")
else:
raise SerializerError("serializer is already opened")
def close(self):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif not self.closed:
self.emit(StreamEndEvent())
self.closed = True
#def __del__(self):
# self.close()
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
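# Anchoring is a two-pass scheme: anchor_node() below walks the node graph
# and assigns an anchor id to every node reached more than once, then
# serialize_node() emits an AliasEvent instead of re-serializing such nodes.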
def anchor_node(self, node):
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
self.anchors[node] = None
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
def serialize_node(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
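# Minimal round-trip sketch (assumes PyYAML's public helpers yaml.compose and
# yaml.serialize, which combine this Serializer with an Emitter and Resolver):
if __name__ == '__main__':
    import yaml
    # An anchored sequence forces generate_anchor()/AliasEvent to be exercised.
    node = yaml.compose('x: &a [1, 2]\ny: *a\n')
    print yaml.serialize(node)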
|
ilyes14/scikit-learn
|
refs/heads/master
|
sklearn/gaussian_process/gaussian_process.py
|
78
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
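# Worked example (sketch): for X = [[0.], [1.], [3.]] the index pairs are
# (0, 1), (0, 2), (1, 2), so l1_cross_distances returns
#   D = [[1.], [3.], [2.]] and ij = [[0, 1], [0, 2], [1, 2]].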
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists of iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
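# Kriging predictive variance: MSE = sigma2 * (1 - ||rt||^2 + ||u||^2),
# where rt = C^-1 r comes from the correlation system and u adds the
# uncertainty of the estimated regression weights (zero for Ordinary
# Kriging, where beta0 is fixed).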
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(int(np.ceil(float(n_eval) / batch_size))):
batch_from = k * batch_size
batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(int(np.ceil(float(n_eval) / batch_size))):
batch_from = k * batch_size
batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except Exception:
# scipy removed the ``econ`` keyword after 0.7; the economy
# transform is then available through the mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
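# Reduced (concentrated) likelihood to be maximized, up to constants:
#   psi(theta) = - sigma2.sum() * det(R)^(1/n_samples),
# i.e. minimizing sigma2 * |R|^(1/n) over theta is equivalent to maximizing
# the profiled Gaussian log-likelihood of the observations.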
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
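# fmin_cobyla treats a constraint as satisfied when it returns >= 0, so the
# box bounds thetaL <= theta <= thetaU become the pair of inequalities
# log10(t_i) - log10(thetaL_i) >= 0 and log10(thetaU_i) - log10(t_i) >= 0.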
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Back up the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
|
mbernasocchi/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsserver_wms_getprint_extra.py
|
29
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer WMS GetPrint.
From build dir, run: ctest -R PyQgsServerWMSGetPrintExtra -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'René-Luc DHONT'
__date__ = '25/06/2020'
__copyright__ = 'Copyright 2020, The QGIS Project'
import os
# Needed on Qt 5 so that the serialization of XML is consistent among all executions
os.environ['QT_HASH_SEED'] = '1'
import urllib.parse
from qgis.testing import unittest
from test_qgsserver import QgsServerTestBase
from qgis.server import QgsServerRequest
class TestQgsServerWMSGetPrintExtra(QgsServerTestBase):
"""QGIS Server WMS Tests for GetPrint group request"""
def test_wms_getprint_selection(self):
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetPrint",
"TEMPLATE": "layoutA4",
"FORMAT": "png",
"LAYERS": "Country,Hello",
"map0:EXTENT": "-33626185.498,-13032965.185,33978427.737,16020257.031",
"map0:LAYERS": "Country,Hello",
"CRS": "EPSG:3857",
"SELECTION": "Country: 4"
}.items())])
r, h = self._result(self._execute_request(qs))
self._img_diff_error(r, h, "WMS_GetPrint_Selection")
def test_wms_getprint_opacity(self):
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetPrint",
"TEMPLATE": "layoutA4",
"FORMAT": "png",
"map0%3AEXTENT": "-33626185.498,-13032965.185,33978427.737,16020257.031",
"map0:LAYERS": "Country,Hello",
"CRS": "EPSG:3857",
"SELECTION": "Country: 4",
"LAYERS": "Country,Hello",
"OPACITIES": "125,125"
}.items())])
r, h = self._result(self._execute_request(qs))
self._img_diff_error(r, h, "WMS_GetPrint_Opacity")
def test_wms_getprint_opacity_post(self):
qs = "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetPrint",
"TEMPLATE": "layoutA4",
"FORMAT": "png",
"map0%3AEXTENT": "-33626185.498,-13032965.185,33978427.737,16020257.031",
"map0:LAYERS": "Country,Hello",
"CRS": "EPSG:3857",
"SELECTION": "Country: 4",
"LAYERS": "Country,Hello",
"OPACITIES": "125%2C125"
}.items())])
r, h = self._result(self._execute_request('', QgsServerRequest.PostMethod, data=qs.encode('utf-8')))
self._img_diff_error(r, h, "WMS_GetPrint_Opacity")
def test_wms_getprint_highlight(self):
# default style
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetPrint",
"TEMPLATE": "layoutA4",
"FORMAT": "png",
"map0:EXTENT": "-33626185.498,-13032965.185,33978427.737,16020257.031",
"map0:LAYERS": "Country_Labels",
"map0:HIGHLIGHT_GEOM": "POLYGON((-15000000 10000000, -15000000 6110620, 2500000 6110620, 2500000 10000000, -15000000 10000000))",
"map0:HIGHLIGHT_SYMBOL": "<StyledLayerDescriptor><UserStyle><Name>Highlight</Name><FeatureTypeStyle><Rule><Name>Symbol</Name><LineSymbolizer><Stroke><SvgParameter name=\"stroke\">%23ea1173</SvgParameter><SvgParameter name=\"stroke-opacity\">1</SvgParameter><SvgParameter name=\"stroke-width\">1.6</SvgParameter></Stroke></LineSymbolizer></Rule></FeatureTypeStyle></UserStyle></StyledLayerDescriptor>",
"map0:HIGHLIGHT_LABELSTRING": "Highlight Layer!",
"map0:HIGHLIGHT_LABELSIZE": "16",
"map0:HIGHLIGHT_LABELCOLOR": "%2300FF0000",
"map0:HIGHLIGHT_LABELBUFFERCOLOR": "%232300FF00",
"map0:HIGHLIGHT_LABELBUFFERSIZE": "1.5",
"CRS": "EPSG:3857"
}.items())])
r, h = self._result(self._execute_request(qs))
assert h.get("Content-Type").startswith('image'), r
self._img_diff_error(r, h, "WMS_GetPrint_Highlight")
def test_wms_getprint_label(self):
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetPrint",
"TEMPLATE": "layoutA4",
"FORMAT": "png",
"map0:EXTENT": "-33626185.498,-13032965.185,33978427.737,16020257.031",
"map0:LAYERS": "Country,Hello",
"CRS": "EPSG:3857",
"IDTEXTBOX": "Updated QGIS composer label"
}.items())])
r, h = self._result(self._execute_request(qs))
self._img_diff_error(r, h, "WMS_GetPrint_LabelUpdated")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(self.projectPath),
"SERVICE": "WMS",
"VERSION": "1.1.1",
"REQUEST": "GetPrint",
"TEMPLATE": "layoutA4",
"FORMAT": "png",
"map0:EXTENT": "-33626185.498,-13032965.185,33978427.737,16020257.031",
"map0:LAYERS": "Country,Hello",
"CRS": "EPSG:3857",
"IDTEXTBOX": ""
}.items())])
r, h = self._result(self._execute_request(qs))
self._img_diff_error(r, h, "WMS_GetPrint_LabelRemoved")
if __name__ == '__main__':
unittest.main()
|
denizdemir/presto
|
refs/heads/master
|
presto-docs/src/main/sphinx/conf.py
|
44
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Presto documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
import os
import sys
import xml.dom.minidom
try:
sys.dont_write_bytecode = True
except:
pass
sys.path.insert(0, os.path.abspath('ext'))
def child_node(node, name):
for i in node.childNodes:
if (i.nodeType == i.ELEMENT_NODE) and (i.tagName == name):
return i
return None
def node_text(node):
return node.childNodes[0].data
def maven_version(pom):
dom = xml.dom.minidom.parse(pom)
project = dom.childNodes[0]
version = child_node(project, 'version')
if version:
return node_text(version)
parent = child_node(project, 'parent')
version = child_node(parent, 'version')
return node_text(version)
def get_version():
version = os.environ.get('PRESTO_VERSION', '').strip()
return version or maven_version('../../../pom.xml')
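# Illustrative note (not in the original file): with PRESTO_VERSION=0.142
# exported in the environment, get_version() returns '0.142'; otherwise it
# falls back to the <version> element of ../../../pom.xml, or to the parent
# POM's <version> when the project element has none.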
# -- General configuration -----------------------------------------------------
needs_sphinx = '1.0'
extensions = ['download']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Presto'
version = get_version()
release = version
exclude_patterns = ['_build', 'rest*', 'overview/concepts*']
pygments_style = 'sphinx'
highlight_language = 'sql'
rst_epilog = """
.. |presto_server_release| replace:: ``presto-server-{release}``
""".replace('{release}', release)
# -- Options for HTML output ---------------------------------------------------
html_theme_path = ['./themes']
html_theme = 'presto'
html_title = '%s %s Documentation' % (project, release)
html_add_permalinks = None
html_show_copyright = False
html_show_sphinx = False
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/third_party/web-page-replay/third_party/dns/rdtypes/ANY/PTR.py
|
248
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class PTR(dns.rdtypes.nsbase.NSBase):
"""PTR record"""
pass
|
sontek/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/tools/push-to-trunk/git_recipes.py
|
36
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
SHA1_RE = re.compile('^[a-fA-F0-9]{40}$')
ROLL_DEPS_GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')
# Regular expression that matches a single commit footer line.
COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s+(.+)')
# Footer metadata key for commit position.
COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
# Regular expression to parse a commit position
COMMIT_POSITION_RE = re.compile(r'(.+)@\{#(\d+)\}')
# Key for the 'git-svn' ID metadata commit footer entry.
GIT_SVN_ID_FOOTER_KEY = 'git-svn-id'
# e.g., git-svn-id: https://v8.googlecode.com/svn/trunk@23117
# ce2b1a6d-e550-0410-aec6-3dcde31c8c00
GIT_SVN_ID_RE = re.compile(r'((?:\w+)://[^@]+)@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
# Copied from bot_update.py.
def GetCommitMessageFooterMap(message):
"""Returns: (dict) A dictionary of commit message footer entries.
"""
footers = {}
# Extract the lines in the footer block.
lines = []
for line in message.strip().splitlines():
line = line.strip()
if len(line) == 0:
del(lines[:])
continue
lines.append(line)
# Parse the footer
for line in lines:
m = COMMIT_FOOTER_ENTRY_RE.match(line)
if not m:
# If any single line isn't valid, the entire footer is invalid.
footers.clear()
return footers
footers[m.group(1)] = m.group(2).strip()
return footers
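# Worked example (message text assumed for illustration): for a commit message
# whose last paragraph is the footer block
#   Cr-Commit-Position: refs/heads/master@{#23117}
#   git-svn-id: https://v8.googlecode.com/svn/trunk@23117 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
# GetCommitMessageFooterMap returns
#   {'Cr-Commit-Position': 'refs/heads/master@{#23117}',
#    'git-svn-id': 'https://v8.googlecode.com/svn/trunk@23117 ce2b1a6d-e550-0410-aec6-3dcde31c8c00'}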
class GitFailedException(Exception):
pass
def Strip(f):
def new_f(*args, **kwargs):
result = f(*args, **kwargs)
if result is None:
return result
else:
return result.strip()
return new_f
def MakeArgs(l):
"""['-a', '', 'abc', ''] -> '-a abc'"""
return " ".join(filter(None, l))
def Quoted(s):
return "\"%s\"" % s
class GitRecipesMixin(object):
def GitIsWorkdirClean(self, **kwargs):
return self.Git("status -s -uno", **kwargs).strip() == ""
@Strip
def GitBranch(self, **kwargs):
return self.Git("branch", **kwargs)
def GitCreateBranch(self, name, remote="", **kwargs):
assert name
remote_args = ["--upstream", remote] if remote else []
self.Git(MakeArgs(["new-branch", name] + remote_args), **kwargs)
def GitDeleteBranch(self, name, **kwargs):
assert name
self.Git(MakeArgs(["branch -D", name]), **kwargs)
def GitReset(self, name, **kwargs):
assert name
self.Git(MakeArgs(["reset --hard", name]), **kwargs)
def GitStash(self, **kwargs):
self.Git(MakeArgs(["stash"]), **kwargs)
def GitRemotes(self, **kwargs):
return map(str.strip,
self.Git(MakeArgs(["branch -r"]), **kwargs).splitlines())
def GitCheckout(self, name, **kwargs):
assert name
self.Git(MakeArgs(["checkout -f", name]), **kwargs)
def GitCheckoutFile(self, name, branch_or_hash, **kwargs):
assert name
assert branch_or_hash
self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]), **kwargs)
def GitCheckoutFileSafe(self, name, branch_or_hash, **kwargs):
try:
self.GitCheckoutFile(name, branch_or_hash, **kwargs)
except GitFailedException: # pragma: no cover
# The file doesn't exist in that revision.
return False
return True
def GitChangedFiles(self, git_hash, **kwargs):
assert git_hash
try:
files = self.Git(MakeArgs(["diff --name-only",
git_hash,
"%s^" % git_hash]), **kwargs)
return map(str.strip, files.splitlines())
except GitFailedException: # pragma: no cover
# Git fails using "^" at branch roots.
return []
@Strip
def GitCurrentBranch(self, **kwargs):
for line in self.Git("status -s -b -uno", **kwargs).strip().splitlines():
match = re.match(r"^## (.+)", line)
if match: return match.group(1)
raise Exception("Couldn't find curent branch.") # pragma: no cover
@Strip
def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
branch="", reverse=False, **kwargs):
assert not (git_hash and parent_hash)
args = ["log"]
if n > 0:
args.append("-%d" % n)
if format:
args.append("--format=%s" % format)
if grep:
args.append("--grep=\"%s\"" % grep.replace("\"", "\\\""))
if reverse:
args.append("--reverse")
if git_hash:
args.append(git_hash)
if parent_hash:
args.append("%s^" % parent_hash)
args.append(branch)
return self.Git(MakeArgs(args), **kwargs)
def GitGetPatch(self, git_hash, **kwargs):
assert git_hash
return self.Git(MakeArgs(["log", "-1", "-p", git_hash]), **kwargs)
# TODO(machenbach): Unused? Remove.
def GitAdd(self, name, **kwargs):
assert name
self.Git(MakeArgs(["add", Quoted(name)]), **kwargs)
def GitApplyPatch(self, patch_file, reverse=False, **kwargs):
assert patch_file
args = ["apply --index --reject"]
if reverse:
args.append("--reverse")
args.append(Quoted(patch_file))
self.Git(MakeArgs(args), **kwargs)
def GitUpload(self, reviewer="", author="", force=False, cq=False,
bypass_hooks=False, cc="", **kwargs):
args = ["cl upload --send-mail"]
if author:
args += ["--email", Quoted(author)]
if reviewer:
args += ["-r", Quoted(reviewer)]
if force:
args.append("-f")
if cq:
args.append("--use-commit-queue")
if bypass_hooks:
args.append("--bypass-hooks")
if cc:
args += ["--cc", Quoted(cc)]
# TODO(machenbach): Check output in forced mode. Verify that all required
# base files were uploaded, if not retry.
self.Git(MakeArgs(args), pipe=False, **kwargs)
def GitCommit(self, message="", file_name="", author=None, **kwargs):
assert message or file_name
args = ["commit"]
if file_name:
args += ["-aF", Quoted(file_name)]
if message:
args += ["-am", Quoted(message)]
if author:
args += ["--author", "\"%s <%s>\"" % (author, author)]
self.Git(MakeArgs(args), **kwargs)
def GitPresubmit(self, **kwargs):
self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"", **kwargs)
def GitDCommit(self, **kwargs):
self.Git(
"cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
def GitCLLand(self, **kwargs):
self.Git(
"cl land -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
def GitDiff(self, loc1, loc2, **kwargs):
return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs)
def GitPull(self, **kwargs):
self.Git("pull", **kwargs)
def GitFetchOrigin(self, **kwargs):
self.Git("fetch origin", **kwargs)
def GitConvertToSVNRevision(self, git_hash, **kwargs):
result = self.Git(MakeArgs(["rev-list", "-n", "1", git_hash]), **kwargs)
if not result or not SHA1_RE.match(result):
raise GitFailedException("Git hash %s is unknown." % git_hash)
log = self.GitLog(n=1, format="%B", git_hash=git_hash, **kwargs)
for line in reversed(log.splitlines()):
match = ROLL_DEPS_GIT_SVN_ID_RE.match(line.strip())
if match:
return match.group(1)
raise GitFailedException("Couldn't convert %s to SVN." % git_hash)
@Strip
# Copied from bot_update.py and modified for svn-like numbers only.
def GetCommitPositionNumber(self, git_hash, **kwargs):
"""Dumps the 'git' log for a specific revision and parses out the commit
position number.
If a commit position metadata key is found, its number will be returned.
Otherwise, we will search for a 'git-svn' metadata entry. If one is found,
its SVN revision value is returned.
"""
git_log = self.GitLog(format='%B', n=1, git_hash=git_hash, **kwargs)
footer_map = GetCommitMessageFooterMap(git_log)
# Search for commit position metadata
value = footer_map.get(COMMIT_POSITION_FOOTER_KEY)
if value:
match = COMMIT_POSITION_RE.match(value)
if match:
return match.group(2)
# Extract the svn revision from 'git-svn' metadata
value = footer_map.get(GIT_SVN_ID_FOOTER_KEY)
if value:
match = GIT_SVN_ID_RE.match(value)
if match:
return match.group(2)
return None
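# Sketch (footer values assumed for illustration): a footer
# 'Cr-Commit-Position: refs/heads/master@{#23117}' yields '23117'; failing
# that, a footer 'git-svn-id: https://v8.googlecode.com/svn/trunk@23117'
# followed by the repository UUID also yields '23117'; with neither footer
# present the method returns None.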
### Git svn stuff
def GitSVNFetch(self, **kwargs):
self.Git("svn fetch", **kwargs)
def GitSVNRebase(self, **kwargs):
self.Git("svn rebase", **kwargs)
# TODO(machenbach): Unused? Remove.
@Strip
def GitSVNLog(self, **kwargs):
return self.Git("svn log -1 --oneline", **kwargs)
@Strip
def GitSVNFindGitHash(self, revision, branch="", **kwargs):
assert revision
args = MakeArgs(["svn find-rev", "r%s" % revision, branch])
# Pick the last line if multiple lines are available. The first lines might
# print information about rebuilding the svn-git mapping.
return self.Git(args, **kwargs).splitlines()[-1]
@Strip
def GitSVNFindSVNRev(self, git_hash, branch="", **kwargs):
return self.Git(MakeArgs(["svn find-rev", git_hash, branch]), **kwargs)
def GitSVNDCommit(self, **kwargs):
return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None, **kwargs)
def GitSVNTag(self, version, **kwargs):
self.Git(("svn tag %s -m \"Tagging version %s\"" % (version, version)),
retry_on=lambda x: x is None,
**kwargs)
|
DarKnight24/owtf
|
refs/heads/develop
|
plugins/web/passive/Testing_for_Captcha@OWTF-AT-008.py
|
2
|
"""
PASSIVE Plugin for Testing for Captcha (OWASP-AT-008)
"""
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Google Hacking for CAPTCHA"
def run(PluginInfo):
resource = ServiceLocator.get_component("resource").GetResources('PassiveCAPTCHALnk')
Content = ServiceLocator.get_component("plugin_helper").ResourceLinkList('Online Resources', resource)
return Content
|
afonsoduarte/gardensquareproject-site
|
refs/heads/master
|
templates/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSToolFile.py
|
2736
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
|
nishad-jobsglobal/odoo-marriot
|
refs/heads/master
|
addons/website_forum/models/res_users.py
|
281
|
# -*- coding: utf-8 -*-
from datetime import datetime
from urllib import urlencode
import hashlib
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
class Users(osv.Model):
_inherit = 'res.users'
def __init__(self, pool, cr):
init_res = super(Users, self).__init__(pool, cr)
self.SELF_WRITEABLE_FIELDS = list(
set(
self.SELF_WRITEABLE_FIELDS +
['country_id', 'city', 'website', 'website_description', 'website_published']))
return init_res
def _get_user_badge_level(self, cr, uid, ids, name, args, context=None):
"""Return total badge per level of users"""
result = dict.fromkeys(ids, False)
badge_user_obj = self.pool['gamification.badge.user']
for id in ids:
result[id] = {
'gold_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'gold'), ('user_id', '=', id)], context=context, count=True),
'silver_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'silver'), ('user_id', '=', id)], context=context, count=True),
'bronze_badge': badge_user_obj.search(cr, uid, [('badge_id.level', '=', 'bronze'), ('user_id', '=', id)], context=context, count=True),
}
return result
_columns = {
'create_date': fields.datetime('Create Date', select=True, readonly=True),
'karma': fields.integer('Karma'),
'badge_ids': fields.one2many('gamification.badge.user', 'user_id', 'Badges'),
'gold_badge': fields.function(_get_user_badge_level, string="Number of gold badges", type='integer', multi='badge_level'),
'silver_badge': fields.function(_get_user_badge_level, string="Number of silver badges", type='integer', multi='badge_level'),
'bronze_badge': fields.function(_get_user_badge_level, string="Number of bronze badges", type='integer', multi='badge_level'),
}
_defaults = {
'karma': 0,
}
def _generate_forum_token(self, cr, uid, user_id, email):
"""Return a token for email validation. This token is valid for the day
and is a hash based on a (secret) uuid generated by the forum module,
the user_id, the email and currently the day (to be updated if necessary). """
forum_uuid = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'website_forum.uuid')
return hashlib.sha256('%s-%s-%s-%s' % (
datetime.now().replace(hour=0, minute=0, second=0, microsecond=0),
forum_uuid,
user_id,
email)).hexdigest()
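# Sketch (illustrative values): for user_id 42 with email 'a@b.com' on
# 2015-06-01, the hashed string is '2015-06-01 00:00:00-<forum uuid>-42-a@b.com',
# so the resulting token is stable within a day and rolls over at midnight.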
def send_forum_validation_email(self, cr, uid, user_id, forum_id=None, context=None):
user = self.pool['res.users'].browse(cr, uid, user_id, context=context)
token = self._generate_forum_token(cr, uid, user_id, user.email)
activation_template_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'website_forum.validation_email')
if activation_template_id:
params = {
'token': token,
'id': user_id,
'email': user.email}
if forum_id:
params['forum_id'] = forum_id
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
token_url = base_url + '/forum/validate_email?%s' % urlencode(params)
tpl_ctx = dict(context, token_url=token_url)
self.pool['email.template'].send_mail(cr, SUPERUSER_ID, activation_template_id, user_id, force_send=True, context=tpl_ctx)
return True
def process_forum_validation_token(self, cr, uid, token, user_id, email, forum_id=None, context=None):
validation_token = self.pool['res.users']._generate_forum_token(cr, uid, user_id, email)
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_id, context=context)
if token == validation_token and user.karma == 0:
karma = 3
if not forum_id:
forum_ids = self.pool['forum.forum'].search(cr, uid, [], limit=1, context=context)
if forum_ids:
forum_id = forum_ids[0]
if forum_id:
forum = self.pool['forum.forum'].browse(cr, uid, forum_id, context=context)
# karma gained: karma to ask a question and have 2 downvotes
karma = forum.karma_ask + (-2 * forum.karma_gen_question_downvote)
return user.write({'karma': karma})
return False
def add_karma(self, cr, uid, ids, karma, context=None):
for user in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [user.id], {'karma': user.karma + karma}, context=context)
return True
def get_serialised_gamification_summary(self, cr, uid, excluded_categories=None, context=None):
if isinstance(excluded_categories, list):
if 'forum' not in excluded_categories:
excluded_categories.append('forum')
else:
excluded_categories = ['forum']
return super(Users, self).get_serialised_gamification_summary(cr, uid, excluded_categories=excluded_categories, context=context)
|
Uli1/mapnik
|
refs/heads/master
|
scons/scons-local-2.4.0/SCons/Tool/386asm.py
|
1
|
"""SCons.Tool.386asm
Tool specification for the 386ASM assembler for the Phar Lap ETS embedded
operating system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/386asm.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
from SCons.Tool.PharLapCommon import addPharLapPaths
import SCons.Util
as_module = __import__('as', globals(), locals(), [])
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
as_module.generate(env)
env['AS'] = '386asm'
env['ASFLAGS'] = SCons.Util.CLVar('')
env['ASPPFLAGS'] = '$ASFLAGS'
env['ASCOM'] = '$AS $ASFLAGS $SOURCES -o $TARGET'
env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $SOURCES -o $TARGET'
addPharLapPaths(env)
def exists(env):
return env.Detect('386asm')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
invisiblek/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/lib2to3/tests/__init__.py
|
308
|
"""Make tests/ into a package. This allows us to "import tests" and
have tests.all_tests be a TestSuite representing all test cases
from all test_*.py files in tests/."""
# Author: Collin Winter
import os
import os.path
import unittest
import types
from . import support
all_tests = unittest.TestSuite()
tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
tests = [t[0:-3] for t in os.listdir(tests_dir)
if t.startswith('test_') and t.endswith('.py')]
loader = unittest.TestLoader()
for t in tests:
__import__("",globals(),locals(),[t],level=1)
mod = globals()[t]
all_tests.addTests(loader.loadTestsFromModule(mod))
|
botswana-harvard/bcpp-interview
|
refs/heads/develop
|
bcpp_interview/model_to_dataframe.py
|
1
|
import pandas as pd
import numpy as np
class EdcModelToDataFrame(object):
"""
e = EdcModelToDataFrame(ClinicVlResult, add_columns_for='clinic_visit')
my_df = e.dataframe
"""
def __init__(self, model=None, queryset=None, query_filter=None, add_columns_for=None):
self._columns = []
self.dataframe = pd.DataFrame()
query_filter = query_filter or {}
self.queryset = queryset or model.objects.all()
self.model = model or self.queryset.model
if self.queryset.count() > 0:
self.model = model or self.queryset.model
self.values_list = self.queryset.values_list(*self.columns.keys()).filter(**query_filter)
self.dataframe = pd.DataFrame(list(self.values_list), columns=self.columns.keys())
self.dataframe.rename(columns=self.columns, inplace=True)
self.dataframe.fillna(value=np.nan, inplace=True)
for column in list(self.dataframe.select_dtypes(include=['datetime64[ns, UTC]']).columns):
self.dataframe[column] = self.dataframe[column].astype('datetime64[ns]')
def __repr__(self):
return '{}({}.{})'.format(
self.__class__.__name__, self.model._meta.app_label, self.model._meta.model_name)
def __str__(self):
return '{}.{}'.format(self.model._meta.app_label, self.model._meta.model_name)
@property
def columns(self):
if not self._columns:
columns = self.remove_sys_columns(list(self.queryset[0].__dict__.keys()))
self._columns = dict(zip(columns, columns))
return self._columns
def remove_sys_columns(self, columns):
names = ['_state', '_user_container_instance', 'using']
for name in names:
try:
columns.remove(name)
except ValueError:
pass
return columns
|
jhawkesworth/ansible
|
refs/heads/devel
|
test/runner/lib/target.py
|
14
|
"""Test target identification, iteration and inclusion/exclusion."""
from __future__ import absolute_import, print_function
import collections
import os
import re
import errno
import itertools
import abc
import sys
from lib.util import (
ApplicationError,
display,
read_lines_without_comments,
)
MODULE_EXTENSIONS = '.py', '.ps1'
def find_target_completion(target_func, prefix):
"""
:type target_func: () -> collections.Iterable[CompletionTarget]
:type prefix: unicode
:rtype: list[str]
"""
try:
targets = target_func()
if sys.version_info[0] == 2:
prefix = prefix.encode()
short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash
matches = walk_completion_targets(targets, prefix, short)
return matches
except Exception as ex: # pylint: disable=locally-disabled, broad-except
return [u'%s' % ex]
def walk_completion_targets(targets, prefix, short=False):
"""
:type targets: collections.Iterable[CompletionTarget]
:type prefix: str
:type short: bool
:rtype: tuple[str]
"""
aliases = set(alias for target in targets for alias in target.aliases)
if prefix.endswith('/') and prefix in aliases:
aliases.remove(prefix)
matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
if short:
offset = len(os.path.dirname(prefix))
if offset:
offset += 1
relative_matches = [match[offset:] for match in matches if len(match) > offset]
if len(relative_matches) > 1:
matches = relative_matches
return tuple(sorted(matches))
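# Illustration (aliases assumed): with aliases {'cloud/', 'cloud/aws_s3',
# 'cloud/azure_rm'} and prefix 'cloud/a', the matches are
# ('cloud/aws_s3', 'cloud/azure_rm'); with short=True the shared directory is
# trimmed and the result is ('aws_s3', 'azure_rm').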
def walk_internal_targets(targets, includes=None, excludes=None, requires=None):
"""
:type targets: collections.Iterable[T <= CompletionTarget]
:type includes: list[str]
:type excludes: list[str]
:type requires: list[str]
:rtype: tuple[T <= CompletionTarget]
"""
targets = tuple(targets)
include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda t: t.name)
if requires:
require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
include_targets = [target for target in include_targets if target in require_targets]
if excludes:
list(filter_targets(targets, excludes, errors=True, include=False, directories=False))
internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False))
return tuple(sorted(internal_targets, key=lambda t: t.name))
def walk_external_targets(targets, includes=None, excludes=None, requires=None):
"""
:type targets: collections.Iterable[CompletionTarget]
:type includes: list[str]
:type excludes: list[str]
:type requires: list[str]
:rtype: tuple[CompletionTarget], tuple[CompletionTarget]
"""
targets = tuple(targets)
if requires:
include_targets = list(filter_targets(targets, includes, errors=True, directories=False))
require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
includes = [target.name for target in include_targets if target in require_targets]
if includes:
include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name)
else:
include_targets = []
else:
include_targets = sorted(filter_targets(targets, includes, errors=True), key=lambda t: t.name)
if excludes:
exclude_targets = sorted(filter_targets(targets, excludes, errors=True), key=lambda t: t.name)
else:
exclude_targets = []
previous = None
include = []
for target in include_targets:
if isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \
and previous.name == target.name:
previous.modules = tuple(set(previous.modules) | set(target.modules))
else:
include.append(target)
previous = target
previous = None
exclude = []
for target in exclude_targets:
if isinstance(previous, DirectoryTarget) and isinstance(target, DirectoryTarget) \
and previous.name == target.name:
previous.modules = tuple(set(previous.modules) | set(target.modules))
else:
exclude.append(target)
previous = target
return tuple(include), tuple(exclude)
def filter_targets(targets, patterns, include=True, directories=True, errors=True):
"""
:type targets: collections.Iterable[CompletionTarget]
:type patterns: list[str]
:type include: bool
:type directories: bool
:type errors: bool
:rtype: collections.Iterable[CompletionTarget]
"""
unmatched = set(patterns or ())
compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
for target in targets:
matched_directories = set()
match = False
if patterns:
for alias in target.aliases:
for pattern in patterns:
if compiled_patterns[pattern].match(alias):
match = True
try:
unmatched.remove(pattern)
except KeyError:
pass
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
elif include:
match = True
if not target.base_path:
matched_directories.add('.')
for alias in target.aliases:
if alias.endswith('/'):
if target.base_path and len(target.base_path) > len(alias):
matched_directories.add(target.base_path)
else:
matched_directories.add(alias)
if match != include:
continue
if directories and matched_directories:
yield DirectoryTarget(sorted(matched_directories, key=len)[0], target.modules)
else:
yield target
if errors:
if unmatched:
raise TargetPatternsNotMatched(unmatched)
def walk_module_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
path = 'lib/ansible/modules'
for target in walk_test_targets(path, path + '/', extensions=MODULE_EXTENSIONS):
if not target.module:
continue
yield target
def walk_units_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(path='test/units', module_path='test/units/modules/', extensions=('.py',), prefix='test_')
def walk_compile_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(module_path='lib/ansible/modules/', extensions=('.py',), extra_dirs=('bin',))
def walk_sanity_targets():
"""
:rtype: collections.Iterable[TestTarget]
"""
return walk_test_targets(module_path='lib/ansible/modules/')
def walk_posix_integration_targets(include_hidden=False):
"""
:type include_hidden: bool
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases):
yield target
def walk_network_integration_targets(include_hidden=False):
"""
:type include_hidden: bool
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases):
yield target
def walk_windows_integration_targets(include_hidden=False):
"""
:type include_hidden: bool
:rtype: collections.Iterable[IntegrationTarget]
"""
for target in walk_integration_targets():
if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases):
yield target
def walk_integration_targets():
"""
:rtype: collections.Iterable[IntegrationTarget]
"""
path = 'test/integration/targets'
modules = frozenset(t.module for t in walk_module_targets())
paths = sorted(os.path.join(path, p) for p in os.listdir(path))
prefixes = load_integration_prefixes()
for path in paths:
if os.path.isdir(path):
yield IntegrationTarget(path, modules, prefixes)
def load_integration_prefixes():
"""
:rtype: dict[str, str]
"""
path = 'test/integration'
names = sorted(f for f in os.listdir(path) if os.path.splitext(f)[0] == 'target-prefixes')
prefixes = {}
for name in names:
prefix = os.path.splitext(name)[1][1:]
with open(os.path.join(path, name), 'r') as prefix_fd:
prefixes.update(dict((k, prefix) for k in prefix_fd.read().splitlines()))
return prefixes
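# Example (file contents assumed): a file
# 'test/integration/target-prefixes.network' containing the lines 'eos' and
# 'ios' contributes {'eos': 'network', 'ios': 'network'} to the result.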
def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None, extra_dirs=None):
"""
:type path: str | None
:type module_path: str | None
:type extensions: tuple[str] | None
:type prefix: str | None
:type extra_dirs: tuple[str] | None
:rtype: collections.Iterable[TestTarget]
"""
for root, _, file_names in os.walk(path or '.', topdown=False):
if root.endswith('/__pycache__'):
continue
if '/.tox/' in root:
continue
if path is None:
root = root[2:]
if root.startswith('.') and root != '.github':
continue
for file_name in file_names:
name, ext = os.path.splitext(os.path.basename(file_name))
if name.startswith('.'):
continue
if extensions and ext not in extensions:
continue
if prefix and not name.startswith(prefix):
continue
file_path = os.path.join(root, file_name)
if os.path.islink(file_path):
# special case to allow a symlink of ansible_release.py -> ../release.py
if file_path != 'lib/ansible/module_utils/ansible_release.py':
continue
yield TestTarget(file_path, module_path, prefix, path)
if extra_dirs:
for extra_dir in extra_dirs:
file_names = os.listdir(extra_dir)
for file_name in file_names:
file_path = os.path.join(extra_dir, file_name)
if os.path.isfile(file_path) and not os.path.islink(file_path):
yield TestTarget(file_path, module_path, prefix, path)
def analyze_integration_target_dependencies(integration_targets):
"""
:type integration_targets: list[IntegrationTarget]
:rtype: dict[str,set[str]]
"""
real_target_root = os.path.realpath('test/integration/targets') + '/'
role_targets = [t for t in integration_targets if t.type == 'role']
hidden_role_target_names = set(t.name for t in role_targets if 'hidden/' in t.aliases)
dependencies = collections.defaultdict(set)
# handle setup dependencies
for target in integration_targets:
for setup_target_name in target.setup_always + target.setup_once:
dependencies[setup_target_name].add(target.name)
# handle target dependencies
for target in integration_targets:
for need_target in target.needs_target:
dependencies[need_target].add(target.name)
# handle symlink dependencies between targets
# this use case is supported, but discouraged
for target in integration_targets:
for root, _dummy, file_names in os.walk(target.path):
for name in file_names:
path = os.path.join(root, name)
if not os.path.islink(path):
continue
real_link_path = os.path.realpath(path)
if not real_link_path.startswith(real_target_root):
continue
link_target = real_link_path[len(real_target_root):].split('/')[0]
if link_target == target.name:
continue
dependencies[link_target].add(target.name)
# intentionally primitive analysis of role meta to avoid a dependency on pyyaml
# script based targets are scanned as they may execute a playbook with role dependencies
for target in integration_targets:
meta_dir = os.path.join(target.path, 'meta')
if not os.path.isdir(meta_dir):
continue
meta_paths = sorted([os.path.join(meta_dir, name) for name in os.listdir(meta_dir)])
for meta_path in meta_paths:
if os.path.exists(meta_path):
with open(meta_path, 'rb') as meta_fd:
# try and decode the file as a utf-8 string, skip if it contains invalid chars (binary file)
try:
meta_lines = meta_fd.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
continue
for meta_line in meta_lines:
if re.search(r'^ *#.*$', meta_line):
continue
if not meta_line.strip():
continue
for hidden_target_name in hidden_role_target_names:
if hidden_target_name in meta_line:
dependencies[hidden_target_name].add(target.name)
while True:
changes = 0
for dummy, dependent_target_names in dependencies.items():
for dependent_target_name in list(dependent_target_names):
new_target_names = dependencies.get(dependent_target_name)
if new_target_names:
for new_target_name in new_target_names:
if new_target_name not in dependent_target_names:
dependent_target_names.add(new_target_name)
changes += 1
if not changes:
break
for target_name in sorted(dependencies):
consumers = dependencies[target_name]
if not consumers:
continue
display.info('%s:' % target_name, verbosity=4)
for consumer in sorted(consumers):
display.info(' %s' % consumer, verbosity=4)
return dependencies
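# Illustration (hypothetical targets): if target 'aws_s3' declares aliases
# 'setup/once/setup_remote_tmp_dir' and 'needs/target/prepare_http_tests',
# the returned map contains 'setup_remote_tmp_dir' -> {'aws_s3'} and
# 'prepare_http_tests' -> {'aws_s3'}; the closure loop then folds in
# consumers of consumers until the map stops changing.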
class CompletionTarget(object):
"""Command-line argument completion target base class."""
__metaclass__ = abc.ABCMeta
def __init__(self):
self.name = None
self.path = None
self.base_path = None
self.modules = tuple()
self.aliases = tuple()
def __eq__(self, other):
if isinstance(other, CompletionTarget):
return self.__repr__() == other.__repr__()
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self.name.__lt__(other.name)
def __gt__(self, other):
return self.name.__gt__(other.name)
def __hash__(self):
return hash(self.__repr__())
def __repr__(self):
if self.modules:
return '%s (%s)' % (self.name, ', '.join(self.modules))
return self.name
class DirectoryTarget(CompletionTarget):
"""Directory target."""
def __init__(self, path, modules):
"""
:type path: str
:type modules: tuple[str]
"""
super(DirectoryTarget, self).__init__()
self.name = path
self.path = path
self.modules = modules
class TestTarget(CompletionTarget):
"""Generic test target."""
def __init__(self, path, module_path, module_prefix, base_path):
"""
:type path: str
:type module_path: str | None
:type module_prefix: str | None
:type base_path: str
"""
super(TestTarget, self).__init__()
self.name = path
self.path = path
self.base_path = base_path + '/' if base_path else None
name, ext = os.path.splitext(os.path.basename(self.path))
if module_path and path.startswith(module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
self.module = name[len(module_prefix or ''):].lstrip('_')
self.modules = (self.module,)
else:
self.module = None
self.modules = tuple()
aliases = [self.path, self.module]
parts = self.path.split('/')
for i in range(1, len(parts)):
alias = '%s/' % '/'.join(parts[:i])
aliases.append(alias)
aliases = [a for a in aliases if a]
self.aliases = tuple(sorted(aliases))
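# Example (hypothetical path): 'lib/ansible/modules/files/copy.py' walked with
# module_path 'lib/ansible/modules/' yields module 'copy' and aliases
# ('copy', 'lib/', 'lib/ansible/', 'lib/ansible/modules/',
#  'lib/ansible/modules/files/', 'lib/ansible/modules/files/copy.py').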
class IntegrationTarget(CompletionTarget):
"""Integration test target."""
non_posix = frozenset((
'network',
'windows',
))
categories = frozenset(non_posix | frozenset((
'posix',
'module',
'needs',
'skip',
)))
def __init__(self, path, modules, prefixes):
"""
:type path: str
:type modules: frozenset[str]
:type prefixes: dict[str, str]
"""
super(IntegrationTarget, self).__init__()
self.name = os.path.basename(path)
self.path = path
# script_path and type
contents = sorted(os.listdir(path))
runme_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'runme')
test_files = tuple(c for c in contents if os.path.splitext(c)[0] == 'test')
self.script_path = None
if runme_files:
self.type = 'script'
self.script_path = os.path.join(path, runme_files[0])
elif test_files:
self.type = 'special'
elif os.path.isdir(os.path.join(path, 'tasks')) or os.path.isdir(os.path.join(path, 'defaults')):
self.type = 'role'
else:
self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well
# static_aliases
try:
aliases_path = os.path.join(path, 'aliases')
static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
except IOError as ex:
if ex.errno != errno.ENOENT:
raise
static_aliases = tuple()
# modules
if self.name in modules:
module_name = self.name
elif self.name.startswith('win_') and self.name[4:] in modules:
module_name = self.name[4:]
else:
module_name = None
self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
# groups
groups = [self.type]
groups += [a for a in static_aliases if a not in modules]
groups += ['module/%s' % m for m in self.modules]
if not self.modules:
groups.append('non_module')
if 'destructive' not in groups:
groups.append('non_destructive')
if '_' in self.name:
prefix = self.name[:self.name.find('_')]
else:
prefix = None
if prefix in prefixes:
group = prefixes[prefix]
if group != prefix:
group = '%s/%s' % (group, prefix)
groups.append(group)
if self.name.startswith('win_'):
groups.append('windows')
if self.name.startswith('connection_'):
groups.append('connection')
if self.name.startswith('setup_') or self.name.startswith('prepare_'):
groups.append('hidden')
if self.type not in ('script', 'role'):
groups.append('hidden')
# Collect file paths before group expansion to avoid including the directories.
# Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
g.startswith('needs/file/') and not g.startswith('needs/file/test/integration/targets/'))))
for group in itertools.islice(groups, 0, len(groups)):
if '/' in group:
parts = group.split('/')
for i in range(1, len(parts)):
groups.append('/'.join(parts[:i]))
if not any(g in self.non_posix for g in groups):
groups.append('posix')
# aliases
aliases = [self.name] + \
['%s/' % g for g in groups] + \
['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
if 'hidden/' in aliases:
aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
self.aliases = tuple(sorted(set(aliases)))
# configuration
self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
class TargetPatternsNotMatched(ApplicationError):
"""One or more targets were not matched when a match was required."""
def __init__(self, patterns):
"""
:type patterns: set[str]
"""
self.patterns = sorted(patterns)
if len(patterns) > 1:
message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
else:
message = 'Target pattern not matched: %s' % self.patterns[0]
super(TargetPatternsNotMatched, self).__init__(message)
|
vladikr/nova_drafts
|
refs/heads/master
|
nova/tests/api/openstack/compute/plugins/v3/test_lock_server.py
|
28
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.plugins.v3 import lock_server
from nova import exception
from nova.tests.api.openstack.compute.plugins.v3 import \
admin_only_action_common
from nova.tests.api.openstack import fakes
class LockServerTests(admin_only_action_common.CommonTests):
def setUp(self):
super(LockServerTests, self).setUp()
self.controller = lock_server.LockServerController()
self.compute_api = self.controller.compute_api
def _fake_controller(*args, **kwargs):
return self.controller
self.stubs.Set(lock_server, 'LockServerController',
_fake_controller)
self.app = fakes.wsgi_app_v3(init_only=('servers',
'os-lock-server'),
fake_auth_context=self.context)
self.mox.StubOutWithMock(self.compute_api, 'get')
def test_lock_unlock(self):
self._test_actions(['lock', 'unlock'])
def test_lock_unlock_with_non_existed_instance(self):
self._test_actions_with_non_existed_instance(['lock', 'unlock'])
def test_unlock_not_authorized(self):
self.mox.StubOutWithMock(self.compute_api, 'unlock')
instance = self._stub_instance_get()
self.compute_api.unlock(self.context, instance).AndRaise(
exception.PolicyNotAuthorized(action='unlock'))
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance.uuid,
{'unlock': None})
self.assertEqual(403, res.status_int)
|
KolevDarko/flasky-extended
|
refs/heads/master
|
app/auth/views.py
|
1
|
from flask import render_template, redirect, request, url_for, flash, session, make_response
from flask.ext.login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db, socketio
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated():
current_user.ping()
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous() or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
session['user_id'] = user.id
resp = make_response(redirect(request.args.get('next') or url_for('main.index')))
resp.set_cookie('user_id', str(user.id), max_age=30*24*60*60)
return resp
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
def send_offline_signal():
data = dict()
data['status'] = "offline";
data['id'] = session['user_id']
socketio.emit('statusUpdate', data, room="status_"+session['user_id'], namespace='/status')
@auth.route('/logout')
@login_required
def logout():
send_offline_signal()
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash('An email with instructions to confirm your new email '
'address has been sent to you.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('Your email address has been updated.')
else:
flash('Invalid request.')
return redirect(url_for('main.index'))
|
maggienj/ActiveData
|
refs/heads/es5
|
mo_json/typed_encoder.py
|
1
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
import time
from collections import deque
from datetime import datetime, date, timedelta
from decimal import Decimal
from mo_logs import Log
from mo_logs.strings import utf82unicode
from mo_times.dates import Date
from mo_times.durations import Duration
from mo_dots import Data, FlatList, NullType
from mo_json import ESCAPE_DCT, float2json
from mo_json.encoder import pretty_json, problem_serializing, _repr, UnicodeBuilder
json_decoder = json.JSONDecoder().decode
append = UnicodeBuilder.append
def typed_encode(value):
"""
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
"""
try:
_buffer = UnicodeBuilder(1024)
_typed_encode(value, _buffer)
output = _buffer.build()
return output
except Exception as e:
# THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
from mo_logs import Log
Log.warning("Serialization of JSON problems", e)
try:
return pretty_json(value)
except Exception as f:
Log.error("problem serializing object", f)
def _typed_encode(value, _buffer):
try:
if value is None:
append(_buffer, u'{}')
return
elif value is True:
append(_buffer, u'{"$boolean": true}')
return
elif value is False:
append(_buffer, u'{"$boolean": false}')
return
_type = value.__class__
if _type in (dict, Data):
if value:
_dict2json(value, _buffer)
else:
append(_buffer, u'{"$object": "."}')
elif _type is str:
append(_buffer, u'{"$string": "')
try:
v = utf82unicode(value)
except Exception as e:
raise problem_serializing(value, e)
for c in v:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, u'"}')
elif _type is unicode:
append(_buffer, u'{"$string": "')
for c in value:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, u'"}')
elif _type in (int, long, Decimal):
append(_buffer, u'{"$number": ')
append(_buffer, float2json(value))
append(_buffer, u'}')
elif _type is float:
append(_buffer, u'{"$number": ')
append(_buffer, float2json(value))
append(_buffer, u'}')
elif _type in (set, list, tuple, FlatList):
_list2json(value, _buffer)
elif _type is date:
append(_buffer, u'{"$number": ')
append(_buffer, float2json(time.mktime(value.timetuple())))
append(_buffer, u'}')
elif _type is datetime:
append(_buffer, u'{"$number": ')
append(_buffer, float2json(time.mktime(value.timetuple())))
append(_buffer, u'}')
elif _type is Date:
append(_buffer, u'{"$number": ')
append(_buffer, float2json(time.mktime(value.value.timetuple())))
append(_buffer, u'}')
elif _type is timedelta:
append(_buffer, u'{"$number": ')
append(_buffer, float2json(value.total_seconds()))
append(_buffer, u'}')
elif _type is Duration:
append(_buffer, u'{"$number": ')
append(_buffer, float2json(value.seconds))
append(_buffer, u'}')
elif _type is NullType:
append(_buffer, u"null")
elif hasattr(value, '__json__'):
j = value.__json__()
t = json2typed(j)
append(_buffer, t)
elif hasattr(value, '__iter__'):
_iter2json(value, _buffer)
else:
from mo_logs import Log
Log.error(_repr(value) + " is not JSON serializable")
except Exception as e:
from mo_logs import Log
Log.error(_repr(value) + " is not JSON serializable", e)
def _list2json(value, _buffer):
if not value:
append(_buffer, u"[]")
else:
sep = u"["
for v in value:
append(_buffer, sep)
sep = u", "
_typed_encode(v, _buffer)
append(_buffer, u"]")
def _iter2json(value, _buffer):
append(_buffer, u"[")
sep = u""
for v in value:
append(_buffer, sep)
sep = u", "
_typed_encode(v, _buffer)
append(_buffer, u"]")
def _dict2json(value, _buffer):
prefix = u'{"$object": ".", "'
for k, v in value.iteritems():
append(_buffer, prefix)
prefix = u", \""
if isinstance(k, str):
k = utf82unicode(k)
if not isinstance(k, unicode):
Log.error("Expecting property name to be a string")
for c in k:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, u"\": ")
_typed_encode(v, _buffer)
append(_buffer, u"}")
VALUE = 0
PRIMITIVE = 1
BEGIN_OBJECT = 2
OBJECT = 3
KEYWORD = 4
STRING = 6
ESCAPE = 5
def json2typed(json):
"""
every ': {' gets converted to ': {"$object": ".", '
every ': <value>' gets converted to '{"$value": <value>}'
"""
# MODE VALUES
#
context = deque()
output = UnicodeBuilder(1024)
mode = VALUE
for c in json:
if c in "\t\r\n ":
append(output, c)
elif mode == VALUE:
if c == "{":
context.append(mode)
mode = BEGIN_OBJECT
append(output, '{"$object": "."')
continue
elif c == '[':
context.append(mode)
mode = VALUE
elif c == ",":
mode = context.pop()
if mode != OBJECT:
context.append(mode)
mode = VALUE
elif c in "]":
mode = context.pop()
elif c in "}":
mode = context.pop()
mode = context.pop()
elif c == '"':
context.append(mode)
mode = STRING
append(output, '{"$value": ')
else:
mode = PRIMITIVE
append(output, '{"$value": ')
append(output, c)
elif mode == PRIMITIVE:
if c == ",":
append(output, '}')
mode = context.pop()
if mode == VALUE:  # inside a list: keep VALUE on the stack for the next element
context.append(mode)
elif c == "]":
append(output, '}')
mode = context.pop()
elif c == "}":
append(output, '}')
mode = context.pop()
mode = context.pop()
append(output, c)
elif mode == BEGIN_OBJECT:
if c == '"':
context.append(OBJECT)
context.append(KEYWORD)
mode = STRING
append(output, ', ')
elif c == "}":
mode = context.pop()
else:
Log.error("not expected")
append(output, c)
elif mode == KEYWORD:
append(output, c)
if c == ':':
mode = VALUE
else:
Log.error("Not expected")
elif mode == STRING:
append(output, c)
if c == '"':
mode = context.pop()
if mode != KEYWORD:
append(output, '}')
elif c == '\\':
context.append(mode)
mode = ESCAPE
elif mode == ESCAPE:
mode = context.pop()
append(output, c)
elif mode == OBJECT:
if c == '"':
context.append(mode)
context.append(KEYWORD)
mode = STRING
elif c == ",":
pass
elif c == '}':
mode = context.pop()
else:
Log.error("not expected")
append(output, c)
if mode == PRIMITIVE:
append(output, "}")
return output.build()
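# Worked example (input assumed): json2typed('{"a": 1, "b": "x"}') returns
# '{"$object": ".", "a": {"$value": 1}, "b": {"$value": "x"}}' -- each object
# gains a "$object" marker and each primitive value is wrapped in "$value".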
def encode_property(name):
return name.replace("\\.", ".")
def decode_property(encoded):
return encoded
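# e.g. encode_property('a\\.b') (a name containing an escaped dot) returns
# 'a.b'; decode_property is currently the identity and returns its argument
# unchanged.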
|
tpodowd/boto
|
refs/heads/master
|
boto/configservice/__init__.py
|
6
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the AWS Config service.

    :rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
    from boto.configservice.layer1 import ConfigServiceConnection
return get_regions('configservice', connection_cls=ConfigServiceConnection)
def connect_to_region(region_name, **kw_params):
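    """
    Connect to the AWS Config endpoint in the given region.

    :type region_name: str
    :param region_name: The name of the region to connect to.

    :rtype: :class:`boto.configservice.layer1.ConfigServiceConnection` or None
    :return: A connection to the given region, or None if ``region_name``
        is not a valid Config region.
    """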
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
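# Minimal usage sketch (the region name below is illustrative):
#
#   conn = connect_to_region('us-east-1')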
|
ryanpitts/source
|
refs/heads/master
|
source/base/urls.py
|
1
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.decorators.cache import cache_page
from .feeds import ArticleFeed
from .views import SourceSearchView, HomepageView
from haystack.forms import SearchForm
from haystack.query import SearchQuerySet
from haystack.views import search_view_factory
from source.articles.views import ArticleList, ArticleDetail
from source.utils.caching import ClearCache
STANDARD_CACHE_TIME = getattr(settings, 'CACHE_MIDDLEWARE_SECONDS', 60*15)
FEED_CACHE_TIME = getattr(settings, 'FEED_CACHE_SECONDS', 60*15)
urlpatterns = patterns('',
url(
regex = '^$',
view = cache_page(HomepageView.as_view(template_name='homepage.html'), STANDARD_CACHE_TIME),
kwargs = {},
name = 'homepage',
),
(r'^articles/', include('source.articles.urls')),
(r'^code/', include('source.code.urls')),
(r'^guides/', include('source.guides.urls')),
(r'^jobs/', include('source.jobs.urls')),
(r'^organizations/', include('source.people.urls.organizations')),
(r'^people/', include('source.people.urls.people')),
(r'^api/1.0/', include('source.api.urls')),
url(
regex = '^search/$',
view = search_view_factory(view_class=SourceSearchView, form_class=SearchForm, searchqueryset=SearchQuerySet().order_by('django_ct')),
kwargs = {},
name = 'haystack_search',
),
url(
regex = '^clear-cache/$',
view = ClearCache.as_view(),
kwargs = {},
name = 'clear_cache',
),
url(
regex = '^rss/$',
view = cache_page(ArticleFeed(), FEED_CACHE_TIME),
kwargs = {},
name = 'homepage_feed',
),
url(
regex = '^category/(?P<category>[-\w]+)/$',
view = cache_page(ArticleList.as_view(), STANDARD_CACHE_TIME),
kwargs = {},
name = 'article_list_by_category',
),
url(
regex = '^category/(?P<category>[-\w]+)/rss/$',
view = cache_page(ArticleFeed(), FEED_CACHE_TIME),
kwargs = {},
name = 'article_list_by_category_feed',
),
url(
regex = '^(?P<section>[-\w]+)/$',
view = cache_page(ArticleList.as_view(), STANDARD_CACHE_TIME),
kwargs = {},
name = 'article_list_by_section',
),
url(
regex = '^(?P<section>[-\w]+)/rss/$',
view = cache_page(ArticleFeed(), FEED_CACHE_TIME),
kwargs = {},
name = 'article_list_by_section_feed',
),
url(
regex = '^(?P<section>[-\w]+)/(?P<slug>[-\w]+)/$',
view = cache_page(ArticleDetail.as_view(), STANDARD_CACHE_TIME),
kwargs = {},
name = 'article_detail',
),
)
|
melmothx/jsonbot
|
refs/heads/master
|
jsb/lib/socklib/irc/ircevent.py
|
1
|
# gozerbot/ircevent.py
#
#
# http://www.irchelp.org/irchelp/rfc/rfc2812.txt
""" an ircevent is extracted from the IRC string received from the server. """
## jsb imports
from jsb.utils.generic import toenc, fromenc, strippedtxt
from jsb.lib.socklib.utils.generic import fix_format, stripident, makeargrest
from jsb.lib.eventbase import EventBase
## basic imports
import time
import re
import types
import copy
import logging
## defines
cpy = copy.deepcopy
## Ircevent class
class IrcEvent(EventBase):
""" represents an IRC event. """
def __deepcopy__(self, bla):
e = IrcEvent()
e.copyin(self)
return e
def parse(self, bot, rawstr):
""" parse raw string into ircevent. """
self.bottype = "irc"
self.bot = bot
self.ttl = 2
rawstr = rawstr.rstrip()
splitted = re.split('\s+', rawstr)
if not rawstr[0] == ':':
splitted.insert(0, u":%s!%s@%s" % (bot.nick, bot.name, bot.server))
rawstr = u":%s!%s@%s %s" % (bot.nick, bot.name, bot.server, rawstr)
self.prefix = splitted[0][1:]
nickuser = self.prefix.split('!')
if len(nickuser) == 2:
self.nick = nickuser[0]
#if self.bot.cfg['stripident'] or Config()['stripident']: self.userhost = stripident(nickuser[1])
self.userhost = nickuser[1]
self.cmnd = splitted[1]
self.cbtype = self.cmnd
        if self.cmnd in pfc:
self.arguments = splitted[2:pfc[self.cmnd]+2]
txtsplit = re.split('\s+', rawstr, pfc[self.cmnd]+2)
self.txt = txtsplit[-1]
else:
self.arguments = splitted[2:]
if self.arguments: self.target = self.arguments[0]
self.postfix = ' '.join(self.arguments)
if self.target and self.target.startswith(':'): self.txt = ' '.join(self.arguments)
if self.txt:
if self.txt[0] == ":": self.txt = self.txt[1:]
if self.txt: self.usercmnd = self.txt.split()[0]
if self.cmnd == 'PING': self.speed = 9
if self.cmnd == 'PRIVMSG':
self.channel = self.arguments[0]
if '\001' in self.txt: self.isctcp = True
elif self.cmnd == 'JOIN' or self.cmnd == 'PART':
if self.arguments: self.channel = self.arguments[0]
else: self.channel = self.txt
elif self.cmnd == 'MODE': self.channel = self.arguments[0]
elif self.cmnd == 'TOPIC': self.channel = self.arguments[0]
elif self.cmnd == 'KICK': self.channel = self.arguments[0]
elif self.cmnd == '353': self.channel = self.arguments[2]
elif self.cmnd == '324': self.channel = self.arguments[1]
if self.userhost:
self.ruserhost = self.userhost
self.stripped = self.userhost
self.auth = self.userhost
            try: self.hostname = self.userhost.split("@")[1]
            except IndexError: self.hostname = None
self.origtxt = self.txt
if self.channel:
self.channel = self.channel.strip()
self.origchannel = self.channel
if self.channel == self.bot.nick:
logging.warn("irc - msg detected - setting channel to %s" % self.userhost)
self.msg = True
self.channel = self.userhost
try:
nr = int(self.cmnd)
if nr > 399 and not nr == 422: logging.error('%s - %s - %s - %s' % (self.bot.name, self.cmnd, self.arguments, self.txt))
except ValueError: pass
return self
def reply(self, txt, result=[], event=None, origin="", dot=u", ", nr=375, extend=0, *args, **kwargs):
""" reply to this event """
if self.checkqueues(result): return
if self.isdcc: self.bot.say(self.sock, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
elif self.msg: self.bot.say(self.nick, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
elif self.silent or (self.chan and self.chan.data and self.chan.data.silent): self.bot.say(self.nick, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
else: self.bot.say(self.channel, txt, result, 'msg', self, nr, extend, dot, *args, **kwargs)
return self
## postfix count - how many arguments
pfc = {}
pfc['NICK'] = 0
pfc['QUIT'] = 0
pfc['SQUIT'] = 1
pfc['JOIN'] = 0
pfc['PART'] = 1
pfc['TOPIC'] = 1
pfc['KICK'] = 2
pfc['PRIVMSG'] = 1
pfc['NOTICE'] = 1
pfc['SQUERY'] = 1
pfc['PING'] = 0
pfc['ERROR'] = 0
pfc['AWAY'] = 0
pfc['WALLOPS'] = 0
pfc['INVITE'] = 1
pfc['001'] = 1
pfc['002'] = 1
pfc['003'] = 1
pfc['004'] = 4
pfc['005'] = 15
pfc['302'] = 1
pfc['303'] = 1
pfc['301'] = 2
pfc['305'] = 1
pfc['306'] = 1
pfc['311'] = 5
pfc['312'] = 3
pfc['313'] = 2
pfc['317'] = 3
pfc['318'] = 2
pfc['319'] = 2
pfc['314'] = 5
pfc['369'] = 2
pfc['322'] = 3
pfc['323'] = 1
pfc['325'] = 3
pfc['324'] = 4
pfc['331'] = 2
pfc['332'] = 2
pfc['341'] = 3
pfc['342'] = 2
pfc['346'] = 3
pfc['347'] = 2
pfc['348'] = 3
pfc['349'] = 2
pfc['351'] = 3
pfc['352'] = 7
pfc['315'] = 2
pfc['353'] = 3
pfc['366'] = 2
pfc['364'] = 3
pfc['365'] = 2
pfc['367'] = 2
pfc['368'] = 2
pfc['371'] = 1
pfc['374'] = 1
pfc['375'] = 1
pfc['372'] = 1
pfc['376'] = 1
pfc['381'] = 1
pfc['382'] = 2
pfc['383'] = 5
pfc['391'] = 2
pfc['392'] = 1
pfc['393'] = 1
pfc['394'] = 1
pfc['395'] = 1
pfc['262'] = 3
pfc['242'] = 1
pfc['235'] = 3
pfc['250'] = 1
pfc['251'] = 1
pfc['252'] = 2
pfc['253'] = 2
pfc['254'] = 2
pfc['255'] = 1
pfc['256'] = 2
pfc['257'] = 1
pfc['258'] = 1
pfc['259'] = 1
pfc['263'] = 2
pfc['265'] = 1
pfc['266'] = 1
pfc['401'] = 2
pfc['402'] = 2
pfc['403'] = 2
pfc['404'] = 2
pfc['405'] = 2
pfc['406'] = 2
pfc['407'] = 2
pfc['408'] = 2
pfc['409'] = 1
pfc['411'] = 1
pfc['412'] = 1
pfc['413'] = 2
pfc['414'] = 2
pfc['415'] = 2
pfc['421'] = 2
pfc['422'] = 1
pfc['423'] = 2
pfc['424'] = 1
pfc['431'] = 1
pfc['432'] = 2
pfc['433'] = 2
pfc['436'] = 2
pfc['437'] = 2
pfc['441'] = 3
pfc['442'] = 2
pfc['443'] = 3
pfc['444'] = 2
pfc['445'] = 1
pfc['446'] = 1
pfc['451'] = 1
pfc['461'] = 2
pfc['462'] = 1
pfc['463'] = 1
pfc['464'] = 1
pfc['465'] = 1
pfc['467'] = 2
pfc['471'] = 2
pfc['472'] = 2
pfc['473'] = 2
pfc['474'] = 2
pfc['475'] = 2
pfc['476'] = 2
pfc['477'] = 2
pfc['478'] = 3
pfc['481'] = 1
pfc['482'] = 2
pfc['483'] = 1
pfc['484'] = 1
pfc['485'] = 1
pfc['491'] = 1
pfc['501'] = 1
pfc['502'] = 1
pfc['700'] = 2
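## usage sketch
# Minimal illustration of parsing a raw server line (the stub bot below is
# illustrative; a real jsb bot object supplies nick/name/server and config):
#
#   class StubBot(object):
#       nick = "testbot" ; name = "testbot" ; server = "irc.example.net"
#   ev = IrcEvent().parse(StubBot(), ":nick!user@host PRIVMSG #chan :hello")
#   # ev.nick == "nick", ev.channel == "#chan", ev.txt == "hello"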
|
insiderr/insiderr-app
|
refs/heads/master
|
app/modules/requests/packages/chardet/chardistribution.py
|
2
|
# ####################### BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
                if self._mCharToFreqOrder[order] < 512:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
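# Minimal usage sketch (the bytes below are one illustrative EUC-JP pair;
# not part of the original module):
#
#   analyser = EUCJPDistributionAnalysis()
#   analyser.feed(b"\xa4\xa2", 2)    # feed a single 2-byte character
#   analyser.get_confidence()        # SURE_NO until enough data has been seen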
|
marco-lancini/Showcase
|
refs/heads/master
|
django/contrib/admin/media/js/compress.py
|
784
|
#!/usr/bin/env python
import os
import optparse
import subprocess
import sys
here = os.path.dirname(__file__)
def main():
usage = "usage: %prog [file1..fileN]"
description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
parser = optparse.OptionParser(usage, description=description)
parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
help="path to Closure Compiler jar file")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose")
(options, args) = parser.parse_args()
compiler = os.path.expanduser(options.compiler)
if not os.path.exists(compiler):
sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)
if not args:
if options.verbose:
sys.stdout.write("No filenames given; defaulting to admin scripts\n")
args = [os.path.join(here, f) for f in [
"actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
for arg in args:
if not arg.endswith(".js"):
arg = arg + ".js"
to_compress = os.path.expanduser(arg)
if os.path.exists(to_compress):
to_compress_min = "%s.min.js" % "".join(arg.rsplit(".js"))
cmd = "java -jar %s --js %s --js_output_file %s" % (compiler, to_compress, to_compress_min)
if options.verbose:
sys.stdout.write("Running: %s\n" % cmd)
subprocess.call(cmd.split())
else:
sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)
if __name__ == '__main__':
main()
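# Example invocation (the compiler path is illustrative):
#
#   python compress.py -v -c ~/bin/compiler.jar actions.js inlines.js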
|
ancho85/pylint-playero-plugin
|
refs/heads/master
|
tests/test_tools.py
|
1
|
# -*- coding: utf-8 -*-
import unittest
from libs.tools import *
class TestTools(unittest.TestCase):
def test_latinToAscii(self):
assert latinToAscii(u"Hönig") == "Honig"
assert latinToAscii(u"€") == ""
def test_embeddedImport(self):
button = embeddedImport("Embedded_ButtonObj")
assert hasattr(button, "Embedded_ButtonObj")
def test_includeZipLib(self):
includeZipLib("cache.py") #no zip available
assert True
def test_isNumber(self):
assert isNumber(1) == True
assert isNumber("No") == False
assert isNumber(int) == False
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestTools))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
kamarush/android_kernel_lge_hammerhead
|
refs/heads/nougat-mr1
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
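# Example test specification (illustrative; each line is
# "command: opcode: thread-id: data", matching the parser above):
#
#   C: lock:   0: 0
#   W: locked: 0: 0
#   C: unlock: 0: 0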
|
TimofeyFox/GT-S7270_kernel
|
refs/heads/master
|
scripts/rt-tester/rt-tester.py
|
11005
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
Microsoft/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/lib/mmsystem.py
|
24
|
# Generated by h2py from d:/msdev/include/mmsystem.h
MAXPNAMELEN = 32
MAXERRORLENGTH = 256
MAX_JOYSTICKOEMVXDNAME = 260
MM_MICROSOFT = 1
MM_MIDI_MAPPER = 1
MM_WAVE_MAPPER = 2
MM_SNDBLST_MIDIOUT = 3
MM_SNDBLST_MIDIIN = 4
MM_SNDBLST_SYNTH = 5
MM_SNDBLST_WAVEOUT = 6
MM_SNDBLST_WAVEIN = 7
MM_ADLIB = 9
MM_MPU401_MIDIOUT = 10
MM_MPU401_MIDIIN = 11
MM_PC_JOYSTICK = 12
TIME_MS = 0x0001
TIME_SAMPLES = 0x0002
TIME_BYTES = 0x0004
TIME_SMPTE = 0x0008
TIME_MIDI = 0x0010
TIME_TICKS = 0x0020
MM_JOY1MOVE = 0x3A0
MM_JOY2MOVE = 0x3A1
MM_JOY1ZMOVE = 0x3A2
MM_JOY2ZMOVE = 0x3A3
MM_JOY1BUTTONDOWN = 0x3B5
MM_JOY2BUTTONDOWN = 0x3B6
MM_JOY1BUTTONUP = 0x3B7
MM_JOY2BUTTONUP = 0x3B8
MM_MCINOTIFY = 0x3B9
MM_WOM_OPEN = 0x3BB
MM_WOM_CLOSE = 0x3BC
MM_WOM_DONE = 0x3BD
MM_WIM_OPEN = 0x3BE
MM_WIM_CLOSE = 0x3BF
MM_WIM_DATA = 0x3C0
MM_MIM_OPEN = 0x3C1
MM_MIM_CLOSE = 0x3C2
MM_MIM_DATA = 0x3C3
MM_MIM_LONGDATA = 0x3C4
MM_MIM_ERROR = 0x3C5
MM_MIM_LONGERROR = 0x3C6
MM_MOM_OPEN = 0x3C7
MM_MOM_CLOSE = 0x3C8
MM_MOM_DONE = 0x3C9
MM_STREAM_OPEN = 0x3D4
MM_STREAM_CLOSE = 0x3D5
MM_STREAM_DONE = 0x3D6
MM_STREAM_ERROR = 0x3D7
MM_MOM_POSITIONCB = 0x3CA
MM_MIM_MOREDATA = 0x3CC
MM_MIXM_LINE_CHANGE = 0x3D0
MM_MIXM_CONTROL_CHANGE = 0x3D1
MMSYSERR_BASE = 0
WAVERR_BASE = 32
MIDIERR_BASE = 64
TIMERR_BASE = 96
JOYERR_BASE = 160
MCIERR_BASE = 256
MIXERR_BASE = 1024
MCI_STRING_OFFSET = 512
MCI_VD_OFFSET = 1024
MCI_CD_OFFSET = 1088
MCI_WAVE_OFFSET = 1152
MCI_SEQ_OFFSET = 1216
MMSYSERR_NOERROR = 0
MMSYSERR_ERROR = (MMSYSERR_BASE + 1)
MMSYSERR_BADDEVICEID = (MMSYSERR_BASE + 2)
MMSYSERR_NOTENABLED = (MMSYSERR_BASE + 3)
MMSYSERR_ALLOCATED = (MMSYSERR_BASE + 4)
MMSYSERR_INVALHANDLE = (MMSYSERR_BASE + 5)
MMSYSERR_NODRIVER = (MMSYSERR_BASE + 6)
MMSYSERR_NOMEM = (MMSYSERR_BASE + 7)
MMSYSERR_NOTSUPPORTED = (MMSYSERR_BASE + 8)
MMSYSERR_BADERRNUM = (MMSYSERR_BASE + 9)
MMSYSERR_INVALFLAG = (MMSYSERR_BASE + 10)
MMSYSERR_INVALPARAM = (MMSYSERR_BASE + 11)
MMSYSERR_HANDLEBUSY = (MMSYSERR_BASE + 12)
MMSYSERR_INVALIDALIAS = (MMSYSERR_BASE + 13)
MMSYSERR_BADDB = (MMSYSERR_BASE + 14)
MMSYSERR_KEYNOTFOUND = (MMSYSERR_BASE + 15)
MMSYSERR_READERROR = (MMSYSERR_BASE + 16)
MMSYSERR_WRITEERROR = (MMSYSERR_BASE + 17)
MMSYSERR_DELETEERROR = (MMSYSERR_BASE + 18)
MMSYSERR_VALNOTFOUND = (MMSYSERR_BASE + 19)
MMSYSERR_NODRIVERCB = (MMSYSERR_BASE + 20)
MMSYSERR_LASTERROR = (MMSYSERR_BASE + 20)
DRV_LOAD = 0x0001
DRV_ENABLE = 0x0002
DRV_OPEN = 0x0003
DRV_CLOSE = 0x0004
DRV_DISABLE = 0x0005
DRV_FREE = 0x0006
DRV_CONFIGURE = 0x0007
DRV_QUERYCONFIGURE = 0x0008
DRV_INSTALL = 0x0009
DRV_REMOVE = 0x000A
DRV_EXITSESSION = 0x000B
DRV_POWER = 0x000F
DRV_RESERVED = 0x0800
DRV_USER = 0x4000
DRVCNF_CANCEL = 0x0000
DRVCNF_OK = 0x0001
DRVCNF_RESTART = 0x0002
DRV_CANCEL = DRVCNF_CANCEL
DRV_OK = DRVCNF_OK
DRV_RESTART = DRVCNF_RESTART
DRV_MCI_FIRST = DRV_RESERVED
DRV_MCI_LAST = (DRV_RESERVED + 0xFFF)
CALLBACK_TYPEMASK = 0x00070000
CALLBACK_NULL = 0x00000000
CALLBACK_WINDOW = 0x00010000
CALLBACK_TASK = 0x00020000
CALLBACK_FUNCTION = 0x00030000
CALLBACK_THREAD = (CALLBACK_TASK)
CALLBACK_EVENT = 0x00050000
SND_SYNC = 0x0000
SND_ASYNC = 0x0001
SND_NODEFAULT = 0x0002
SND_MEMORY = 0x0004
SND_LOOP = 0x0008
SND_NOSTOP = 0x0010
SND_NOWAIT = 0x00002000
SND_ALIAS = 0x00010000
SND_ALIAS_ID = 0x00110000
SND_FILENAME = 0x00020000
SND_RESOURCE = 0x00040004
SND_PURGE = 0x0040
SND_APPLICATION = 0x0080
SND_ALIAS_START = 0
WAVERR_BADFORMAT = (WAVERR_BASE + 0)
WAVERR_STILLPLAYING = (WAVERR_BASE + 1)
WAVERR_UNPREPARED = (WAVERR_BASE + 2)
WAVERR_SYNC = (WAVERR_BASE + 3)
WAVERR_LASTERROR = (WAVERR_BASE + 3)
WOM_OPEN = MM_WOM_OPEN
WOM_CLOSE = MM_WOM_CLOSE
WOM_DONE = MM_WOM_DONE
WIM_OPEN = MM_WIM_OPEN
WIM_CLOSE = MM_WIM_CLOSE
WIM_DATA = MM_WIM_DATA
WAVE_MAPPER = -1 # 0xFFFFFFFF
WAVE_FORMAT_QUERY = 0x0001
WAVE_ALLOWSYNC = 0x0002
WAVE_MAPPED = 0x0004
WAVE_FORMAT_DIRECT = 0x0008
WAVE_FORMAT_DIRECT_QUERY = (WAVE_FORMAT_QUERY | WAVE_FORMAT_DIRECT)
WHDR_DONE = 0x00000001
WHDR_PREPARED = 0x00000002
WHDR_BEGINLOOP = 0x00000004
WHDR_ENDLOOP = 0x00000008
WHDR_INQUEUE = 0x00000010
WAVECAPS_PITCH = 0x0001
WAVECAPS_PLAYBACKRATE = 0x0002
WAVECAPS_VOLUME = 0x0004
WAVECAPS_LRVOLUME = 0x0008
WAVECAPS_SYNC = 0x0010
WAVECAPS_SAMPLEACCURATE = 0x0020
WAVECAPS_DIRECTSOUND = 0x0040
WAVE_INVALIDFORMAT = 0x00000000
WAVE_FORMAT_1M08 = 0x00000001
WAVE_FORMAT_1S08 = 0x00000002
WAVE_FORMAT_1M16 = 0x00000004
WAVE_FORMAT_1S16 = 0x00000008
WAVE_FORMAT_2M08 = 0x00000010
WAVE_FORMAT_2S08 = 0x00000020
WAVE_FORMAT_2M16 = 0x00000040
WAVE_FORMAT_2S16 = 0x00000080
WAVE_FORMAT_4M08 = 0x00000100
WAVE_FORMAT_4S08 = 0x00000200
WAVE_FORMAT_4M16 = 0x00000400
WAVE_FORMAT_4S16 = 0x00000800
WAVE_FORMAT_PCM = 1
WAVE_FORMAT_IEEE_FLOAT = 3
MIDIERR_UNPREPARED = (MIDIERR_BASE + 0)
MIDIERR_STILLPLAYING = (MIDIERR_BASE + 1)
MIDIERR_NOMAP = (MIDIERR_BASE + 2)
MIDIERR_NOTREADY = (MIDIERR_BASE + 3)
MIDIERR_NODEVICE = (MIDIERR_BASE + 4)
MIDIERR_INVALIDSETUP = (MIDIERR_BASE + 5)
MIDIERR_BADOPENMODE = (MIDIERR_BASE + 6)
MIDIERR_DONT_CONTINUE = (MIDIERR_BASE + 7)
MIDIERR_LASTERROR = (MIDIERR_BASE + 7)
MIDIPATCHSIZE = 128
MIM_OPEN = MM_MIM_OPEN
MIM_CLOSE = MM_MIM_CLOSE
MIM_DATA = MM_MIM_DATA
MIM_LONGDATA = MM_MIM_LONGDATA
MIM_ERROR = MM_MIM_ERROR
MIM_LONGERROR = MM_MIM_LONGERROR
MOM_OPEN = MM_MOM_OPEN
MOM_CLOSE = MM_MOM_CLOSE
MOM_DONE = MM_MOM_DONE
MIM_MOREDATA = MM_MIM_MOREDATA
MOM_POSITIONCB = MM_MOM_POSITIONCB
MIDI_IO_STATUS = 0x00000020
MIDI_CACHE_ALL = 1
MIDI_CACHE_BESTFIT = 2
MIDI_CACHE_QUERY = 3
MIDI_UNCACHE = 4
MOD_MIDIPORT = 1
MOD_SYNTH = 2
MOD_SQSYNTH = 3
MOD_FMSYNTH = 4
MOD_MAPPER = 5
MIDICAPS_VOLUME = 0x0001
MIDICAPS_LRVOLUME = 0x0002
MIDICAPS_CACHE = 0x0004
MIDICAPS_STREAM = 0x0008
MHDR_DONE = 0x00000001
MHDR_PREPARED = 0x00000002
MHDR_INQUEUE = 0x00000004
MHDR_ISSTRM = 0x00000008
MEVT_F_SHORT = 0x00000000
MEVT_F_LONG = -2147483648 # 0x80000000
MEVT_F_CALLBACK = 0x40000000
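# NOTE: h2py does not emit the BYTE/WORD/DWORD/MAKELONG helpers that the
# macro translations below (and DIBINDEX at the end of this file) rely on.
# The minimal mask helpers here are an editorial assumption, not part of the
# generated header, added so those functions are callable.
def BYTE(x): return x & 0xFF
def WORD(x): return x & 0xFFFF
def DWORD(x): return x & 0xFFFFFFFF
def MAKELONG(lo, hi): return WORD(lo) | (WORD(hi) << 16)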
def MEVT_EVENTTYPE(x): return ((BYTE)(((x)>>24)&0xFF))
def MEVT_EVENTPARM(x): return ((DWORD)((x)&0x00FFFFFF))
MIDISTRM_ERROR = (-2)
MIDIPROP_SET = -2147483648 # 0x80000000
MIDIPROP_GET = 0x40000000
MIDIPROP_TIMEDIV = 0x00000001
MIDIPROP_TEMPO = 0x00000002
AUXCAPS_CDAUDIO = 1
AUXCAPS_AUXIN = 2
AUXCAPS_VOLUME = 0x0001
AUXCAPS_LRVOLUME = 0x0002
MIXER_SHORT_NAME_CHARS = 16
MIXER_LONG_NAME_CHARS = 64
MIXERR_INVALLINE = (MIXERR_BASE + 0)
MIXERR_INVALCONTROL = (MIXERR_BASE + 1)
MIXERR_INVALVALUE = (MIXERR_BASE + 2)
MIXERR_LASTERROR = (MIXERR_BASE + 2)
MIXER_OBJECTF_HANDLE = -2147483648 # 0x80000000
MIXER_OBJECTF_MIXER = 0x00000000
MIXER_OBJECTF_HMIXER = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_MIXER)
MIXER_OBJECTF_WAVEOUT = 0x10000000
MIXER_OBJECTF_HWAVEOUT = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_WAVEOUT)
MIXER_OBJECTF_WAVEIN = 0x20000000
MIXER_OBJECTF_HWAVEIN = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_WAVEIN)
MIXER_OBJECTF_MIDIOUT = 0x30000000
MIXER_OBJECTF_HMIDIOUT = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_MIDIOUT)
MIXER_OBJECTF_MIDIIN = 0x40000000
MIXER_OBJECTF_HMIDIIN = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_MIDIIN)
MIXER_OBJECTF_AUX = 0x50000000
MIXERLINE_LINEF_ACTIVE = 0x00000001
MIXERLINE_LINEF_DISCONNECTED = 0x00008000
MIXERLINE_LINEF_SOURCE = -2147483648 # 0x80000000
MIXERLINE_COMPONENTTYPE_DST_FIRST = 0x00000000
MIXERLINE_COMPONENTTYPE_DST_UNDEFINED = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 0)
MIXERLINE_COMPONENTTYPE_DST_DIGITAL = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 1)
MIXERLINE_COMPONENTTYPE_DST_LINE = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 2)
MIXERLINE_COMPONENTTYPE_DST_MONITOR = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 3)
MIXERLINE_COMPONENTTYPE_DST_SPEAKERS = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 4)
MIXERLINE_COMPONENTTYPE_DST_HEADPHONES = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 5)
MIXERLINE_COMPONENTTYPE_DST_TELEPHONE = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 6)
MIXERLINE_COMPONENTTYPE_DST_WAVEIN = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 7)
MIXERLINE_COMPONENTTYPE_DST_VOICEIN = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 8)
MIXERLINE_COMPONENTTYPE_DST_LAST = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 8)
MIXERLINE_COMPONENTTYPE_SRC_FIRST = 0x00001000
MIXERLINE_COMPONENTTYPE_SRC_UNDEFINED = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 0)
MIXERLINE_COMPONENTTYPE_SRC_DIGITAL = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 1)
MIXERLINE_COMPONENTTYPE_SRC_LINE = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 2)
MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 3)
MIXERLINE_COMPONENTTYPE_SRC_SYNTHESIZER = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 4)
MIXERLINE_COMPONENTTYPE_SRC_COMPACTDISC = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 5)
MIXERLINE_COMPONENTTYPE_SRC_TELEPHONE = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 6)
MIXERLINE_COMPONENTTYPE_SRC_PCSPEAKER = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 7)
MIXERLINE_COMPONENTTYPE_SRC_WAVEOUT = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 8)
MIXERLINE_COMPONENTTYPE_SRC_AUXILIARY = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 9)
MIXERLINE_COMPONENTTYPE_SRC_ANALOG = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 10)
MIXERLINE_COMPONENTTYPE_SRC_LAST = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 10)
MIXERLINE_TARGETTYPE_UNDEFINED = 0
MIXERLINE_TARGETTYPE_WAVEOUT = 1
MIXERLINE_TARGETTYPE_WAVEIN = 2
MIXERLINE_TARGETTYPE_MIDIOUT = 3
MIXERLINE_TARGETTYPE_MIDIIN = 4
MIXERLINE_TARGETTYPE_AUX = 5
MIXER_GETLINEINFOF_DESTINATION = 0x00000000
MIXER_GETLINEINFOF_SOURCE = 0x00000001
MIXER_GETLINEINFOF_LINEID = 0x00000002
MIXER_GETLINEINFOF_COMPONENTTYPE = 0x00000003
MIXER_GETLINEINFOF_TARGETTYPE = 0x00000004
MIXER_GETLINEINFOF_QUERYMASK = 0x0000000F
MIXERCONTROL_CONTROLF_UNIFORM = 0x00000001
MIXERCONTROL_CONTROLF_MULTIPLE = 0x00000002
MIXERCONTROL_CONTROLF_DISABLED = -2147483648 # 0x80000000
MIXERCONTROL_CT_CLASS_MASK = -268435456 # 0xF0000000
MIXERCONTROL_CT_CLASS_CUSTOM = 0x00000000
MIXERCONTROL_CT_CLASS_METER = 0x10000000
MIXERCONTROL_CT_CLASS_SWITCH = 0x20000000
MIXERCONTROL_CT_CLASS_NUMBER = 0x30000000
MIXERCONTROL_CT_CLASS_SLIDER = 0x40000000
MIXERCONTROL_CT_CLASS_FADER = 0x50000000
MIXERCONTROL_CT_CLASS_TIME = 0x60000000
MIXERCONTROL_CT_CLASS_LIST = 0x70000000
MIXERCONTROL_CT_SUBCLASS_MASK = 0x0F000000
MIXERCONTROL_CT_SC_SWITCH_BOOLEAN = 0x00000000
MIXERCONTROL_CT_SC_SWITCH_BUTTON = 0x01000000
MIXERCONTROL_CT_SC_METER_POLLED = 0x00000000
MIXERCONTROL_CT_SC_TIME_MICROSECS = 0x00000000
MIXERCONTROL_CT_SC_TIME_MILLISECS = 0x01000000
MIXERCONTROL_CT_SC_LIST_SINGLE = 0x00000000
MIXERCONTROL_CT_SC_LIST_MULTIPLE = 0x01000000
MIXERCONTROL_CT_UNITS_MASK = 0x00FF0000
MIXERCONTROL_CT_UNITS_CUSTOM = 0x00000000
MIXERCONTROL_CT_UNITS_BOOLEAN = 0x00010000
MIXERCONTROL_CT_UNITS_SIGNED = 0x00020000
MIXERCONTROL_CT_UNITS_UNSIGNED = 0x00030000
MIXERCONTROL_CT_UNITS_DECIBELS = 0x00040000
MIXERCONTROL_CT_UNITS_PERCENT = 0x00050000
MIXERCONTROL_CONTROLTYPE_CUSTOM = (MIXERCONTROL_CT_CLASS_CUSTOM | MIXERCONTROL_CT_UNITS_CUSTOM)
MIXERCONTROL_CONTROLTYPE_BOOLEANMETER = (MIXERCONTROL_CT_CLASS_METER | MIXERCONTROL_CT_SC_METER_POLLED | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_SIGNEDMETER = (MIXERCONTROL_CT_CLASS_METER | MIXERCONTROL_CT_SC_METER_POLLED | MIXERCONTROL_CT_UNITS_SIGNED)
MIXERCONTROL_CONTROLTYPE_PEAKMETER = (MIXERCONTROL_CONTROLTYPE_SIGNEDMETER + 1)
MIXERCONTROL_CONTROLTYPE_UNSIGNEDMETER = (MIXERCONTROL_CT_CLASS_METER | MIXERCONTROL_CT_SC_METER_POLLED | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_BOOLEAN = (MIXERCONTROL_CT_CLASS_SWITCH | MIXERCONTROL_CT_SC_SWITCH_BOOLEAN | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_ONOFF = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 1)
MIXERCONTROL_CONTROLTYPE_MUTE = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 2)
MIXERCONTROL_CONTROLTYPE_MONO = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 3)
MIXERCONTROL_CONTROLTYPE_LOUDNESS = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 4)
MIXERCONTROL_CONTROLTYPE_STEREOENH = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 5)
MIXERCONTROL_CONTROLTYPE_BUTTON = (MIXERCONTROL_CT_CLASS_SWITCH | MIXERCONTROL_CT_SC_SWITCH_BUTTON | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_DECIBELS = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_DECIBELS)
MIXERCONTROL_CONTROLTYPE_SIGNED = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_SIGNED)
MIXERCONTROL_CONTROLTYPE_UNSIGNED = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_PERCENT = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_PERCENT)
MIXERCONTROL_CONTROLTYPE_SLIDER = (MIXERCONTROL_CT_CLASS_SLIDER | MIXERCONTROL_CT_UNITS_SIGNED)
MIXERCONTROL_CONTROLTYPE_PAN = (MIXERCONTROL_CONTROLTYPE_SLIDER + 1)
MIXERCONTROL_CONTROLTYPE_QSOUNDPAN = (MIXERCONTROL_CONTROLTYPE_SLIDER + 2)
MIXERCONTROL_CONTROLTYPE_FADER = (MIXERCONTROL_CT_CLASS_FADER | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_VOLUME = (MIXERCONTROL_CONTROLTYPE_FADER + 1)
MIXERCONTROL_CONTROLTYPE_BASS = (MIXERCONTROL_CONTROLTYPE_FADER + 2)
MIXERCONTROL_CONTROLTYPE_TREBLE = (MIXERCONTROL_CONTROLTYPE_FADER + 3)
MIXERCONTROL_CONTROLTYPE_EQUALIZER = (MIXERCONTROL_CONTROLTYPE_FADER + 4)
MIXERCONTROL_CONTROLTYPE_SINGLESELECT = (MIXERCONTROL_CT_CLASS_LIST | MIXERCONTROL_CT_SC_LIST_SINGLE | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_MUX = (MIXERCONTROL_CONTROLTYPE_SINGLESELECT + 1)
MIXERCONTROL_CONTROLTYPE_MULTIPLESELECT = (MIXERCONTROL_CT_CLASS_LIST | MIXERCONTROL_CT_SC_LIST_MULTIPLE | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_MIXER = (MIXERCONTROL_CONTROLTYPE_MULTIPLESELECT + 1)
MIXERCONTROL_CONTROLTYPE_MICROTIME = (MIXERCONTROL_CT_CLASS_TIME | MIXERCONTROL_CT_SC_TIME_MICROSECS | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_MILLITIME = (MIXERCONTROL_CT_CLASS_TIME | MIXERCONTROL_CT_SC_TIME_MILLISECS | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXER_GETLINECONTROLSF_ALL = 0x00000000
MIXER_GETLINECONTROLSF_ONEBYID = 0x00000001
MIXER_GETLINECONTROLSF_ONEBYTYPE = 0x00000002
MIXER_GETLINECONTROLSF_QUERYMASK = 0x0000000F
MIXER_GETCONTROLDETAILSF_VALUE = 0x00000000
MIXER_GETCONTROLDETAILSF_LISTTEXT = 0x00000001
MIXER_GETCONTROLDETAILSF_QUERYMASK = 0x0000000F
MIXER_SETCONTROLDETAILSF_VALUE = 0x00000000
MIXER_SETCONTROLDETAILSF_CUSTOM = 0x00000001
MIXER_SETCONTROLDETAILSF_QUERYMASK = 0x0000000F
TIMERR_NOERROR = (0)
TIMERR_NOCANDO = (TIMERR_BASE+1)
TIMERR_STRUCT = (TIMERR_BASE+33)
TIME_ONESHOT = 0x0000
TIME_PERIODIC = 0x0001
TIME_CALLBACK_FUNCTION = 0x0000
TIME_CALLBACK_EVENT_SET = 0x0010
TIME_CALLBACK_EVENT_PULSE = 0x0020
JOYERR_NOERROR = (0)
JOYERR_PARMS = (JOYERR_BASE+5)
JOYERR_NOCANDO = (JOYERR_BASE+6)
JOYERR_UNPLUGGED = (JOYERR_BASE+7)
JOY_BUTTON1 = 0x0001
JOY_BUTTON2 = 0x0002
JOY_BUTTON3 = 0x0004
JOY_BUTTON4 = 0x0008
JOY_BUTTON1CHG = 0x0100
JOY_BUTTON2CHG = 0x0200
JOY_BUTTON3CHG = 0x0400
JOY_BUTTON4CHG = 0x0800
JOY_BUTTON5 = 0x00000010
JOY_BUTTON6 = 0x00000020
JOY_BUTTON7 = 0x00000040
JOY_BUTTON8 = 0x00000080
JOY_BUTTON9 = 0x00000100
JOY_BUTTON10 = 0x00000200
JOY_BUTTON11 = 0x00000400
JOY_BUTTON12 = 0x00000800
JOY_BUTTON13 = 0x00001000
JOY_BUTTON14 = 0x00002000
JOY_BUTTON15 = 0x00004000
JOY_BUTTON16 = 0x00008000
JOY_BUTTON17 = 0x00010000
JOY_BUTTON18 = 0x00020000
JOY_BUTTON19 = 0x00040000
JOY_BUTTON20 = 0x00080000
JOY_BUTTON21 = 0x00100000
JOY_BUTTON22 = 0x00200000
JOY_BUTTON23 = 0x00400000
JOY_BUTTON24 = 0x00800000
JOY_BUTTON25 = 0x01000000
JOY_BUTTON26 = 0x02000000
JOY_BUTTON27 = 0x04000000
JOY_BUTTON28 = 0x08000000
JOY_BUTTON29 = 0x10000000
JOY_BUTTON30 = 0x20000000
JOY_BUTTON31 = 0x40000000
JOY_BUTTON32 = -2147483648 # 0x80000000
JOY_POVFORWARD = 0
JOY_POVRIGHT = 9000
JOY_POVBACKWARD = 18000
JOY_POVLEFT = 27000
JOY_RETURNX = 0x00000001
JOY_RETURNY = 0x00000002
JOY_RETURNZ = 0x00000004
JOY_RETURNR = 0x00000008
JOY_RETURNU = 0x00000010
JOY_RETURNV = 0x00000020
JOY_RETURNPOV = 0x00000040
JOY_RETURNBUTTONS = 0x00000080
JOY_RETURNRAWDATA = 0x00000100
JOY_RETURNPOVCTS = 0x00000200
JOY_RETURNCENTERED = 0x00000400
JOY_USEDEADZONE = 0x00000800
JOY_RETURNALL = (JOY_RETURNX | JOY_RETURNY | JOY_RETURNZ | \
JOY_RETURNR | JOY_RETURNU | JOY_RETURNV | \
JOY_RETURNPOV | JOY_RETURNBUTTONS)
JOY_CAL_READALWAYS = 0x00010000
JOY_CAL_READXYONLY = 0x00020000
JOY_CAL_READ3 = 0x00040000
JOY_CAL_READ4 = 0x00080000
JOY_CAL_READXONLY = 0x00100000
JOY_CAL_READYONLY = 0x00200000
JOY_CAL_READ5 = 0x00400000
JOY_CAL_READ6 = 0x00800000
JOY_CAL_READZONLY = 0x01000000
JOY_CAL_READRONLY = 0x02000000
JOY_CAL_READUONLY = 0x04000000
JOY_CAL_READVONLY = 0x08000000
JOYSTICKID1 = 0
JOYSTICKID2 = 1
JOYCAPS_HASZ = 0x0001
JOYCAPS_HASR = 0x0002
JOYCAPS_HASU = 0x0004
JOYCAPS_HASV = 0x0008
JOYCAPS_HASPOV = 0x0010
JOYCAPS_POV4DIR = 0x0020
JOYCAPS_POVCTS = 0x0040
MMIOERR_BASE = 256
MMIOERR_FILENOTFOUND = (MMIOERR_BASE + 1)
MMIOERR_OUTOFMEMORY = (MMIOERR_BASE + 2)
MMIOERR_CANNOTOPEN = (MMIOERR_BASE + 3)
MMIOERR_CANNOTCLOSE = (MMIOERR_BASE + 4)
MMIOERR_CANNOTREAD = (MMIOERR_BASE + 5)
MMIOERR_CANNOTWRITE = (MMIOERR_BASE + 6)
MMIOERR_CANNOTSEEK = (MMIOERR_BASE + 7)
MMIOERR_CANNOTEXPAND = (MMIOERR_BASE + 8)
MMIOERR_CHUNKNOTFOUND = (MMIOERR_BASE + 9)
MMIOERR_UNBUFFERED = (MMIOERR_BASE + 10)
MMIOERR_PATHNOTFOUND = (MMIOERR_BASE + 11)
MMIOERR_ACCESSDENIED = (MMIOERR_BASE + 12)
MMIOERR_SHARINGVIOLATION = (MMIOERR_BASE + 13)
MMIOERR_NETWORKERROR = (MMIOERR_BASE + 14)
MMIOERR_TOOMANYOPENFILES = (MMIOERR_BASE + 15)
MMIOERR_INVALIDFILE = (MMIOERR_BASE + 16)
CFSEPCHAR = ord('+')
MMIO_RWMODE = 0x00000003
MMIO_SHAREMODE = 0x00000070
MMIO_CREATE = 0x00001000
MMIO_PARSE = 0x00000100
MMIO_DELETE = 0x00000200
MMIO_EXIST = 0x00004000
MMIO_ALLOCBUF = 0x00010000
MMIO_GETTEMP = 0x00020000
MMIO_DIRTY = 0x10000000
MMIO_READ = 0x00000000
MMIO_WRITE = 0x00000001
MMIO_READWRITE = 0x00000002
MMIO_COMPAT = 0x00000000
MMIO_EXCLUSIVE = 0x00000010
MMIO_DENYWRITE = 0x00000020
MMIO_DENYREAD = 0x00000030
MMIO_DENYNONE = 0x00000040
MMIO_FHOPEN = 0x0010
MMIO_EMPTYBUF = 0x0010
MMIO_TOUPPER = 0x0010
MMIO_INSTALLPROC = 0x00010000
MMIO_GLOBALPROC = 0x10000000
MMIO_REMOVEPROC = 0x00020000
MMIO_UNICODEPROC = 0x01000000
MMIO_FINDPROC = 0x00040000
MMIO_FINDCHUNK = 0x0010
MMIO_FINDRIFF = 0x0020
MMIO_FINDLIST = 0x0040
MMIO_CREATERIFF = 0x0020
MMIO_CREATELIST = 0x0040
MMIOM_READ = MMIO_READ
MMIOM_WRITE = MMIO_WRITE
MMIOM_SEEK = 2
MMIOM_OPEN = 3
MMIOM_CLOSE = 4
MMIOM_WRITEFLUSH = 5
MMIOM_RENAME = 6
MMIOM_USER = 0x8000
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
MMIO_DEFAULTBUFFER = 8192
MCIERR_INVALID_DEVICE_ID = (MCIERR_BASE + 1)
MCIERR_UNRECOGNIZED_KEYWORD = (MCIERR_BASE + 3)
MCIERR_UNRECOGNIZED_COMMAND = (MCIERR_BASE + 5)
MCIERR_HARDWARE = (MCIERR_BASE + 6)
MCIERR_INVALID_DEVICE_NAME = (MCIERR_BASE + 7)
MCIERR_OUT_OF_MEMORY = (MCIERR_BASE + 8)
MCIERR_DEVICE_OPEN = (MCIERR_BASE + 9)
MCIERR_CANNOT_LOAD_DRIVER = (MCIERR_BASE + 10)
MCIERR_MISSING_COMMAND_STRING = (MCIERR_BASE + 11)
MCIERR_PARAM_OVERFLOW = (MCIERR_BASE + 12)
MCIERR_MISSING_STRING_ARGUMENT = (MCIERR_BASE + 13)
MCIERR_BAD_INTEGER = (MCIERR_BASE + 14)
MCIERR_PARSER_INTERNAL = (MCIERR_BASE + 15)
MCIERR_DRIVER_INTERNAL = (MCIERR_BASE + 16)
MCIERR_MISSING_PARAMETER = (MCIERR_BASE + 17)
MCIERR_UNSUPPORTED_FUNCTION = (MCIERR_BASE + 18)
MCIERR_FILE_NOT_FOUND = (MCIERR_BASE + 19)
MCIERR_DEVICE_NOT_READY = (MCIERR_BASE + 20)
MCIERR_INTERNAL = (MCIERR_BASE + 21)
MCIERR_DRIVER = (MCIERR_BASE + 22)
MCIERR_CANNOT_USE_ALL = (MCIERR_BASE + 23)
MCIERR_MULTIPLE = (MCIERR_BASE + 24)
MCIERR_EXTENSION_NOT_FOUND = (MCIERR_BASE + 25)
MCIERR_OUTOFRANGE = (MCIERR_BASE + 26)
MCIERR_FLAGS_NOT_COMPATIBLE = (MCIERR_BASE + 28)
MCIERR_FILE_NOT_SAVED = (MCIERR_BASE + 30)
MCIERR_DEVICE_TYPE_REQUIRED = (MCIERR_BASE + 31)
MCIERR_DEVICE_LOCKED = (MCIERR_BASE + 32)
MCIERR_DUPLICATE_ALIAS = (MCIERR_BASE + 33)
MCIERR_BAD_CONSTANT = (MCIERR_BASE + 34)
MCIERR_MUST_USE_SHAREABLE = (MCIERR_BASE + 35)
MCIERR_MISSING_DEVICE_NAME = (MCIERR_BASE + 36)
MCIERR_BAD_TIME_FORMAT = (MCIERR_BASE + 37)
MCIERR_NO_CLOSING_QUOTE = (MCIERR_BASE + 38)
MCIERR_DUPLICATE_FLAGS = (MCIERR_BASE + 39)
MCIERR_INVALID_FILE = (MCIERR_BASE + 40)
MCIERR_NULL_PARAMETER_BLOCK = (MCIERR_BASE + 41)
MCIERR_UNNAMED_RESOURCE = (MCIERR_BASE + 42)
MCIERR_NEW_REQUIRES_ALIAS = (MCIERR_BASE + 43)
MCIERR_NOTIFY_ON_AUTO_OPEN = (MCIERR_BASE + 44)
MCIERR_NO_ELEMENT_ALLOWED = (MCIERR_BASE + 45)
MCIERR_NONAPPLICABLE_FUNCTION = (MCIERR_BASE + 46)
MCIERR_ILLEGAL_FOR_AUTO_OPEN = (MCIERR_BASE + 47)
MCIERR_FILENAME_REQUIRED = (MCIERR_BASE + 48)
MCIERR_EXTRA_CHARACTERS = (MCIERR_BASE + 49)
MCIERR_DEVICE_NOT_INSTALLED = (MCIERR_BASE + 50)
MCIERR_GET_CD = (MCIERR_BASE + 51)
MCIERR_SET_CD = (MCIERR_BASE + 52)
MCIERR_SET_DRIVE = (MCIERR_BASE + 53)
MCIERR_DEVICE_LENGTH = (MCIERR_BASE + 54)
MCIERR_DEVICE_ORD_LENGTH = (MCIERR_BASE + 55)
MCIERR_NO_INTEGER = (MCIERR_BASE + 56)
MCIERR_WAVE_OUTPUTSINUSE = (MCIERR_BASE + 64)
MCIERR_WAVE_SETOUTPUTINUSE = (MCIERR_BASE + 65)
MCIERR_WAVE_INPUTSINUSE = (MCIERR_BASE + 66)
MCIERR_WAVE_SETINPUTINUSE = (MCIERR_BASE + 67)
MCIERR_WAVE_OUTPUTUNSPECIFIED = (MCIERR_BASE + 68)
MCIERR_WAVE_INPUTUNSPECIFIED = (MCIERR_BASE + 69)
MCIERR_WAVE_OUTPUTSUNSUITABLE = (MCIERR_BASE + 70)
MCIERR_WAVE_SETOUTPUTUNSUITABLE = (MCIERR_BASE + 71)
MCIERR_WAVE_INPUTSUNSUITABLE = (MCIERR_BASE + 72)
MCIERR_WAVE_SETINPUTUNSUITABLE = (MCIERR_BASE + 73)
MCIERR_SEQ_DIV_INCOMPATIBLE = (MCIERR_BASE + 80)
MCIERR_SEQ_PORT_INUSE = (MCIERR_BASE + 81)
MCIERR_SEQ_PORT_NONEXISTENT = (MCIERR_BASE + 82)
MCIERR_SEQ_PORT_MAPNODEVICE = (MCIERR_BASE + 83)
MCIERR_SEQ_PORT_MISCERROR = (MCIERR_BASE + 84)
MCIERR_SEQ_TIMER = (MCIERR_BASE + 85)
MCIERR_SEQ_PORTUNSPECIFIED = (MCIERR_BASE + 86)
MCIERR_SEQ_NOMIDIPRESENT = (MCIERR_BASE + 87)
MCIERR_NO_WINDOW = (MCIERR_BASE + 90)
MCIERR_CREATEWINDOW = (MCIERR_BASE + 91)
MCIERR_FILE_READ = (MCIERR_BASE + 92)
MCIERR_FILE_WRITE = (MCIERR_BASE + 93)
MCIERR_NO_IDENTITY = (MCIERR_BASE + 94)
MCIERR_CUSTOM_DRIVER_BASE = (MCIERR_BASE + 256)
MCI_FIRST = DRV_MCI_FIRST
MCI_OPEN = 0x0803
MCI_CLOSE = 0x0804
MCI_ESCAPE = 0x0805
MCI_PLAY = 0x0806
MCI_SEEK = 0x0807
MCI_STOP = 0x0808
MCI_PAUSE = 0x0809
MCI_INFO = 0x080A
MCI_GETDEVCAPS = 0x080B
MCI_SPIN = 0x080C
MCI_SET = 0x080D
MCI_STEP = 0x080E
MCI_RECORD = 0x080F
MCI_SYSINFO = 0x0810
MCI_BREAK = 0x0811
MCI_SAVE = 0x0813
MCI_STATUS = 0x0814
MCI_CUE = 0x0830
MCI_REALIZE = 0x0840
MCI_WINDOW = 0x0841
MCI_PUT = 0x0842
MCI_WHERE = 0x0843
MCI_FREEZE = 0x0844
MCI_UNFREEZE = 0x0845
MCI_LOAD = 0x0850
MCI_CUT = 0x0851
MCI_COPY = 0x0852
MCI_PASTE = 0x0853
MCI_UPDATE = 0x0854
MCI_RESUME = 0x0855
MCI_DELETE = 0x0856
MCI_USER_MESSAGES = (DRV_MCI_FIRST + 0x400)
MCI_LAST = 0x0FFF
MCI_DEVTYPE_VCR = 513
MCI_DEVTYPE_VIDEODISC = 514
MCI_DEVTYPE_OVERLAY = 515
MCI_DEVTYPE_CD_AUDIO = 516
MCI_DEVTYPE_DAT = 517
MCI_DEVTYPE_SCANNER = 518
MCI_DEVTYPE_ANIMATION = 519
MCI_DEVTYPE_DIGITAL_VIDEO = 520
MCI_DEVTYPE_OTHER = 521
MCI_DEVTYPE_WAVEFORM_AUDIO = 522
MCI_DEVTYPE_SEQUENCER = 523
MCI_DEVTYPE_FIRST = MCI_DEVTYPE_VCR
MCI_DEVTYPE_LAST = MCI_DEVTYPE_SEQUENCER
MCI_DEVTYPE_FIRST_USER = 0x1000
MCI_MODE_NOT_READY = (MCI_STRING_OFFSET + 12)
MCI_MODE_STOP = (MCI_STRING_OFFSET + 13)
MCI_MODE_PLAY = (MCI_STRING_OFFSET + 14)
MCI_MODE_RECORD = (MCI_STRING_OFFSET + 15)
MCI_MODE_SEEK = (MCI_STRING_OFFSET + 16)
MCI_MODE_PAUSE = (MCI_STRING_OFFSET + 17)
MCI_MODE_OPEN = (MCI_STRING_OFFSET + 18)
MCI_FORMAT_MILLISECONDS = 0
MCI_FORMAT_HMS = 1
MCI_FORMAT_MSF = 2
MCI_FORMAT_FRAMES = 3
MCI_FORMAT_SMPTE_24 = 4
MCI_FORMAT_SMPTE_25 = 5
MCI_FORMAT_SMPTE_30 = 6
MCI_FORMAT_SMPTE_30DROP = 7
MCI_FORMAT_BYTES = 8
MCI_FORMAT_SAMPLES = 9
MCI_FORMAT_TMSF = 10
def MCI_MSF_MINUTE(msf): return ((BYTE)(msf))
def MCI_MSF_SECOND(msf): return ((BYTE)(((WORD)(msf)) >> 8))
def MCI_MSF_FRAME(msf): return ((BYTE)((msf)>>16))
def MCI_TMSF_TRACK(tmsf): return ((BYTE)(tmsf))
def MCI_TMSF_MINUTE(tmsf): return ((BYTE)(((WORD)(tmsf)) >> 8))
def MCI_TMSF_SECOND(tmsf): return ((BYTE)((tmsf)>>16))
def MCI_TMSF_FRAME(tmsf): return ((BYTE)((tmsf)>>24))
def MCI_HMS_HOUR(hms): return ((BYTE)(hms))
def MCI_HMS_MINUTE(hms): return ((BYTE)(((WORD)(hms)) >> 8))
def MCI_HMS_SECOND(hms): return ((BYTE)((hms)>>16))
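# Sanity sketch for the packed-time accessors above (values are illustrative,
# not from the generated header): pack minute=2, second=30, frame=12.
#
#   msf = 2 | (30 << 8) | (12 << 16)
#   MCI_MSF_MINUTE(msf) == 2 ; MCI_MSF_SECOND(msf) == 30 ; MCI_MSF_FRAME(msf) == 12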
MCI_NOTIFY_SUCCESSFUL = 0x0001
MCI_NOTIFY_SUPERSEDED = 0x0002
MCI_NOTIFY_ABORTED = 0x0004
MCI_NOTIFY_FAILURE = 0x0008
MCI_NOTIFY = 0x00000001
MCI_WAIT = 0x00000002
MCI_FROM = 0x00000004
MCI_TO = 0x00000008
MCI_TRACK = 0x00000010
MCI_OPEN_SHAREABLE = 0x00000100
MCI_OPEN_ELEMENT = 0x00000200
MCI_OPEN_ALIAS = 0x00000400
MCI_OPEN_ELEMENT_ID = 0x00000800
MCI_OPEN_TYPE_ID = 0x00001000
MCI_OPEN_TYPE = 0x00002000
MCI_SEEK_TO_START = 0x00000100
MCI_SEEK_TO_END = 0x00000200
MCI_STATUS_ITEM = 0x00000100
MCI_STATUS_START = 0x00000200
MCI_STATUS_LENGTH = 0x00000001
MCI_STATUS_POSITION = 0x00000002
MCI_STATUS_NUMBER_OF_TRACKS = 0x00000003
MCI_STATUS_MODE = 0x00000004
MCI_STATUS_MEDIA_PRESENT = 0x00000005
MCI_STATUS_TIME_FORMAT = 0x00000006
MCI_STATUS_READY = 0x00000007
MCI_STATUS_CURRENT_TRACK = 0x00000008
MCI_INFO_PRODUCT = 0x00000100
MCI_INFO_FILE = 0x00000200
MCI_INFO_MEDIA_UPC = 0x00000400
MCI_INFO_MEDIA_IDENTITY = 0x00000800
MCI_INFO_NAME = 0x00001000
MCI_INFO_COPYRIGHT = 0x00002000
MCI_GETDEVCAPS_ITEM = 0x00000100
MCI_GETDEVCAPS_CAN_RECORD = 0x00000001
MCI_GETDEVCAPS_HAS_AUDIO = 0x00000002
MCI_GETDEVCAPS_HAS_VIDEO = 0x00000003
MCI_GETDEVCAPS_DEVICE_TYPE = 0x00000004
MCI_GETDEVCAPS_USES_FILES = 0x00000005
MCI_GETDEVCAPS_COMPOUND_DEVICE = 0x00000006
MCI_GETDEVCAPS_CAN_EJECT = 0x00000007
MCI_GETDEVCAPS_CAN_PLAY = 0x00000008
MCI_GETDEVCAPS_CAN_SAVE = 0x00000009
MCI_SYSINFO_QUANTITY = 0x00000100
MCI_SYSINFO_OPEN = 0x00000200
MCI_SYSINFO_NAME = 0x00000400
MCI_SYSINFO_INSTALLNAME = 0x00000800
MCI_SET_DOOR_OPEN = 0x00000100
MCI_SET_DOOR_CLOSED = 0x00000200
MCI_SET_TIME_FORMAT = 0x00000400
MCI_SET_AUDIO = 0x00000800
MCI_SET_VIDEO = 0x00001000
MCI_SET_ON = 0x00002000
MCI_SET_OFF = 0x00004000
MCI_SET_AUDIO_ALL = 0x00000000
MCI_SET_AUDIO_LEFT = 0x00000001
MCI_SET_AUDIO_RIGHT = 0x00000002
MCI_BREAK_KEY = 0x00000100
MCI_BREAK_HWND = 0x00000200
MCI_BREAK_OFF = 0x00000400
MCI_RECORD_INSERT = 0x00000100
MCI_RECORD_OVERWRITE = 0x00000200
MCI_SAVE_FILE = 0x00000100
MCI_LOAD_FILE = 0x00000100
MCI_VD_MODE_PARK = (MCI_VD_OFFSET + 1)
MCI_VD_MEDIA_CLV = (MCI_VD_OFFSET + 2)
MCI_VD_MEDIA_CAV = (MCI_VD_OFFSET + 3)
MCI_VD_MEDIA_OTHER = (MCI_VD_OFFSET + 4)
MCI_VD_FORMAT_TRACK = 0x4001
MCI_VD_PLAY_REVERSE = 0x00010000
MCI_VD_PLAY_FAST = 0x00020000
MCI_VD_PLAY_SPEED = 0x00040000
MCI_VD_PLAY_SCAN = 0x00080000
MCI_VD_PLAY_SLOW = 0x00100000
MCI_VD_SEEK_REVERSE = 0x00010000
MCI_VD_STATUS_SPEED = 0x00004002
MCI_VD_STATUS_FORWARD = 0x00004003
MCI_VD_STATUS_MEDIA_TYPE = 0x00004004
MCI_VD_STATUS_SIDE = 0x00004005
MCI_VD_STATUS_DISC_SIZE = 0x00004006
MCI_VD_GETDEVCAPS_CLV = 0x00010000
MCI_VD_GETDEVCAPS_CAV = 0x00020000
MCI_VD_SPIN_UP = 0x00010000
MCI_VD_SPIN_DOWN = 0x00020000
MCI_VD_GETDEVCAPS_CAN_REVERSE = 0x00004002
MCI_VD_GETDEVCAPS_FAST_RATE = 0x00004003
MCI_VD_GETDEVCAPS_SLOW_RATE = 0x00004004
MCI_VD_GETDEVCAPS_NORMAL_RATE = 0x00004005
MCI_VD_STEP_FRAMES = 0x00010000
MCI_VD_STEP_REVERSE = 0x00020000
MCI_VD_ESCAPE_STRING = 0x00000100
MCI_CDA_STATUS_TYPE_TRACK = 0x00004001
MCI_CDA_TRACK_AUDIO = (MCI_CD_OFFSET + 0)
MCI_CDA_TRACK_OTHER = (MCI_CD_OFFSET + 1)
MCI_WAVE_PCM = (MCI_WAVE_OFFSET + 0)
MCI_WAVE_MAPPER = (MCI_WAVE_OFFSET + 1)
MCI_WAVE_OPEN_BUFFER = 0x00010000
MCI_WAVE_SET_FORMATTAG = 0x00010000
MCI_WAVE_SET_CHANNELS = 0x00020000
MCI_WAVE_SET_SAMPLESPERSEC = 0x00040000
MCI_WAVE_SET_AVGBYTESPERSEC = 0x00080000
MCI_WAVE_SET_BLOCKALIGN = 0x00100000
MCI_WAVE_SET_BITSPERSAMPLE = 0x00200000
MCI_WAVE_INPUT = 0x00400000
MCI_WAVE_OUTPUT = 0x00800000
MCI_WAVE_STATUS_FORMATTAG = 0x00004001
MCI_WAVE_STATUS_CHANNELS = 0x00004002
MCI_WAVE_STATUS_SAMPLESPERSEC = 0x00004003
MCI_WAVE_STATUS_AVGBYTESPERSEC = 0x00004004
MCI_WAVE_STATUS_BLOCKALIGN = 0x00004005
MCI_WAVE_STATUS_BITSPERSAMPLE = 0x00004006
MCI_WAVE_STATUS_LEVEL = 0x00004007
MCI_WAVE_SET_ANYINPUT = 0x04000000
MCI_WAVE_SET_ANYOUTPUT = 0x08000000
MCI_WAVE_GETDEVCAPS_INPUTS = 0x00004001
MCI_WAVE_GETDEVCAPS_OUTPUTS = 0x00004002
MCI_SEQ_DIV_PPQN = (0 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_24 = (1 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_25 = (2 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_30DROP = (3 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_30 = (4 + MCI_SEQ_OFFSET)
MCI_SEQ_FORMAT_SONGPTR = 0x4001
MCI_SEQ_FILE = 0x4002
MCI_SEQ_MIDI = 0x4003
MCI_SEQ_SMPTE = 0x4004
MCI_SEQ_NONE = 65533
MCI_SEQ_MAPPER = 65535
MCI_SEQ_STATUS_TEMPO = 0x00004002
MCI_SEQ_STATUS_PORT = 0x00004003
MCI_SEQ_STATUS_SLAVE = 0x00004007
MCI_SEQ_STATUS_MASTER = 0x00004008
MCI_SEQ_STATUS_OFFSET = 0x00004009
MCI_SEQ_STATUS_DIVTYPE = 0x0000400A
MCI_SEQ_STATUS_NAME = 0x0000400B
MCI_SEQ_STATUS_COPYRIGHT = 0x0000400C
MCI_SEQ_SET_TEMPO = 0x00010000
MCI_SEQ_SET_PORT = 0x00020000
MCI_SEQ_SET_SLAVE = 0x00040000
MCI_SEQ_SET_MASTER = 0x00080000
MCI_SEQ_SET_OFFSET = 0x01000000
MCI_ANIM_OPEN_WS = 0x00010000
MCI_ANIM_OPEN_PARENT = 0x00020000
MCI_ANIM_OPEN_NOSTATIC = 0x00040000
MCI_ANIM_PLAY_SPEED = 0x00010000
MCI_ANIM_PLAY_REVERSE = 0x00020000
MCI_ANIM_PLAY_FAST = 0x00040000
MCI_ANIM_PLAY_SLOW = 0x00080000
MCI_ANIM_PLAY_SCAN = 0x00100000
MCI_ANIM_STEP_REVERSE = 0x00010000
MCI_ANIM_STEP_FRAMES = 0x00020000
MCI_ANIM_STATUS_SPEED = 0x00004001
MCI_ANIM_STATUS_FORWARD = 0x00004002
MCI_ANIM_STATUS_HWND = 0x00004003
MCI_ANIM_STATUS_HPAL = 0x00004004
MCI_ANIM_STATUS_STRETCH = 0x00004005
MCI_ANIM_INFO_TEXT = 0x00010000
MCI_ANIM_GETDEVCAPS_CAN_REVERSE = 0x00004001
MCI_ANIM_GETDEVCAPS_FAST_RATE = 0x00004002
MCI_ANIM_GETDEVCAPS_SLOW_RATE = 0x00004003
MCI_ANIM_GETDEVCAPS_NORMAL_RATE = 0x00004004
MCI_ANIM_GETDEVCAPS_PALETTES = 0x00004006
MCI_ANIM_GETDEVCAPS_CAN_STRETCH = 0x00004007
MCI_ANIM_GETDEVCAPS_MAX_WINDOWS = 0x00004008
MCI_ANIM_REALIZE_NORM = 0x00010000
MCI_ANIM_REALIZE_BKGD = 0x00020000
MCI_ANIM_WINDOW_HWND = 0x00010000
MCI_ANIM_WINDOW_STATE = 0x00040000
MCI_ANIM_WINDOW_TEXT = 0x00080000
MCI_ANIM_WINDOW_ENABLE_STRETCH = 0x00100000
MCI_ANIM_WINDOW_DISABLE_STRETCH = 0x00200000
MCI_ANIM_WINDOW_DEFAULT = 0x00000000
MCI_ANIM_RECT = 0x00010000
MCI_ANIM_PUT_SOURCE = 0x00020000
MCI_ANIM_PUT_DESTINATION = 0x00040000
MCI_ANIM_WHERE_SOURCE = 0x00020000
MCI_ANIM_WHERE_DESTINATION = 0x00040000
MCI_ANIM_UPDATE_HDC = 0x00020000
MCI_OVLY_OPEN_WS = 0x00010000
MCI_OVLY_OPEN_PARENT = 0x00020000
MCI_OVLY_STATUS_HWND = 0x00004001
MCI_OVLY_STATUS_STRETCH = 0x00004002
MCI_OVLY_INFO_TEXT = 0x00010000
MCI_OVLY_GETDEVCAPS_CAN_STRETCH = 0x00004001
MCI_OVLY_GETDEVCAPS_CAN_FREEZE = 0x00004002
MCI_OVLY_GETDEVCAPS_MAX_WINDOWS = 0x00004003
MCI_OVLY_WINDOW_HWND = 0x00010000
MCI_OVLY_WINDOW_STATE = 0x00040000
MCI_OVLY_WINDOW_TEXT = 0x00080000
MCI_OVLY_WINDOW_ENABLE_STRETCH = 0x00100000
MCI_OVLY_WINDOW_DISABLE_STRETCH = 0x00200000
MCI_OVLY_WINDOW_DEFAULT = 0x00000000
MCI_OVLY_RECT = 0x00010000
MCI_OVLY_PUT_SOURCE = 0x00020000
MCI_OVLY_PUT_DESTINATION = 0x00040000
MCI_OVLY_PUT_FRAME = 0x00080000
MCI_OVLY_PUT_VIDEO = 0x00100000
MCI_OVLY_WHERE_SOURCE = 0x00020000
MCI_OVLY_WHERE_DESTINATION = 0x00040000
MCI_OVLY_WHERE_FRAME = 0x00080000
MCI_OVLY_WHERE_VIDEO = 0x00100000
SELECTDIB = 41
def DIBINDEX(n): return MAKELONG(n, 0x10FF)
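For context, DIBINDEX packs a palette index into a COLORREF-style value via MAKELONG, which is presumably defined elsewhere in this constants module. A minimal sketch of the standard Windows semantics, shown only for illustration and not taken from this file:
# Sketch only: MAKELONG as defined by the Windows headers; the real
# definition lives elsewhere in this module.
def MAKELONG(low, high):
    return ((high & 0xFFFF) << 16) | (low & 0xFFFF)
def DIBINDEX(n):
    return MAKELONG(n, 0x10FF)
assert DIBINDEX(5) == 0x10FF0005  # palette entry 5, tagged as a DIB index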
|
hudokkow/kodi-cmake
|
refs/heads/master
|
lib/libUPnP/Neptune/Extras/Scripts/GenTrustAnchorsTables.py
|
264
|
#! /usr/bin/env python
###
### Generate trust anchor tables from a text file
### like, for example, TLS-Trust-Anchors-base.crt
### and TLS-Trust-Anchors-extended.crt, located under Extras/Data
###
### imports
import sys
import base64
### generate C files with built-in TLS trust anchors
FILE_HEADER = """/*****************************************************************
|
| Neptune - Trust Anchors
|
| This file is automatically generated by a script, do not edit!
|
| Copyright (c) 2002-2010, Axiomatic Systems, LLC.
| All rights reserved.
|
| Redistribution and use in source and binary forms, with or without
| modification, are permitted provided that the following conditions are met:
| * Redistributions of source code must retain the above copyright
| notice, this list of conditions and the following disclaimer.
| * Redistributions in binary form must reproduce the above copyright
| notice, this list of conditions and the following disclaimer in the
| documentation and/or other materials provided with the distribution.
| * Neither the name of Axiomatic Systems nor the
| names of its contributors may be used to endorse or promote products
| derived from this software without specific prior written permission.
|
| THIS SOFTWARE IS PROVIDED BY AXIOMATIC SYSTEMS ''AS IS'' AND ANY
| EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
| WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
| DISCLAIMED. IN NO EVENT SHALL AXIOMATIC SYSTEMS BE LIABLE FOR ANY
| DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
| (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
| LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
| ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
| SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
****************************************************************/
"""
if len(sys.argv) != 3:
print "usage: GenTrustAnchosTable.py <input-file> <category>"
print " where category may be 'Base', 'Extended', or other"
sys.exit(1)
INPUT_FILE = sys.argv[1]
CERT_CATEGORY = sys.argv[2]
digest_oid_pattern = "\x2a\x86\x48\x86\xf7\x0d\x01\x01"
in_cert = False
prev = ''
prev_prev = ''
index = 0
Certs = []
CertNames = []
CertComments = []
for line in open(INPUT_FILE).readlines():
if line.startswith('-----BEGIN CERTIFICATE-----'):
in_cert = True
b64 = ''
continue
if line.startswith('-----END CERTIFICATE-----'):
cert = base64.decodestring(b64)
if not digest_oid_pattern in cert:
sys.stderr.write("-------- skipping cert (digest not supported) -------\n")
continue
Certs.append(cert)
cert_name = 'NptTlsTrustAnchor_%s_%04d' % (CERT_CATEGORY, index)
#cert_comment = eval('"'+prev_prev.rstrip('\r\n')+'"')
cert_comment = prev_prev.rstrip('\r\n')
CertNames.append(cert_name)
CertComments.append(cert_comment)
out = open(CERT_CATEGORY+'/'+cert_name+'.cpp', 'w+b')
out.write(FILE_HEADER)
out.write('/* %s */\n' % (cert_comment))
out.write('const unsigned char %s_Data[%d] = {\n' % (cert_name, len(cert)))
counter = 0
sep = ''
for byte in cert:
out.write('%s0x%02x' % (sep, ord(byte)))
counter += 1
sep = ','
if counter == 8:
out.write('\n')
counter = 0
in_cert = False
out.write('};\n')
out.write('const unsigned int %s_Size = %d;\n' % (cert_name, len(cert)))
index += 1
out.close()
continue
if in_cert:
b64 += line.rstrip('\r\n')
else:
prev_prev = prev
prev = line
out = open('NptTlsDefaultTrustAnchors'+CERT_CATEGORY+'.cpp', 'w+b')
out.write(FILE_HEADER)
out.write("/* This file is automatically generated by GenTrustAnchorsTables.py, do not edit */\n\n")
out.write('#include "NptTls.h"\n')
total_size = 0
for i in xrange(0, len(CertNames)):
out.write('#include "'+CERT_CATEGORY+'/'+CertNames[i]+'.cpp" /* '+CertComments[i]+' */\n')
total_size += len(Certs[i])
out.write("/* total anchors size ="+ str(total_size)+" */\n\n")
out.write('const NPT_TlsTrustAnchorData NptTlsDefaultTrustAnchors%s[%s] = {\r\n' % (CERT_CATEGORY, 1+len(Certs)))
sep = ' '
for i in xrange(0, len(Certs)):
out.write('%s{ %s_Data, %s_Size} /* %s */' % (sep, CertNames[i], CertNames[i], CertComments[i]))
sep = ',\r\n '
out.write(sep+'{0, 0} /* sentinel */\n')
out.write('};\n')
out.close()
out = open('NptTlsDefaultTrustAnchors'+CERT_CATEGORY+'.h', 'w+b')
out.write(FILE_HEADER)
out.write("/* This file is automatically generated by GenTrustAnchorsTables.py, do not edit */\n\n")
out.write('#include "NptTls.h"\n\n')
out.write('extern const NPT_TlsTrustAnchorData NptTlsDefaultTrustAnchors%s[%d];\n\n' % (CERT_CATEGORY, 1+len(Certs)))
for i in xrange(0, len(CertNames)):
out.write('/* '+CertComments[i]+' */\n')
out.write('extern const unsigned int %s_Size;\n' % (CertNames[i]))
out.write('extern const unsigned char %s_Data[];\n\n' % (CertNames[i]))
out.close()
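For reference, a hedged sketch of driving the generator exactly as its usage message describes; the input file name comes from the comment at the top of the script, and it is an assumption on my part that a directory named after the category ('Base' here) already exists to receive the per-certificate .cpp files:
# Hypothetical invocation (Python 2, matching the print-statement syntax above):
#   python GenTrustAnchorsTables.py TLS-Trust-Anchors-base.crt Base
# Expected outputs: Base/NptTlsTrustAnchor_Base_0000.cpp, ... plus
# NptTlsDefaultTrustAnchorsBase.cpp and NptTlsDefaultTrustAnchorsBase.h
# in the current directory.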
|
mindnervestech/mnrp
|
refs/heads/master
|
addons/website_forum_doc/models/__init__.py
|
432
|
# -*- coding: utf-8 -*-
import documentation
|
misttechnologies/selenium
|
refs/heads/master
|
py/test/selenium/webdriver/firefox/ff_select_support_class_tests.py
|
29
|
#!/usr/bin/python
#
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import select_class_tests
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
webserver = SimpleWebServer()
webserver.start()
FirefoxSelectElementHandlingTests.webserver = webserver
FirefoxSelectElementHandlingTests.driver = webdriver.Firefox()
class FirefoxSelectElementHandlingTests(select_class_tests.WebDriverSelectSupportTests):
pass
def teardown_module(module):
FirefoxSelectElementHandlingTests.driver.quit()
FirefoxSelectElementHandlingTests.webserver.stop()
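The module above follows the nose/pytest module-level fixture convention; a minimal sketch of the lifecycle those hooks imply (illustrative only, not real runner code):
# Sketch: what the test runner effectively does with the hooks above.
def _module_lifecycle(module):
    setup_module(module)            # start SimpleWebServer and one Firefox driver
    try:
        pass                        # ...each test in the module runs against them...
    finally:
        teardown_module(module)     # quit the driver, stop the web server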
|
edx-solutions/edx-platform
|
refs/heads/master
|
common/djangoapps/student/middleware.py
|
4
|
"""
Middleware that checks user standing for the purpose of keeping users with
disabled accounts from accessing the site.
"""
from django.conf import settings
from django.http import HttpResponseForbidden
from django.utils.deprecation import MiddlewareMixin
from django.utils.translation import ugettext as _
from openedx.core.djangolib.markup import HTML, Text
from student.models import UserStanding
class UserStandingMiddleware(MiddlewareMixin):
"""
Checks a user's standing on request. Returns a 403 if the user's
status is 'disabled'.
"""
def process_request(self, request):
user = request.user
try:
user_account = UserStanding.objects.get(user=user.id)
# because user is a unique field in UserStanding, there will be either
# one or zero user_accounts associated with a given user
except UserStanding.DoesNotExist:
pass
else:
if user_account.account_status == UserStanding.ACCOUNT_DISABLED:
msg = Text(_(
'Your account has been disabled. If you believe '
'this was done in error, please contact us at '
'{support_email}'
)).format(
support_email=HTML(u'<a href="mailto:{address}?subject={subject_line}">{address}</a>').format(
address=settings.DEFAULT_FEEDBACK_EMAIL,
subject_line=_('Disabled Account'),
),
)
return HttpResponseForbidden(msg)
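A minimal sketch of wiring this middleware into a Django settings module (an assumption about the deployment, not taken from the source); it must run after AuthenticationMiddleware so that request.user is populated:
MIDDLEWARE = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'student.middleware.UserStandingMiddleware',  # dotted path mirrors this file's location
]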
|
Leila20/django
|
refs/heads/master
|
tests/select_related/__init__.py
|
12133432
| |
ngageoint/scale
|
refs/heads/master
|
scale/storage/brokers/__init__.py
|
12133432
| |
RDXT/django-userena
|
refs/heads/master
|
userena/contrib/umessages/templatetags/__init__.py
|
12133432
| |
sfrenza/test-for-bot
|
refs/heads/master
|
venv/Lib/site-packages/pip/_vendor/packaging/markers.py
|
324
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pip._vendor.pyparsing import (
ParseException, ParseResults, stringStart, stringEnd,
)
from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pip._vendor.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
"Marker", "default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found; users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
raise NotImplementedError
class Variable(Node):
def serialize(self):
return str(self)
class Value(Node):
def serialize(self):
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
return str(self)
VARIABLE = (
L("implementation_version") |
L("platform_python_implementation") |
L("implementation_name") |
L("python_full_version") |
L("platform_release") |
L("platform_version") |
L("platform_machine") |
L("platform_system") |
L("python_version") |
L("sys_platform") |
L("os_name") |
L("os.name") | # PEP-345
L("sys.platform") | # PEP-345
L("platform.version") | # PEP-345
L("platform.machine") | # PEP-345
L("platform.python_implementation") | # PEP-345
L("python_implementation") | # undocumented setuptools legacy
L("extra")
)
ALIASES = {
'os.name': 'os_name',
'sys.platform': 'sys_platform',
'platform.version': 'platform_version',
'platform.machine': 'platform_machine',
'platform.python_implementation': 'platform_python_implementation',
'python_implementation': 'platform_python_implementation'
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") |
L("==") |
L(">=") |
L("<=") |
L("!=") |
L("~=") |
L(">") |
L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself its own list. In that case we want to skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (isinstance(marker, list) and len(marker) == 1 and
isinstance(marker[0], (list, tuple))):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs, op, rhs):
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
groups = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
version = '{0.major}.{0.minor}.{0.micro}'.format(info)
kind = info.releaselevel
if kind != 'final':
version += kind[0] + str(info.serial)
return version
def default_environment():
if hasattr(sys, 'implementation'):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
iver = '0'
implementation_name = ''
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": platform.python_version()[:3],
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc:e.loc + 8])
raise InvalidMarker(err_str)
def __str__(self):
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
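A short usage sketch of the public API defined above; the marker strings are illustrative, not taken from the source:
from pip._vendor.packaging.markers import Marker
m = Marker("python_version >= '2.7' and os_name == 'posix'")
m.evaluate()                            # evaluates against default_environment()
m.evaluate({'python_version': '2.6'})   # override part of the environment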
|
savkov/randhy
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup
version = open('VERSION').read()
long_description = open('README.md').read()
__author__ = 'Sasho Savkov'
__credits__ = ["William Morgan"]
__license__ = "MIT"
__version__ = version
__email__ = "me@sasho.io"
__status__ = "Production"
setup(
name='randhy',
version=version,
description='Approximate randomisation library',
long_description=long_description,
long_description_content_type='text/markdown', # This is important!
author='Sasho Savkov',
author_email='me@sasho.io',
url='https://www.github.com/asavkov/randhy/',
package_dir={'': 'src'},
packages=['randhy']
)
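A note on package_dir={'': 'src'}: it maps the root package namespace to the src/ directory, so the implied project layout (an assumption inferred from the setup() arguments, not verified against the repository) is:
# setup.py
# VERSION
# README.md
# src/
#     randhy/
#         __init__.py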
|
sharhar/USB-Thing
|
refs/heads/master
|
UpdaterFiles/Lib/python-3.5.1.amd64/Lib/unittest/test/test_case.py
|
8
|
import contextlib
import difflib
import pprint
import pickle
import re
import sys
import logging
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from unittest.test.support import (
TestEquality, TestHashing, LoggingResult, LegacyLoggingResult,
ResultWithNoStartTestRunStopTestRun
)
from test.support import captured_stderr
log_foo = logging.getLogger('foo')
log_foobar = logging.getLogger('foo.bar')
log_quux = logging.getLogger('quux')
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
def _check_call_order__subtests(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
1 / 0
# Order is the following:
# i=1 => subtest failure
# i=2, j=2 => subtest success
# i=2, j=3 => subtest error
# i=3, j=2 => subtest error
# i=3, j=3 => subtest success
# toplevel => error
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'addSubTestSuccess',
'addSubTestFailure', 'addSubTestFailure',
'addSubTestSuccess', 'addError', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def test_run_call_order__subtests_legacy(self):
# With a legacy result object (without an addSubTest method),
# test execution stops after the first subtest failure.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests_success(self):
events = []
result = LoggingResult(events)
# The 6 subtest successes are individually recorded, in addition
# to the whole test success.
expected = (['startTest', 'setUp', 'test', 'tearDown']
+ 6 * ['addSubTestSuccess']
+ ['addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_success_legacy(self):
# With a legacy result, only the whole test success is recorded.
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSuccess', 'stopTest']
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_failfast(self):
events = []
result = LoggingResult(events)
result.failfast = True
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_subtests_failfast(self):
# Ensure proper test flow with subtests and failfast (issue #22894)
events = []
class Foo(unittest.TestCase):
def test_a(self):
with self.subTest():
events.append('a1')
events.append('a2')
def test_b(self):
with self.subTest():
events.append('b1')
with self.subTest():
self.fail('failure')
events.append('b2')
def test_c(self):
events.append('c')
result = unittest.TestResult()
result.failfast = True
suite = unittest.makeSuite(Foo)
suite.run(result)
expected = ['a1', 'a2', 'b1']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-bit or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No, this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance, but since it's a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertLess(len(msg), len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
# disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**5
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**4)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**6)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertEqual_shorten(self):
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 0
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 100
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[35 chars]' + 'x' * 61
self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
self.assertEqual(s + 'a', s + 'a')
p = 'y' * 50
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[85 chars]xxxxxxxxxxx'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p))
p = 'y' * 100
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[91 chars]xxxxx'
d = 'y' * 40 + '[56 chars]yyyy'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d))
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that an iterator of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
# comparing heterogeneous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesCallable(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaises(ExceptionMock, Stub)
# A tuple of exception classes is accepted
self.assertRaises((ValueError, ExceptionMock), Stub)
# *args and **kwargs also work
self.assertRaises(ValueError, int, '19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
self.assertRaises(ExceptionMock, lambda: 0)
# Failure when the function is None
with self.assertWarns(DeprecationWarning):
self.assertRaises(ExceptionMock, None)
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesContext(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
with self.assertRaises(ExceptionMock):
Stub()
# A tuple of exception classes is accepted
with self.assertRaises((ValueError, ExceptionMock)) as cm:
Stub()
# The context manager exposes caught exception
self.assertIsInstance(cm.exception, ExceptionMock)
self.assertEqual(cm.exception.args[0], 'We expect')
# *args and **kwargs also work
with self.assertRaises(ValueError):
int('19', base=8)
# Failure when no exception is raised
with self.assertRaises(self.failureException):
with self.assertRaises(ExceptionMock):
pass
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaises(ExceptionMock, msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertRaises(ExceptionMock, foobar=42):
pass
# Failure when another exception is raised
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaises()
with self.assertRaises(TypeError):
self.assertRaises(1)
with self.assertRaises(TypeError):
self.assertRaises(object)
with self.assertRaises(TypeError):
self.assertRaises((ValueError, 1))
with self.assertRaises(TypeError):
self.assertRaises((ValueError, object))
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
with self.assertWarns(DeprecationWarning):
self.assertRaisesRegex(ExceptionMock, 'expect$', None)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaisesRegex(Exception, 'expect', msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertRaisesRegex(Exception, 'expect', foobar=42):
pass
def testAssertRaisesRegexInvalidRegex(self):
# Issue 20145.
class MyExc(Exception):
pass
self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
def testAssertWarnsRegexInvalidRegex(self):
# Issue 20145.
class MyWarn(Warning):
pass
self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertRaisesRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaisesRegex()
with self.assertRaises(TypeError):
self.assertRaisesRegex(ValueError)
with self.assertRaises(TypeError):
self.assertRaisesRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, 1), 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, object), 'expect')
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
# Success when the right warning is triggered, even several times
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
# A tuple of warning classes is accepted
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
# *args and **kwargs also work
self.assertWarns(RuntimeWarning,
warnings.warn, "foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda: 0)
# Failure when the function is None
with self.assertWarns(DeprecationWarning):
self.assertWarns(RuntimeWarning, None)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
# Believe it or not, it is preferable to duplicate all tests above,
        # to make sure the __warningregistry__ machinery is circumvented correctly.
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
# A tuple of warning classes is accepted
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
# The context manager exposes various useful attributes
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foo")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Same with several warnings
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn("foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertWarns(RuntimeWarning, msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertWarns(RuntimeWarning, foobar=42):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarns()
with self.assertRaises(TypeError):
self.assertWarns(1)
with self.assertRaises(TypeError):
self.assertWarns(object)
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, 1))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, object))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, Exception))
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "foox")
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
lambda: 0)
# Failure when the function is None
with self.assertWarns(DeprecationWarning):
self.assertWarnsRegex(RuntimeWarning, "o+", None)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, "o+",
_runtime_warn, "foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
# Same as above, but with assertWarnsRegex as a context manager
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
_runtime_warn("foox")
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foox")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
pass
# Custom message
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertWarnsRegex(RuntimeWarning, 'o+', msg='foobar'):
pass
# Invalid keyword argument
with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
self.assertRaises(AssertionError):
with self.assertWarnsRegex(RuntimeWarning, 'o+', foobar=42):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, "o+"):
_runtime_warn("foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
def testAssertWarnsRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarnsRegex()
with self.assertRaises(TypeError):
self.assertWarnsRegex(UserWarning)
with self.assertRaises(TypeError):
self.assertWarnsRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, 1), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, object), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, Exception), 'expect')
@contextlib.contextmanager
def assertNoStderr(self):
with captured_stderr() as buf:
yield
self.assertEqual(buf.getvalue(), "")
def assertLogRecords(self, records, matches):
self.assertEqual(len(records), len(matches))
for rec, match in zip(records, matches):
self.assertIsInstance(rec, logging.LogRecord)
for k, v in match.items():
self.assertEqual(getattr(rec, k), v)
def testAssertLogsDefaults(self):
# defaults: root logger, level INFO
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
self.assertEqual(cm.output, ["INFO:foo:1"])
self.assertLogRecords(cm.records, [{'name': 'foo'}])
def testAssertLogsTwoMatchingMessages(self):
# Same, but with two matching log messages
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "WARNING:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'quux'}])
def checkAssertLogsPerLevel(self, level):
# Check level filtering
with self.assertNoStderr():
with self.assertLogs(level=level) as cm:
log_foo.warning("1")
log_foobar.error("2")
log_quux.critical("3")
self.assertEqual(cm.output, ["ERROR:foo.bar:2", "CRITICAL:quux:3"])
self.assertLogRecords(cm.records,
[{'name': 'foo.bar'}, {'name': 'quux'}])
def testAssertLogsPerLevel(self):
self.checkAssertLogsPerLevel(logging.ERROR)
self.checkAssertLogsPerLevel('ERROR')
def checkAssertLogsPerLogger(self, logger):
# Check per-logger filtering
with self.assertNoStderr():
with self.assertLogs(level='DEBUG') as outer_cm:
with self.assertLogs(logger, level='DEBUG') as cm:
log_foo.info("1")
log_foobar.debug("2")
log_quux.warning("3")
self.assertEqual(cm.output, ["INFO:foo:1", "DEBUG:foo.bar:2"])
self.assertLogRecords(cm.records,
[{'name': 'foo'}, {'name': 'foo.bar'}])
# The outer catchall caught the quux log
self.assertEqual(outer_cm.output, ["WARNING:quux:3"])
def testAssertLogsPerLogger(self):
self.checkAssertLogsPerLogger(logging.getLogger('foo'))
self.checkAssertLogsPerLogger('foo')
def testAssertLogsFailureNoLogs(self):
# Failure due to no logs
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs():
pass
def testAssertLogsFailureLevelTooHigh(self):
# Failure due to level too high
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs(level='WARNING'):
log_foo.info("1")
def testAssertLogsFailureMismatchingLogger(self):
# Failure due to mismatching logger (and the logged message is
# passed through)
with self.assertLogs('quux', level='ERROR'):
with self.assertRaises(self.failureException):
with self.assertLogs('foo'):
log_quux.error("1")
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
old = (
(self.failIfEqual, (3, 5)),
(self.assertNotEquals, (3, 5)),
(self.failUnlessEqual, (3, 3)),
(self.assertEquals, (3, 3)),
(self.failUnlessAlmostEqual, (2.0, 2.0)),
(self.assertAlmostEquals, (2.0, 2.0)),
(self.failIfAlmostEqual, (3.0, 5.0)),
(self.assertNotAlmostEquals, (3.0, 5.0)),
(self.failUnless, (True,)),
(self.assert_, (True,)),
(self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
(self.failIf, (False,)),
(self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
(self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
(self.assertRegexpMatches, ('bar', 'bar')),
)
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
    # This test is disabled for now. Once the version in which the fail*
    # methods will be removed is decided, re-enable it and update the version.
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
def test_no_exception_leak(self):
# Issue #19880: TestCase.run() should not keep a reference
# to the exception
class MyException(Exception):
ninstance = 0
def __init__(self):
MyException.ninstance += 1
Exception.__init__(self)
def __del__(self):
MyException.ninstance -= 1
class TestCase(unittest.TestCase):
def test1(self):
raise MyException()
@unittest.expectedFailure
def test2(self):
raise MyException()
for method_name in ('test1', 'test2'):
testcase = TestCase(method_name)
testcase.run()
self.assertEqual(MyException.ninstance, 0)
if __name__ == "__main__":
unittest.main()
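# A minimal standalone sketch (class and test names here are illustrative, not
# part of the suite above) of the assertWarns context-manager behaviour these
# tests exercise: the caught warning object and its origin are exposed on the
# context manager.
#
#     import unittest
#     import warnings
#
#     class DemoTest(unittest.TestCase):
#         def test_warns(self):
#             with self.assertWarns(RuntimeWarning) as cm:
#                 warnings.warn("demo", RuntimeWarning)
#             self.assertEqual(cm.warning.args[0], "demo")
#             self.assertTrue(cm.filename.endswith(".py"))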
|
firstopinion/prom
|
refs/heads/master
|
prom/interface/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import dsnparse
from ..config import DsnConnection
interfaces = {}
"""holds all the configured interfaces"""
def configure_environ(dsn_env_name='PROM_DSN', connection_class=DsnConnection):
"""
configure interfaces based on environment variables
    by default, when prom is imported it will look for PROM_DSN and PROM_DSN_N (where
    N is 1 through infinity) in the environment. If it finds them, it assumes they
    are dsn urls that prom understands and configures db connections with them. If you
    don't want this behavior (ie, you want to configure prom manually), just make sure
    you don't have any environment variables with matching names.
    The numbered checks (eg PROM_DSN_1, PROM_DSN_2) go in order, so you can't define
    PROM_DSN_1 and PROM_DSN_3 but skip PROM_DSN_2: the scan stops at the missing _2.
    Keep your numbered dsns sequential (eg 1, 2, 3, ...).
    example --
        export PROM_DSN_1=some.Interface://host:port/dbname#i1
        export PROM_DSN_2=some.Interface://host2:port/dbname2#i2
        $ python
        >>> import prom
        >>> print(prom.interfaces) # prints a dict with interfaces i1 and i2 keys
:param dsn_env_name: string, the name of the environment variables
"""
inters = []
cs = dsnparse.parse_environs(dsn_env_name, parse_class=connection_class)
for c in cs:
inter = c.interface
set_interface(inter, c.name)
inters.append(inter)
return inters
def configure(dsn, connection_class=DsnConnection):
"""
configure an interface to be used to query a backend
    use this function to configure an Interface from a dsn; you can then fetch
    that interface with the get_interface() function
dsn -- string -- a properly formatted prom dsn, see DsnConnection for how to format the dsn
"""
c = dsnparse.parse(dsn, parse_class=connection_class)
inter = c.interface
set_interface(inter, c.name)
return inter
def get_interfaces():
global interfaces
if not interfaces:
configure_environ()
return interfaces
def set_interface(interface, name=''):
"""
    Don't want to bother with a dsn? Use this function to make an interface available
"""
global interfaces
if not interface: raise ValueError('interface is empty')
# close down the interface before we discard it
if name in interfaces:
interfaces[name].close()
interfaces[name] = interface
def get_interface(name=''):
"""
get an interface that was created using configure()
name -- string -- the name of the connection for the interface to return
"""
global interfaces
if not interfaces:
configure_environ()
return interfaces[name]
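# A minimal usage sketch (the dsn, host and connection name "demo" are
# hypothetical):
#
#     from prom import interface
#     # register a connection under the name "demo"
#     interface.configure("some.Interface://host:port/dbname#demo")
#     # later, anywhere in the codebase
#     inter = interface.get_interface("demo")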
|
joachimmetz/l2tdevtools
|
refs/heads/main
|
tests/update.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the update tool."""
import sys
import unittest
from tools import update
from tests import test_lib
class GithubRepoDownloadHelperTest(test_lib.BaseTestCase):
"""Tests for the GitHub repo download helper class."""
# pylint: disable=protected-access
_DOWNLOAD_URL = 'https://github.com/ForensicArtifacts/artifacts/releases'
_PROJECT_NAME = 'artifacts'
_PROJECT_VERSION = '20210404'
def testGetPackageDownloadURLs(self):
"""Tests the GetPackageDownloadURLs function."""
download_helper = update.GithubRepoDownloadHelper(self._DOWNLOAD_URL)
package_download_urls = download_helper.GetPackageDownloadURLs(
preferred_machine_type='x86', preferred_operating_system='Windows')
if (sys.version_info[0], sys.version_info[1]) not in (
download_helper._SUPPORTED_PYTHON_VERSIONS):
self.assertIsNone(package_download_urls)
else:
self.assertIsNotNone(package_download_urls)
expected_url = (
'https://github.com/log2timeline/l2tbinaries/raw/main/win32/'
'{0:s}-{1:s}.1.win32.msi').format(
self._PROJECT_NAME, self._PROJECT_VERSION)
self.assertIn(expected_url, package_download_urls)
class DependencyUpdaterTest(test_lib.BaseTestCase):
"""Tests for the dependency updater class."""
# pylint: disable=protected-access
_PROJECT_NAME = 'dfvfs'
_PROJECT_VERSION = '20210606'
def testGetAvailablePackages(self):
"""Tests the _GetAvailablePackages function."""
dependency_updater = update.DependencyUpdater(
preferred_machine_type='x86', preferred_operating_system='Windows')
available_packages = dependency_updater._GetAvailablePackages()
if (sys.version_info[0], sys.version_info[1]) not in (
update.GithubRepoDownloadHelper._SUPPORTED_PYTHON_VERSIONS):
self.assertEqual(available_packages, [])
else:
self.assertNotEqual(available_packages, [])
for package_download in available_packages:
if package_download.name == self._PROJECT_NAME:
expected_package_filename = '{0:s}-{1:s}.1.win32.msi'.format(
self._PROJECT_NAME, self._PROJECT_VERSION)
self.assertEqual(package_download.filename, expected_package_filename)
expected_package_version = [self._PROJECT_VERSION, '1']
self.assertEqual(package_download.version, expected_package_version)
if __name__ == '__main__':
unittest.main()
|
sivaprakashniet/push_pull
|
refs/heads/master
|
p2p/lib/python2.7/site-packages/django/middleware/gzip.py
|
478
|
import re
from django.utils.cache import patch_vary_headers
from django.utils.text import compress_sequence, compress_string
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth attempting to compress really short responses.
if not response.streaming and len(response.content) < 200:
return response
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
patch_vary_headers(response, ('Accept-Encoding',))
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
if response.streaming:
# Delete the `Content-Length` header for streaming content, because
# we won't know the compressed size until we stream it.
response.streaming_content = compress_sequence(response.streaming_content)
del response['Content-Length']
else:
# Return the compressed content only if it's actually shorter.
compressed_content = compress_string(response.content)
if len(compressed_content) >= len(response.content):
return response
response.content = compressed_content
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
response['Content-Encoding'] = 'gzip'
return response
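# A quick standalone sketch (illustrative header values; running this directly
# assumes Django is importable) of the Accept-Encoding check used above: only
# a whole-word "gzip" token enables compression.
if __name__ == '__main__':
    for header in ('gzip, deflate', 'identity', 'gzipped'):
        print('%r accepts gzip: %s' % (header, bool(re_accepts_gzip.search(header))))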
|
xavierwu/scikit-learn
|
refs/heads/master
|
sklearn/gaussian_process/correlation_models.py
|
230
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
"""
The built-in correlation models submodule for the gaussian_process module.
"""
import numpy as np
def absolute_exponential(theta, d):
"""
Absolute exponential autocorrelation model.
(Ornstein-Uhlenbeck stochastic process)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i| )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.abs(np.asarray(d, dtype=np.float))
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(- theta[0] * np.sum(d, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(- np.sum(theta.reshape(1, n_features) * d, axis=1))
def squared_exponential(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * (d_i)^2 )
i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
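# A small worked example (illustrative values; exact output formatting depends
# on the numpy version): with isotropic theta the model reduces to
# exp(-theta * sum(d_i ** 2)) per row of d.
#
#     >>> import numpy as np
#     >>> d = np.array([[0.0, 0.0], [1.0, 1.0]])
#     >>> squared_exponential(np.array([0.5]), d)
#     array([ 1.        ,  0.36787944])   # i.e. exp(0) and exp(-1)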
def generalized_exponential(theta, d):
"""
Generalized exponential correlation model.
(Useful when one does not know the smoothness of the function to be
predicted.)::
n
theta, d --> r(theta, d) = exp( sum - theta_i * |d_i|^p )
i = 1
Parameters
----------
theta : array_like
An array with shape 1+1 (isotropic) or n+1 (anisotropic) giving the
autocorrelation parameter(s) (theta, p).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if n_features > 1 and lth == 2:
theta = np.hstack([np.repeat(theta[0], n_features), theta[1]])
elif lth != n_features + 1:
raise Exception("Length of theta must be 2 or %s" % (n_features + 1))
else:
theta = theta.reshape(1, lth)
td = theta[:, 0:-1].reshape(1, n_features) * np.abs(d) ** theta[:, -1]
r = np.exp(- np.sum(td, 1))
return r
def pure_nugget(theta, d):
"""
Spatial independence correlation model (pure nugget).
(Useful when one wants to solve an ordinary least squares problem!)::
n
theta, d --> r(theta, d) = 1 if sum |d_i| == 0
i = 1
0 otherwise
Parameters
----------
theta : array_like
None.
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
n_eval = d.shape[0]
r = np.zeros(n_eval)
r[np.all(d == 0., axis=1)] = 1.
return r
def cubic(theta, d):
"""
Cubic correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - 3(theta_j*d_ij)^2 + 2(theta_j*d_ij)^3) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or " + str(n_features))
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td ** 2. * (3. - 2. * td)
r = np.prod(ss, 1)
return r
def linear(theta, d):
"""
Linear correlation model::
theta, d --> r(theta, d) =
n
prod max(0, 1 - theta_j*d_ij) , i = 1,...,m
j = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) with the values of the autocorrelation
model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
lth = theta.size
if lth == 1:
td = np.abs(d) * theta
elif lth != n_features:
raise Exception("Length of theta must be 1 or %s" % n_features)
else:
td = np.abs(d) * theta.reshape(1, n_features)
td[td > 1.] = 1.
ss = 1. - td
r = np.prod(ss, 1)
return r
|
entoo/portage-src
|
refs/heads/master
|
pym/portage/util/_async/FileDigester.py
|
12
|
# Copyright 2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
from portage.checksum import perform_multiple_checksums
from portage.util._async.ForkProcess import ForkProcess
from _emerge.PipeReader import PipeReader
class FileDigester(ForkProcess):
"""
Asynchronously generate file digests. Pass in file_path and
hash_names, and after successful execution, the digests
attribute will be a dict containing all of the requested
digests.
"""
__slots__ = ('file_path', 'digests', 'hash_names',
'_digest_pipe_reader', '_digest_pw')
def _start(self):
pr, pw = os.pipe()
self.fd_pipes = {}
self.fd_pipes[pw] = pw
self._digest_pw = pw
self._digest_pipe_reader = PipeReader(
input_files={"input":pr},
scheduler=self.scheduler)
self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
self._digest_pipe_reader.start()
ForkProcess._start(self)
os.close(pw)
def _run(self):
digests = perform_multiple_checksums(self.file_path,
hashes=self.hash_names)
buf = "".join("%s=%s\n" % item
for item in digests.items()).encode('utf_8')
while buf:
buf = buf[os.write(self._digest_pw, buf):]
return os.EX_OK
def _parse_digests(self, data):
digests = {}
for line in data.decode('utf_8').splitlines():
parts = line.split('=', 1)
if len(parts) == 2:
digests[parts[0]] = parts[1]
self.digests = digests
def _pipe_logger_exit(self, pipe_logger):
# Ignore this event, since we want to ensure that we
# exit only after _digest_pipe_reader has reached EOF.
self._pipe_logger = None
def _digest_pipe_reader_exit(self, pipe_reader):
self._parse_digests(pipe_reader.getvalue())
self._digest_pipe_reader = None
self._unregister()
self.wait()
def _unregister(self):
ForkProcess._unregister(self)
pipe_reader = self._digest_pipe_reader
if pipe_reader is not None:
self._digest_pipe_reader = None
pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
pipe_reader.cancel()
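# A standalone sketch (fake digest values, no fork or pipe needed) of the line
# protocol used above: the child writes one "name=value" line per digest and
# _parse_digests rebuilds the dict from those lines on the parent side.
if __name__ == '__main__':
	fake = {'MD5': 'abc123', 'SHA256': 'def456'}
	data = ''.join('%s=%s\n' % item for item in fake.items()).encode('utf_8')
	parsed = dict(line.split('=', 1)
		for line in data.decode('utf_8').splitlines())
	print(parsed == fake)  # True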
|
shaunbrady/boto
|
refs/heads/develop
|
boto/beanstalk/wrapper.py
|
153
|
"""Wraps layer1 api methods and converts layer1 dict responses to objects."""
from boto.beanstalk.layer1 import Layer1
import boto.beanstalk.response
from boto.exception import BotoServerError
import boto.beanstalk.exception as exception
def beanstalk_wrapper(func, name):
def _wrapped_low_level_api(*args, **kwargs):
try:
response = func(*args, **kwargs)
except BotoServerError as e:
raise exception.simple(e)
# Turn 'this_is_a_function_name' into 'ThisIsAFunctionNameResponse'.
cls_name = ''.join([part.capitalize() for part in name.split('_')]) + 'Response'
cls = getattr(boto.beanstalk.response, cls_name)
return cls(response)
return _wrapped_low_level_api
class Layer1Wrapper(object):
def __init__(self, *args, **kwargs):
self.api = Layer1(*args, **kwargs)
def __getattr__(self, name):
try:
return beanstalk_wrapper(getattr(self.api, name), name)
except AttributeError:
raise AttributeError("%s has no attribute %r" % (self, name))
|
nugget/home-assistant
|
refs/heads/dev
|
homeassistant/components/harmony/__init__.py
|
17
|
"""Support for Harmony devices."""
|
sutartmelson/girder
|
refs/heads/master
|
plugins/google_analytics/plugin_tests/__init__.py
|
12133432
| |
lihui7115/ChromiumGStreamerBackend
|
refs/heads/master
|
tools/telemetry/telemetry/internal/testing/__init__.py
|
12133432
| |
vankrajnova/test_python
|
refs/heads/master
|
model/__init__.py
|
12133432
| |
Alwnikrotikz/l5rcm
|
refs/heads/master
|
widgets/cknumwidget.py
|
3
|
# Copyright (C) 2011 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys
from PySide import QtCore, QtGui
def find(f, seq):
"""Return first item in sequence where f(item) == True."""
for item in seq:
if f(item):
return item
class CkNumWidget(QtGui.QWidget):
valueChanged = QtCore.Signal(int, int)
def __init__(self, count = 9, parent = None):
super(CkNumWidget, self).__init__(parent)
self.count = count
self.checks = []
self.value = 0
hbox = QtGui.QHBoxLayout(self)
hbox.setSpacing(0)
hbox.setContentsMargins(0,0,0,0)
for i in xrange(0, count):
ck = QtGui.QCheckBox(self)
self.checks.append( ck )
hbox.addWidget( ck )
ck.clicked.connect(self.on_ck_toggled)
ck.setObjectName( str(i+1) )
def on_ck_toggled(self):
old_v = self.value
fred = find(lambda ck: ck == self.sender(), self.checks)
flag = fred.isChecked()
if int(fred.objectName()) == old_v:
self.value = self.value - 1
else:
self.value = int(fred.objectName())
#print 'old_v: %d, value: %d' % (old_v, self.value)
for i in xrange(0, self.count):
ck = self.checks[i]
if flag:
if int(ck.objectName()) <= self.value:
self.checks[i].setChecked(flag)
else:
self.checks[i].setChecked(not flag)
else:
if int(ck.objectName()) <= self.value:
self.checks[i].setChecked(not flag)
else:
self.checks[i].setChecked(flag)
if self.value != old_v:
self.valueChanged.emit(old_v, self.value)
def set_value(self, value):
if value == self.value:
return
for i in xrange(0, self.count):
ck = self.checks[i]
if int(ck.objectName()) <= value:
self.checks[i].setChecked(True)
else:
self.checks[i].setChecked(False)
old_v = self.value
self.value = value
self.valueChanged.emit(old_v, value)
def get_value(self):
return self.value
### MAIN ###
def main():
app = QtGui.QApplication(sys.argv)
dlg = QtGui.QDialog()
vbox = QtGui.QVBoxLayout(dlg)
vbox.addWidget( CkNumWidget(dlg) )
dlg.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
virtualopensystems/nova
|
refs/heads/bp/vif-vhostuser
|
nova/compute/monitors/virt/cpu_monitor.py
|
13
|
# Copyright 2013 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CPU monitor based on compute driver to retrieve CPU information
"""
from oslo.config import cfg
from nova.compute import monitors
from nova.compute.monitors import cpu_monitor as monitor
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
class ComputeDriverCPUMonitor(monitor._CPUMonitorBase):
"""CPU monitor based on compute driver
The class inherits from the base class for resource monitors,
and implements the essential methods to get metric names and their real
values for CPU utilization.
    The compute manager can load this monitor to periodically retrieve the
    CPU metrics of compute nodes and track their resource usage.
"""
def __init__(self, parent):
super(ComputeDriverCPUMonitor, self).__init__(parent)
self.source = CONF.compute_driver
self.driver = self.compute_manager.driver
self._cpu_stats = {}
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_frequency(self, **kwargs):
return self._data.get("cpu.frequency")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_user_time(self, **kwargs):
return self._data.get("cpu.user.time")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_kernel_time(self, **kwargs):
return self._data.get("cpu.kernel.time")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_idle_time(self, **kwargs):
return self._data.get("cpu.idle.time")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_iowait_time(self, **kwargs):
return self._data.get("cpu.iowait.time")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_user_percent(self, **kwargs):
return self._data.get("cpu.user.percent")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_kernel_percent(self, **kwargs):
return self._data.get("cpu.kernel.percent")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_idle_percent(self, **kwargs):
return self._data.get("cpu.idle.percent")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_iowait_percent(self, **kwargs):
return self._data.get("cpu.iowait.percent")
@monitors.ResourceMonitorBase.add_timestamp
def _get_cpu_percent(self, **kwargs):
return self._data.get("cpu.percent")
def _update_data(self, **kwargs):
        # Don't allow this function to be called too frequently (<= 1 sec apart)
now = timeutils.utcnow()
if self._data.get("timestamp") is not None:
delta = now - self._data.get("timestamp")
if delta.seconds <= 1:
return
self._data = {}
self._data["timestamp"] = now
# Extract node's CPU statistics.
try:
stats = self.driver.get_host_cpu_stats()
self._data["cpu.user.time"] = stats["user"]
self._data["cpu.kernel.time"] = stats["kernel"]
self._data["cpu.idle.time"] = stats["idle"]
self._data["cpu.iowait.time"] = stats["iowait"]
self._data["cpu.frequency"] = stats["frequency"]
except (NotImplementedError, TypeError, KeyError) as ex:
LOG.exception(_("Not all properties needed are implemented "
"in the compute driver: %s"), ex)
raise exception.ResourceMonitorError(
monitor=self.__class__.__name__)
# The compute driver API returns the absolute values for CPU times.
# We compute the utilization percentages for each specific CPU time
# after calculating the delta between the current reading and the
# previous reading.
stats["total"] = (stats["user"] + stats["kernel"]
+ stats["idle"] + stats["iowait"])
cputime = float(stats["total"] - self._cpu_stats.get("total", 0))
perc = (stats["user"] - self._cpu_stats.get("user", 0)) / cputime
self._data["cpu.user.percent"] = perc
perc = (stats["kernel"] - self._cpu_stats.get("kernel", 0)) / cputime
self._data["cpu.kernel.percent"] = perc
perc = (stats["idle"] - self._cpu_stats.get("idle", 0)) / cputime
self._data["cpu.idle.percent"] = perc
perc = (stats["iowait"] - self._cpu_stats.get("iowait", 0)) / cputime
self._data["cpu.iowait.percent"] = perc
# Compute the current system-wide CPU utilization as a percentage.
used = stats["user"] + stats["kernel"] + stats["iowait"]
prev_used = (self._cpu_stats.get("user", 0)
+ self._cpu_stats.get("kernel", 0)
+ self._cpu_stats.get("iowait", 0))
perc = (used - prev_used) / cputime
self._data["cpu.percent"] = perc
self._cpu_stats = stats.copy()
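# A standalone sketch (illustrative numbers, no hypervisor required) of the
# delta arithmetic performed in _update_data above: utilization for each CPU
# time bucket is its change divided by the change in total CPU time between
# two consecutive readings.
if __name__ == '__main__':
    prev = {"user": 100, "kernel": 50, "idle": 800, "iowait": 50, "total": 1000}
    curr = {"user": 130, "kernel": 60, "idle": 890, "iowait": 60, "total": 1140}
    cputime = float(curr["total"] - prev["total"])
    # e.g. user time grew by 30 of 140 total units -> ~0.214
    print("cpu.user.percent = %.3f" % ((curr["user"] - prev["user"]) / cputime))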
|
vmayoral/basic_reinforcement_learning
|
refs/heads/master
|
tutorial2/cliff_S.py
|
1
|
import cellular
import sarsa
import time
import sys
startCell = None
class Cell(cellular.Cell):
def __init__(self):
self.cliff = False
self.goal = False
self.wall = False
def colour(self):
if self.cliff:
return 'red'
if self.goal:
return 'green'
if self.wall:
return 'black'
else:
return 'white'
def load(self, data):
global startCell
if data == 'S':
startCell = self
if data == '.':
self.wall = True
if data == 'X':
self.cliff = True
if data == 'G':
self.goal = True
class Agent(cellular.Agent):
def __init__(self):
self.ai = sarsa.Sarsa(
actions=range(directions), epsilon=0.1, alpha=0.1, gamma=0.9)
self.lastAction = None
self.score = 0
self.deads = 0
def colour(self):
return 'blue'
def update(self):
reward = self.calcReward()
state = self.calcState()
action = self.ai.chooseAction(state)
if self.lastAction is not None:
self.ai.learn(
self.lastState, self.lastAction, reward, state, action)
self.lastState = state
self.lastAction = action
here = self.cell
if here.goal or here.cliff:
self.cell = startCell
self.lastAction = None
else:
self.goInDirection(action)
def calcState(self):
return self.cell.x, self.cell.y
def calcReward(self):
here = self.cell
if here.cliff:
self.deads += 1
return cliffReward
elif here.goal:
self.score += 1
return goalReward
else:
return normalReward
normalReward = -1
cliffReward = -100
goalReward = 50
directions = 4
world = cellular.World(Cell, directions=directions, filename='../worlds/cliff.txt')
if startCell is None:
print "You must indicate where the agent starts by putting a 'S' in the map file"
sys.exit()
agent = Agent()
world.addAgent(agent, cell=startCell)
pretraining = 100000
for i in range(pretraining):
if i % 1000 == 0:
print i, agent.score, agent.deads
agent.score = 0
agent.deads = 0
world.update()
world.display.activate(size=30)
world.display.delay = 1
while 1:
world.update()
|
tomkins/django-pipeline
|
refs/heads/master
|
tests/tests/test_glob.py
|
40
|
from __future__ import unicode_literals
import os
import shutil
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.test import TestCase
from pipeline import glob
local_path = lambda path: os.path.join(os.path.dirname(__file__), path)
class GlobTest(TestCase):
def normpath(self, *parts):
return os.path.normpath(os.path.join(*parts))
def mktemp(self, *parts):
filename = self.normpath(*parts)
base, file = os.path.split(filename)
base = os.path.join(self.storage.location, base)
if not os.path.exists(base):
os.makedirs(base)
self.storage.save(filename, ContentFile(""))
def assertSequenceEqual(self, l1, l2):
self.assertEqual(set(l1), set(l2))
def setUp(self):
self.storage = FileSystemStorage(local_path('glob_dir'))
self.old_storage = glob.default_storage
glob.default_storage = self.storage
self.mktemp('a', 'D')
self.mktemp('aab', 'F')
self.mktemp('aaa', 'zzzF')
self.mktemp('ZZZ')
self.mktemp('a', 'bcd', 'EF')
self.mktemp('a', 'bcd', 'efg', 'ha')
def glob(self, *parts):
if len(parts) == 1:
pattern = parts[0]
else:
pattern = os.path.join(*parts)
return glob.glob(pattern)
def tearDown(self):
shutil.rmtree(self.storage.location)
glob.default_storage = self.old_storage
def test_glob_literal(self):
self.assertSequenceEqual(self.glob('a'),
[self.normpath('a')])
self.assertSequenceEqual(self.glob('a', 'D'),
[self.normpath('a', 'D')])
self.assertSequenceEqual(self.glob('aab'),
[self.normpath('aab')])
self.assertSequenceEqual(self.glob('zymurgy'), [])
def test_glob_one_directory(self):
self.assertSequenceEqual(self.glob('a*'),
map(self.normpath, ['a', 'aab', 'aaa']))
self.assertSequenceEqual(self.glob('*a'),
map(self.normpath, ['a', 'aaa']))
self.assertSequenceEqual(self.glob('aa?'),
map(self.normpath, ['aaa', 'aab']))
self.assertSequenceEqual(self.glob('aa[ab]'),
map(self.normpath, ['aaa', 'aab']))
self.assertSequenceEqual(self.glob('*q'), [])
def test_glob_nested_directory(self):
if os.path.normcase("abCD") == "abCD":
# case-sensitive filesystem
self.assertSequenceEqual(self.glob('a', 'bcd', 'E*'),
[self.normpath('a', 'bcd', 'EF')])
else:
# case insensitive filesystem
self.assertSequenceEqual(self.glob('a', 'bcd', 'E*'), [
self.normpath('a', 'bcd', 'EF'),
self.normpath('a', 'bcd', 'efg')
])
self.assertSequenceEqual(self.glob('a', 'bcd', '*g'),
[self.normpath('a', 'bcd', 'efg')])
def test_glob_directory_names(self):
self.assertSequenceEqual(self.glob('*', 'D'),
[self.normpath('a', 'D')])
self.assertSequenceEqual(self.glob('*', '*a'), [])
self.assertSequenceEqual(self.glob('a', '*', '*', '*a'),
[self.normpath('a', 'bcd', 'efg', 'ha')])
self.assertSequenceEqual(self.glob('?a?', '*F'),
map(self.normpath, [os.path.join('aaa', 'zzzF'),
os.path.join('aab', 'F')]))
def test_glob_directory_with_trailing_slash(self):
        # Verify that a wildcard pattern which ends with os.sep
        # doesn't blow up.
paths = glob.glob('*' + os.sep)
self.assertEqual(len(paths), 4)
self.assertTrue(all([os.sep in path for path in paths]))
|
sublime1809/django
|
refs/heads/master
|
django/middleware/clickjacking.py
|
185
|
"""
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
"""
Middleware that sets the X-Frame-Options HTTP header in HTTP responses.
Does not set the header if it's already set or if the response contains
    an xframe_options_exempt value set to True.
By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the
response can only be loaded on a frame within the same site. To prevent the
response from being loaded in a frame in any site, set X_FRAME_OPTIONS in
your project's Django settings to 'DENY'.
Note: older browsers will quietly ignore this header, thus other
clickjacking protection techniques should be used if protection in those
browsers is required.
http://en.wikipedia.org/wiki/Clickjacking#Server_and_client
"""
def process_response(self, request, response):
# Don't set it if it's already in the response
if response.get('X-Frame-Options', None) is not None:
return response
# Don't set it if they used @xframe_options_exempt
if getattr(response, 'xframe_options_exempt', False):
return response
response['X-Frame-Options'] = self.get_xframe_options_value(request,
response)
return response
def get_xframe_options_value(self, request, response):
"""
Gets the value to set for the X_FRAME_OPTIONS header.
By default this uses the value from the X_FRAME_OPTIONS Django
settings. If not found in settings, defaults to 'SAMEORIGIN'.
This method can be overridden if needed, allowing it to vary based on
the request or response.
"""
return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
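# A minimal usage sketch (the settings shown are a hypothetical project
# configuration for this middleware era): enable the middleware and
# optionally harden the default.
#
#     MIDDLEWARE_CLASSES = [
#         # ...
#         'django.middleware.clickjacking.XFrameOptionsMiddleware',
#     ]
#     X_FRAME_OPTIONS = 'DENY'  # default is 'SAMEORIGIN'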
|
newerthcom/savagerebirth
|
refs/heads/master
|
libs/python-2.72/Lib/sre_parse.py
|
156
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error, ("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = 1
seqtypes = type(()), type([])
for op, av in self.data:
print level*" " + op,; nl = 0
if op == "in":
# member sublanguage
print; nl = 1
for op, a in av:
print (level+1)*" " + op, a
elif op == "branch":
print; nl = 1
i = 0
for a in av[1]:
if i > 0:
print level*" " + "or"
a.dump(level+1); nl = 1
i = i + 1
elif type(av) in seqtypes:
for a in av:
if isinstance(a, SubPattern):
if not nl: print
a.dump(level+1); nl = 1
else:
print a, ; nl = 0
else:
print av, ; nl = 0
if not nl: print
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0L
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxint
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + long(i) * av[0]
hi = hi + long(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index]
if char[0] == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error, "bogus escape (end of line)"
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
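# A few illustrative checks for isname (group names must look like Python
# identifiers; shown doctest-style, not executed at import):
#
#     >>> isname("name1"), isname("_tag")
#     (True, True)
#     >>> isname("1name"), isname("a-b")
#     (False, False)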
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error, "bogus escape: %s" % repr("\\" + escape)
return LITERAL, int(escape, 16) & 0xff
elif c in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, int(escape, 8) & 0xff
elif c in DIGITS:
raise error, "bogus escape: %s" % repr(escape)
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error, "cannot refer to open group"
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error, "pattern not properly closed"
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error, "conditional backref with more than two branches"
else:
item_no = None
if source.next and not source.match(")", 0):
raise error, "pattern not properly closed"
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error, "unexpected end of regular expression"
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error, "bad character range"
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error, "bad character range"
setappend((RANGE, (lo, hi)))
else:
raise error, "unexpected end of regular expression"
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if hi:
max = int(hi)
if max < min:
raise error, "bad repeat interval"
else:
raise error, "not supported"
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
if item[0][0] in REPEATCODES:
raise error, "multiple repeat"
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ">":
break
name = name + char
group = 1
if not isname(name):
raise error, "bad character in group name"
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
name = name + char
if not isname(name):
raise error, "bad character in group name"
gid = state.groupdict.get(name)
if gid is None:
raise error, "unknown group name"
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
raise error, "unknown specifier: ?P%s" % char
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error, "syntax error"
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error, "unterminated name"
if char == ")":
break
condname = condname + char
group = 2
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error, "unknown group name"
else:
try:
condgroup = int(condname)
except ValueError:
raise error, "bad character in group name"
else:
# flags
                    if source.next not in FLAGS:
raise error, "unexpected end of pattern"
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error, "unexpected end of pattern"
if char == ")":
break
raise error, "unknown extension"
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error, "parser error"
return subpattern
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
tail = source.get()
if tail == ")":
raise error, "unbalanced parenthesis"
elif tail:
raise error, "bogus characters at end of regular expression"
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
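# Illustrative note (not part of the original module): parse() returns a
# SubPattern whose items are (opcode, argument) tuples. For example,
# parse("ab*") yields roughly:
#     [(LITERAL, ord('a')), (MAX_REPEAT, (0, MAXREPEAT, [(LITERAL, ord('b'))]))]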
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if type(sep) is type(""):
makechar = chr
else:
makechar = unichr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error, "unterminated group name"
if char == ">":
break
name = name + char
if not name:
raise error, "bad group name"
try:
index = int(name)
if index < 0:
raise error, "negative group number"
except ValueError:
if not isname(name):
raise error, "bad character in group name"
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError, "unknown group name"
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = s
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error, "unmatched group"
except IndexError:
raise error, "invalid group reference"
return sep.join(literals)
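if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original module):
    # round-trip a replacement template through parse_template() and
    # expand_template() using a compiled pattern from re.
    import re
    _pat = re.compile(r"(\w+)-(\w+)")
    _tmpl = parse_template(r"\2:\1", _pat)
    assert expand_template(_tmpl, _pat.match("foo-bar")) == "bar:foo"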
|
fgouget/spice-common
|
refs/heads/master
|
python_modules/__init__.py
|
12133432
| |
cweems/api-timeline-js
|
refs/heads/master
|
website/core/settings/__init__.py
|
12133432
| |
fredhusser/scikit-learn
|
refs/heads/master
|
sklearn/preprocessing/tests/__init__.py
|
12133432
| |
jss-emr/openerp-7-src
|
refs/heads/master
|
openerp/addons/base/ir/ir_default.py
|
73
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class ir_default(osv.osv):
_name = 'ir.default'
_columns = {
'field_tbl': fields.char('Object',size=64),
'field_name': fields.char('Object Field',size=64),
'value': fields.char('Default Value',size=64),
'uid': fields.many2one('res.users', 'Users'),
'page': fields.char('View',size=64),
'ref_table': fields.char('Table Ref.',size=64),
'ref_id': fields.integer('ID Ref.',size=64),
'company_id': fields.many2one('res.company','Company')
}
def _get_company_id(self, cr, uid, context=None):
res = self.pool.get('res.users').read(cr, uid, [uid], ['company_id'], context=context)
if res and res[0]['company_id']:
return res[0]['company_id'][0]
return False
_defaults = {
'company_id': _get_company_id,
}
ir_default()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
arthur-wsw/pinax-messages
|
refs/heads/master
|
pinax/messages/models.py
|
1
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from .signals import message_sent
from .utils import cached_attribute
@python_2_unicode_compatible
class Thread(models.Model):
subject = models.CharField(max_length=150)
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through="UserThread")
@classmethod
def inbox(cls, user):
return cls.objects.filter(userthread__user=user, userthread__deleted=False)
@classmethod
def unread(cls, user):
return cls.objects.filter(
userthread__user=user,
userthread__deleted=False,
userthread__unread=True
)
def __str__(self):
return "{}: {}".format(
self.subject,
", ".join([str(user) for user in self.users.all()])
)
def get_absolute_url(self):
return reverse("pinax_messages:thread_detail", args=[self.pk])
@property
@cached_attribute
def first_message(self):
return self.messages.all()[0]
@property
@cached_attribute
def latest_message(self):
return self.messages.order_by("-sent_at")[0]
@classmethod
def ordered(cls, objs):
"""
Returns the iterable ordered the correct way, this is a class method
because we don"t know what the type of the iterable will be.
"""
objs = list(objs)
objs.sort(key=lambda o: o.latest_message.sent_at, reverse=True)
return objs
class UserThread(models.Model):
thread = models.ForeignKey(Thread)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
unread = models.BooleanField()
deleted = models.BooleanField()
class Message(models.Model):
thread = models.ForeignKey(Thread, related_name="messages")
sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="sent_messages")
sent_at = models.DateTimeField(default=timezone.now)
content = models.TextField()
@classmethod
def new_reply(cls, thread, user, content):
"""
Create a new reply for an existing Thread.
Mark thread as unread for all other participants, and
mark thread as read by replier.
"""
msg = cls.objects.create(thread=thread, sender=user, content=content)
thread.userthread_set.exclude(user=user).update(deleted=False, unread=True)
thread.userthread_set.filter(user=user).update(deleted=False, unread=False)
message_sent.send(sender=cls, message=msg, thread=thread, reply=True)
return msg
@classmethod
def new_message(cls, from_user, to_users, subject, content):
"""
Create a new Message and Thread.
Mark thread as unread for all recipients, and
mark thread as read and deleted from inbox by creator.
"""
thread = Thread.objects.create(subject=subject)
for user in to_users:
thread.userthread_set.create(user=user, deleted=False, unread=True)
thread.userthread_set.create(user=from_user, deleted=True, unread=False)
msg = cls.objects.create(thread=thread, sender=from_user, content=content)
message_sent.send(sender=cls, message=msg, thread=thread, reply=False)
return msg
class Meta:
ordering = ("sent_at",)
def get_absolute_url(self):
return self.thread.get_absolute_url()
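# Hypothetical usage sketch (not part of this module), assuming two saved
# User instances `alice` and `bob` in a configured Django project:
#
#     msg = Message.new_message(alice, [bob], "Hello", "Hi Bob!")
#     reply = Message.new_reply(msg.thread, bob, "Hi back!")
#     assert msg.thread in Thread.unread(alice)  # the reply marked it unread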
|
HiSPARC/station-software
|
refs/heads/master
|
user/python/Lib/site-packages/pip/_internal/cli/base_command.py
|
7
|
"""Base Command class, and related routines"""
from __future__ import absolute_import
import logging
import logging.config
import optparse
import os
import sys
from pip._internal.cli import cmdoptions
from pip._internal.cli.parser import (
ConfigOptionParser, UpdatingDefaultsHelpFormatter,
)
from pip._internal.cli.status_codes import (
ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR,
VIRTUALENV_NOT_FOUND,
)
from pip._internal.download import PipSession
from pip._internal.exceptions import (
BadCommand, CommandError, InstallationError, PreviousBuildDirError,
UninstallationError,
)
from pip._internal.index import PackageFinder
from pip._internal.locations import running_under_virtualenv
from pip._internal.req.constructors import (
install_req_from_editable, install_req_from_line,
)
from pip._internal.req.req_file import parse_requirements
from pip._internal.utils.logging import setup_logging
from pip._internal.utils.misc import get_prog, normalize_path
from pip._internal.utils.outdated import pip_version_check
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional # noqa: F401
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(object):
name = None # type: Optional[str]
usage = None # type: Optional[str]
hidden = False # type: bool
ignore_require_venv = False # type: bool
def __init__(self, isolated=False):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
'isolated': isolated,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(
cmdoptions.general_group,
self.parser,
)
self.parser.add_option_group(gen_opts)
def _build_session(self, options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.cache_dir, "http"))
if options.cache_dir else None
),
retries=retries if retries is not None else options.retries,
insecure_hosts=options.trusted_hosts,
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = (
timeout if timeout is not None else options.timeout
)
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
# Set verbosity so that it can be used elsewhere.
self.verbosity = options.verbose - options.quiet
setup_logging(
verbosity=self.verbosity,
no_color=options.no_color,
user_log_file=options.log,
)
# TODO: Try to get these passing down from the command?
# without resorting to os.environ to hold these.
# This also affects isolated builds and it should.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv and not self.ignore_require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return ERROR
except CommandError as exc:
logger.critical('ERROR: %s', exc)
logger.debug('Exception information:', exc_info=True)
return ERROR
except KeyboardInterrupt:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:', exc_info=True)
return ERROR
except BaseException:
logger.critical('Exception:', exc_info=True)
return UNKNOWN_ERROR
finally:
allow_version_check = (
# Does this command have the index_group options?
hasattr(options, "no_index") and
# Is this command allowed to perform this check?
not (options.disable_pip_version_check or options.no_index)
)
# Check if we're using the latest version of pip available
if allow_version_check:
session = self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)
)
with session:
pip_version_check(session, options)
# Shutdown the logging module
logging.shutdown()
return SUCCESS
class RequirementCommand(Command):
@staticmethod
def populate_requirement_set(requirement_set, args, options, finder,
session, name, wheel_cache):
"""
Marshal cmd line args into a requirement set.
"""
# NOTE: As a side-effect, options.require_hashes and
# requirement_set.require_hashes may be updated
for filename in options.constraints:
for req_to_add in parse_requirements(
filename,
constraint=True, finder=finder, options=options,
session=session, wheel_cache=wheel_cache):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in args:
req_to_add = install_req_from_line(
req, None, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in options.editables:
req_to_add = install_req_from_editable(
req,
isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for filename in options.requirements:
for req_to_add in parse_requirements(
filename,
finder=finder, options=options, session=session,
wheel_cache=wheel_cache):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
# If --require-hashes was a line in a requirements file, tell
# RequirementSet about it:
requirement_set.require_hashes = options.require_hashes
if not (args or options.editables or options.requirements):
opts = {'name': name}
if options.find_links:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(see "pip help %(name)s")' % opts)
def _build_package_finder(self, options, session,
platform=None, python_versions=None,
abi=None, implementation=None):
"""
Create a package finder appropriate to this requirement command.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
return PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
platform=platform,
versions=python_versions,
abi=abi,
implementation=implementation,
prefer_binary=options.prefer_binary,
)
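# Illustrative sketch (not part of pip itself): the minimal surface a
# concrete command implements on top of Command. The class name and log
# message are hypothetical.
#
#     class HelloCommand(Command):
#         name = 'hello'
#         usage = '%prog'
#         summary = 'Print a greeting.'
#
#         def run(self, options, args):
#             logger.info('hello from %s', get_prog())
#             return SUCCESS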
|
rczajka/infoscreen
|
refs/heads/master
|
infoscreen/settings.d/50-static.py
|
1
|
MEDIA_ROOT = path.join(PROJECT_DIR, 'media/')
MEDIA_URL = '/media/'
STATIC_ROOT = path.join(PROJECT_DIR, 'static/')
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
PIPELINE_CSS = {
'base': {
'source_filenames': (
'css/base.scss',
),
'output_filename': 'compressed/base.css',
},
}
PIPELINE_JS = {
'base': {
'source_filenames': (
"infoscreen/jquery.min.js",
"infoscreen/jquery.cycle.all.latest.js",
"infoscreen/infoscreen.js",
),
'output_filename': 'compressed/base.js',
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.sass.SASSCompiler',
)
PIPELINE_STORAGE = 'pipeline.storage.PipelineFinderStorage'
|
stephendade/ardupilot
|
refs/heads/master
|
Tools/scripts/check_firmware_version.py
|
27
|
#!/usr/bin/env python
'''
check firmware-version.txt in binaries directory
'''
import os
VEHICLES = ['AntennaTracker', 'Copter', 'Plane', 'Rover', 'Sub']
def parse_git_version(gfile):
'''parse git-version.txt, producing a firmware-version.txt'''
gv = open(gfile).readlines()
vline = gv[-1]
if not vline.startswith("APMVERSION:"):
print("Bad version %s in %s" % (vline, gfile))
return None
vline = vline[11:]
a = vline.split('V')
if len(a) != 2:
return None
vers = a[1].strip()
if vers[-1].isdigit():
return vers+"-FIRMWARE_VERSION_TYPE_OFFICIAL"
print("Bad vers %s in %s" % (vers, gfile))
return None
def check_fw_version(version):
try:
(version_numbers, release_type) = version.split("-")
(_, _, _) = version_numbers.split(".")
except Exception:
return False
return True
def check_version(vehicle):
'''check firmware-version.txt version for a vehicle'''
for d in os.listdir(vehicle):
if not d.startswith("stable"):
continue
stable_dir = '%s/%s' % (vehicle, d)
for b in sorted(os.listdir(stable_dir)):
if not os.path.isdir(os.path.join(stable_dir, b)):
continue
vfile = os.path.join(stable_dir, b, "firmware-version.txt")
if os.path.exists(vfile):
v = open(vfile).read()
if check_fw_version(v):
continue
gfile = os.path.join(stable_dir, b, "git-version.txt")
if not os.path.exists(gfile):
print("Missing %s" % gfile)
continue
v = parse_git_version(gfile)
if v is not None:
open(vfile, "w").write(v)
print("Added %s" % vfile)
continue
print("Failed for %s" % gfile)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='check_firmware_version.py')
args = parser.parse_args()
for v in VEHICLES:
check_version(v)
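# Illustrative examples (not in the original script) of the strings
# check_fw_version() accepts and rejects:
#     check_fw_version("3.6.9-FIRMWARE_VERSION_TYPE_OFFICIAL")  # True
#     check_fw_version("3.6.9")         # False: no release-type suffix
#     check_fw_version("3.6-OFFICIAL")  # False: needs major.minor.patch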
|
korsimoro/indy-sdk
|
refs/heads/init-node-wrapper
|
wrappers/python/tests/conftest.py
|
1
|
import asyncio
import json
import logging
from os import environ, makedirs
from os.path import dirname
from pathlib import Path
from shutil import rmtree
from tempfile import gettempdir
import pytest
from indy import wallet, pool, signus, ledger
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(scope="session")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture
def seed_trustee1():
logger = logging.getLogger(__name__)
logger.debug("seed_trustee1: >>>")
res = "000000000000000000000000Trustee1"
logger.debug("seed_trustee1: <<< res: %r", res)
return res
@pytest.fixture
def seed_steward1():
logger = logging.getLogger(__name__)
logger.debug("seed_trustee1: >>>")
res = "000000000000000000000000Steward1"
logger.debug("seed_trustee1: <<< res: %r", res)
return res
@pytest.fixture
def seed_my1():
logger = logging.getLogger(__name__)
logger.debug("seed_my1: >>>")
res = "00000000000000000000000000000My1"
logger.debug("seed_my1: <<< res: %r", res)
return res
@pytest.fixture
async def endpoint():
return "127.0.0.1:9700"
@pytest.fixture
def path_temp():
logger = logging.getLogger(__name__)
logger.debug("path_temp: >>>")
path = Path(gettempdir()).joinpath("indy")
if path.exists():
logger.debug("path_temp: Cleanup tmp path: %s", path)
rmtree(str(path))
logger.debug("path_temp: yield: %r", path)
yield path
if path.exists():
logger.debug("path_temp: Cleanup tmp path: %s", path)
rmtree(str(path))
logger.debug("path_temp: <<<")
@pytest.fixture
def path_home() -> Path:
logger = logging.getLogger(__name__)
logger.debug("path_home: >>>")
path = Path.home().joinpath(".indy")
if path.exists():
logger.debug("path_home: Cleanup home path: %r", path)
rmtree(str(path))
logger.debug("path_home: yield: %r", path)
yield path
if path.exists():
logger.debug("path_home: Cleanup home path: %r", path)
rmtree(str(path))
logger.debug("path_home: <<<")
@pytest.fixture
def wallet_name():
logger = logging.getLogger(__name__)
logger.debug("wallet_name: >>>")
res = "wallet1"
logger.debug("wallet_name: <<< res: %r", res)
return res
@pytest.fixture
def wallet_type():
logger = logging.getLogger(__name__)
logger.debug("wallet_type: >>>")
res = "default"
logger.debug("wallet_type: <<< res: %r", res)
return res
@pytest.fixture
def wallet_config():
logger = logging.getLogger(__name__)
logger.debug("wallet_config: >>>")
res = None
logger.debug("wallet_config: <<< res: %r", res)
return res
@pytest.fixture
def xwallet_cleanup():
logger = logging.getLogger(__name__)
logger.debug("wallet_cleanup: >>>")
res = True
logger.debug("wallet_cleanup: <<< res: %r", res)
return res
# noinspection PyUnusedLocal
@pytest.fixture
async def xwallet(pool_name, wallet_name, wallet_type, xwallet_cleanup, path_home):
logger = logging.getLogger(__name__)
logger.debug("xwallet: >>> pool_name: %r, wallet_type: %r, xwallet_cleanup: %r, path_home: %r",
pool_name,
wallet_type,
xwallet,
path_home)
logger.debug("xwallet: Creating wallet")
await wallet.create_wallet(pool_name, wallet_name, wallet_type, None, None)
logger.debug("xwallet: yield")
yield
logger.debug("xwallet: Deleting wallet")
await wallet.delete_wallet(wallet_name, None) if xwallet_cleanup else None
logger.debug("xwallet: <<<")
@pytest.fixture
def wallet_runtime_config():
logger = logging.getLogger(__name__)
logger.debug("wallet_runtime_config: >>>")
res = None
logger.debug("wallet_runtime_config: <<< res: %r", res)
return res
@pytest.fixture
def wallet_handle_cleanup():
logger = logging.getLogger(__name__)
logger.debug("wallet_handle_cleanup: >>>")
res = True
logger.debug("wallet_handle_cleanup: <<< res: %r", res)
return res
@pytest.fixture
async def wallet_handle(wallet_name, xwallet, wallet_runtime_config, wallet_handle_cleanup):
logger = logging.getLogger(__name__)
logger.debug(
"wallet_handle: >>> wallet_name: %r, xwallet: %r, wallet_runtime_config: %r, wallet_handle_cleanup: %r",
wallet_name,
xwallet,
wallet_runtime_config,
wallet_handle_cleanup)
logger.debug("wallet_handle: Opening wallet")
wallet_handle = await wallet.open_wallet(wallet_name, wallet_runtime_config, None)
assert type(wallet_handle) is int
logger.debug("wallet_handle: yield %r", wallet_handle)
yield wallet_handle
logger.debug("wallet_handle: Closing wallet")
await wallet.close_wallet(wallet_handle) if wallet_handle_cleanup else None
logger.debug("wallet_handle: <<<")
@pytest.fixture
def pool_name():
logger = logging.getLogger(__name__)
logger.debug("pool_name: >>>")
res = "pool1"
logger.debug("pool_name: <<< res: %r", res)
return res
@pytest.fixture
def pool_ip():
logger = logging.getLogger(__name__)
logger.debug("pool_ip: >>>")
res = environ.get("TEST_POOL_IP", "127.0.0.1")
logger.debug("pool_ip: <<< res: %r", res)
return res
@pytest.fixture
def pool_genesis_txn_count():
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_count: >>>")
res = 4
logger.debug("pool_genesis_txn_count: <<< res: %r", res)
return res
@pytest.fixture
def pool_genesis_txn_data(pool_genesis_txn_count, pool_ip):
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_data: >>> pool_genesis_txn_count: %r, pool_ip: %r",
pool_genesis_txn_count,
pool_ip)
assert 0 < pool_genesis_txn_count <= 4
res = "\n".join([
'{{"data":{{"alias":"Node1","client_ip":"{}","client_port":9702,"node_ip":"{}","node_port":9701,"services":["VALIDATOR"]}},"dest":"Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv","identifier":"Th7MpTaRZVRYnPiabds81Y","txnId":"fea82e10e894419fe2bea7d96296a6d46f50f93f9eeda954ec461b2ed2950b62","type":"0"}}'.format(
pool_ip, pool_ip),
'{{"data":{{"alias":"Node2","client_ip":"{}","client_port":9704,"node_ip":"{}","node_port":9703,"services":["VALIDATOR"]}},"dest":"8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb","identifier":"EbP4aYNeTHL6q385GuVpRV","txnId":"1ac8aece2a18ced660fef8694b61aac3af08ba875ce3026a160acbc3a3af35fc","type":"0"}}'.format(
pool_ip, pool_ip),
'{{"data":{{"alias":"Node3","client_ip":"{}","client_port":9706,"node_ip":"{}","node_port":9705,"services":["VALIDATOR"]}},"dest":"DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya","identifier":"4cU41vWW82ArfxJxHkzXPG","txnId":"7e9f355dffa78ed24668f0e0e369fd8c224076571c51e2ea8be5f26479edebe4","type":"0"}}'.format(
pool_ip, pool_ip),
'{{"data":{{"alias":"Node4","client_ip":"{}","client_port":9708,"node_ip":"{}","node_port":9707,"services":["VALIDATOR"]}},"dest":"4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA","identifier":"TWwCRQRZ2ZHMJFn9TzLp7W","txnId":"aa5e817d7cc626170eca175822029339a444eb0ee8f0bd20d3b0b76e566fb008","type":"0"}}'.format(
pool_ip, pool_ip)
][0:pool_genesis_txn_count])
logger.debug("pool_genesis_txn_data: <<< res: %r", res)
return res
@pytest.fixture
def pool_genesis_txn_path(pool_name, path_temp):
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_path: >>> pool_name: %r",
pool_name)
res = path_temp.joinpath("{}.txn".format(pool_name))
logger.debug("pool_genesis_txn_path: <<< res: %r", res)
return res
# noinspection PyUnusedLocal
@pytest.fixture
def pool_genesis_txn_file(pool_genesis_txn_path, pool_genesis_txn_data):
logger = logging.getLogger(__name__)
logger.debug("pool_genesis_txn_file: >>> pool_genesis_txn_path: %r, pool_genesis_txn_data: %r",
pool_genesis_txn_path,
pool_genesis_txn_data)
makedirs(dirname(pool_genesis_txn_path))
with open(str(pool_genesis_txn_path), "w+") as f:
f.writelines(pool_genesis_txn_data)
logger.debug("pool_genesis_txn_file: <<<")
@pytest.fixture
def pool_ledger_config_cleanup():
return True
# noinspection PyUnusedLocal
@pytest.fixture
async def pool_ledger_config(pool_name, pool_genesis_txn_path, pool_genesis_txn_file, pool_ledger_config_cleanup,
path_home):
logger = logging.getLogger(__name__)
logger.debug("pool_ledger_config: >>> pool_name: %r, pool_genesis_txn_path: %r, pool_genesis_txn_file: %r,"
" pool_ledger_config_cleanup: %r, path_home: %r",
pool_name,
pool_genesis_txn_path,
pool_genesis_txn_file,
pool_ledger_config_cleanup,
path_home)
logger.debug("pool_ledger_config: Creating pool ledger config")
await pool.create_pool_ledger_config(
pool_name,
json.dumps({
"genesis_txn": str(pool_genesis_txn_path)
}))
logger.debug("pool_ledger_config: yield")
yield
logger.debug("pool_ledger_config: Deleting pool ledger config")
await pool.delete_pool_ledger_config(pool_name) if pool_ledger_config_cleanup else None
logger.debug("pool_ledger_config: <<<")
@pytest.fixture
def pool_handle_cleanup():
logger = logging.getLogger(__name__)
logger.debug("pool_handle_cleanup: >>>")
res = True
logger.debug("pool_handle_cleanup: <<< res: %r", res)
return res
@pytest.fixture
def pool_config():
logger = logging.getLogger(__name__)
logger.debug("pool_config: >>>")
res = None
logger.debug("pool_config: <<< res: %r", res)
return res
# noinspection PyUnusedLocal
@pytest.fixture
async def pool_handle(pool_name, pool_ledger_config, pool_config, pool_handle_cleanup):
logger = logging.getLogger(__name__)
logger.debug("pool_handle: >>> pool_name: %r, pool_ledger_config: %r, pool_config: %r, pool_handle_cleanup: %r",
pool_name,
pool_ledger_config,
pool_config,
pool_handle_cleanup)
logger.debug("pool_handle: Opening pool ledger")
pool_handle = await pool.open_pool_ledger(pool_name, pool_config)
assert type(pool_handle) is int
logger.debug("pool_handle: yield: %r", pool_handle)
yield pool_handle
logger.debug("pool_handle: Closing pool ledger")
await pool.close_pool_ledger(pool_handle) if pool_handle_cleanup else None
logger.debug("pool_handle: <<<")
@pytest.fixture
async def identity_trustee1(wallet_handle, seed_trustee1):
(trustee_did, trustee_verkey, _) = await signus.create_and_store_my_did(wallet_handle,
json.dumps({"seed": seed_trustee1}))
yield (trustee_did, trustee_verkey)
@pytest.fixture
async def identity_steward1(wallet_handle, seed_steward1):
(steward_did, steward_verkey, _) = await signus.create_and_store_my_did(wallet_handle,
json.dumps({"seed": seed_steward1}))
yield (steward_did, steward_verkey)
@pytest.fixture
async def identity_my1(wallet_handle, pool_handle, identity_trustee1, seed_my1, ):
(trustee_did, trustee_verkey) = identity_trustee1
(my_did, my_verkey, _) = await signus.create_and_store_my_did(wallet_handle,
json.dumps({"seed": seed_my1}))
nym_request = await ledger.build_nym_request(trustee_did, my_did, my_verkey, None, None)
await ledger.sign_and_submit_request(pool_handle, wallet_handle, trustee_did, nym_request)
yield (my_did, my_verkey)
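# Hypothetical test (not part of this conftest) showing how the fixtures
# compose; pytest injects the pool and wallet handles built above, and the
# exact reply layout asserted below is an assumption:
#
#     async def test_trustee_nym_on_ledger(pool_handle, wallet_handle,
#                                          identity_trustee1):
#         (did, _verkey) = identity_trustee1
#         request = await ledger.build_get_nym_request(did, did)
#         response = await ledger.submit_request(pool_handle, request)
#         assert json.loads(response)["result"]["dest"] == did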
|
mrbox/django
|
refs/heads/master
|
django/utils/ipv6.py
|
208
|
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. https://github.com/google/ipaddr-py
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy as _
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::" and
removes leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
        A compressed IPv6 address (or the plain IPv4 address if
        unpack_ipv4 applies).
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
    # - no need to run the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for __ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
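# Illustrative (not part of the original module):
#     _explode_shorthand_ip_string('2001:db8::1')
#     -> '2001:0db8:0000:0000:0000:0000:0000:0001'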
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
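# Illustrative examples (not part of the original module):
#
#     >>> clean_ipv6_address('2001:0::0:01')
#     '2001::1'
#     >>> clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True)
#     '10.10.10.10'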
|
ArneBab/pypyjs
|
refs/heads/master
|
website/demo/home/rfk/repos/pypy/lib-python/2.7/binhex.py
|
216
|
"""Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
from Carbon.File import FSSpec, FInfo
from MacOS import openrf
def getfileinfo(name):
finfo = FSSpec(name).FSpGetFInfo()
dir, file = os.path.split(name)
# XXX Get resource/data sizes
fp = open(name, 'rb')
fp.seek(0, 2)
dlen = fp.tell()
fp = openrf(name, '*rb')
fp.seek(0, 2)
rlen = fp.tell()
return file, finfo, dlen, rlen
def openrsrc(name, *mode):
if not mode:
mode = '*rb'
else:
mode = '*' + mode[0]
return openrf(name, mode)
except ImportError:
#
# Glue code for non-macintosh usage
#
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
# Quick check for textfile
fp = open(name)
        data = fp.read(256)
for c in data:
if not c.isspace() and (c<' ' or ord(c) > 0x7f):
break
else:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return ''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
self.hqxdata = ''
self.linelen = LINELEN-1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen//3)*3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata)-self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last]+'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + ':\n')
def close(self):
if self.data:
self.hqxdata = \
self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = ''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
            raise Error, 'Incorrect data size, diff=%r' % (self.dlen,)
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff=%r" % (self.rlen,)
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""(infilename, outfilename) - Create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while 1:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
        # The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd+2)//3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = ''
self.post_buffer = ''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd-len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = ''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + '\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if type(ifp) == type(''):
ifp = open(ifp)
#
# Find initial colon.
#
while 1:
ch = ifp.read(1)
if not ch:
raise Error, "No binhex data found"
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == '\r':
continue
if ch == ':':
break
if ch != '\n':
dummy = ifp.readline()
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error, 'CRC error, computed %x, read %x' \
%(self.crc, filecrc)
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1+4+4+2+4+4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error, 'Read data at wrong time'
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = ''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error, 'close_data at wrong time'
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Read resource data at wrong time'
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close()
def _test():
fname = sys.argv[1]
binhex(fname, fname+'.hqx')
hexbin(fname+'.hqx', fname+'.viahqx')
#hexbin(fname, fname+'.unpacked')
sys.exit(1)
if __name__ == '__main__':
_test()
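# Illustrative round-trip (a sketch, not part of the original module;
# 'data.bin' is a hypothetical input file):
#
#     binhex('data.bin', 'data.hqx')  # encode data + resource forks
#     hexbin('data.hqx', 'data.out')  # decode; data.out holds the data fork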
|
zhukaixy/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/poplib.py
|
74
|
"""A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <david_ascher@brown.edu>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import errno
import re
import socket
try:
import ssl
HAVE_SSL = True
except ImportError:
HAVE_SSL = False
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard Port
POP3_PORT = 110
# POP SSL PORT
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = b'\r'
LF = b'\n'
CRLF = CR+LF
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 1939 limits POP3 line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.
_MAXLINE = 2048
class POP3:
"""This class supports both the minimal and optional command sets.
Arguments can be strings or integers (where appropriate)
    (e.g.: retr(1) and retr('1') both work equally well).
Minimal Command Set:
USER name user(name)
PASS string pass_(string)
STAT stat()
LIST [msg] list(msg = None)
RETR msg retr(msg)
DELE msg dele(msg)
NOOP noop()
RSET rset()
QUIT quit()
Optional Commands (some servers support these):
RPOP name rpop(name)
APOP name digest apop(name, digest)
TOP msg n top(msg, n)
UIDL [msg] uidl(msg = None)
CAPA capa()
STLS stls()
Raises one exception: 'error_proto'.
Instantiate with:
POP3(hostname, port=110)
NB: the POP protocol locks the mailbox from user
authorization until QUIT, so be sure to get in, suck
the messages, and quit, each time you access the
mailbox.
POP is a line-based protocol, which means large mail
messages consume lots of python cycles reading them
line-by-line.
If it's available on your mail server, use IMAP4
instead, it doesn't suffer from the two problems
above.
"""
encoding = 'UTF-8'
def __init__(self, host, port=POP3_PORT,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.port = port
self._tls_established = False
self.sock = self._create_socket(timeout)
self.file = self.sock.makefile('rb')
self._debugging = 0
self.welcome = self._getresp()
def _create_socket(self, timeout):
return socket.create_connection((self.host, self.port), timeout)
def _putline(self, line):
if self._debugging > 1: print('*put*', repr(line))
self.sock.sendall(line + CRLF)
# Internal: send one command to the server (through _putline())
def _putcmd(self, line):
if self._debugging: print('*cmd*', repr(line))
line = bytes(line, self.encoding)
self._putline(line)
# Internal: return one line from the server, stripping CRLF.
# This is where all the CPU time of this module is consumed.
# Raise error_proto('-ERR EOF') if the connection is closed.
def _getline(self):
line = self.file.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise error_proto('line too long')
if self._debugging > 1: print('*get*', repr(line))
if not line: raise error_proto('-ERR EOF')
octets = len(line)
# server can send any combination of CR & LF
# however, 'readline()' returns lines ending in LF
# so only possibilities are ...LF, ...CRLF, CR...LF
if line[-2:] == CRLF:
return line[:-2], octets
if line[0] == CR:
return line[1:-1], octets
return line[:-1], octets
# Internal: get a response from the server.
# Raise 'error_proto' if the response doesn't start with '+'.
def _getresp(self):
resp, o = self._getline()
if self._debugging > 1: print('*resp*', repr(resp))
if not resp.startswith(b'+'):
raise error_proto(resp)
return resp
# Internal: get a response plus following text from the server.
def _getlongresp(self):
resp = self._getresp()
list = []; octets = 0
line, o = self._getline()
while line != b'.':
if line.startswith(b'..'):
o = o-1
line = line[1:]
octets = octets + o
list.append(line)
line, o = self._getline()
return resp, list, octets
# Internal: send a command and get the response
def _shortcmd(self, line):
self._putcmd(line)
return self._getresp()
# Internal: send a command and get the response plus following text
def _longcmd(self, line):
self._putcmd(line)
return self._getlongresp()
# These can be useful:
def getwelcome(self):
return self.welcome
def set_debuglevel(self, level):
self._debugging = level
# Here are all the POP commands:
def user(self, user):
"""Send user name, return response
(should indicate password required).
"""
return self._shortcmd('USER %s' % user)
def pass_(self, pswd):
"""Send password, return response
(response includes message count, mailbox size).
NB: mailbox is locked by server from here to 'quit()'
"""
return self._shortcmd('PASS %s' % pswd)
def stat(self):
"""Get mailbox status.
Result is tuple of 2 ints (message count, mailbox size)
"""
retval = self._shortcmd('STAT')
rets = retval.split()
if self._debugging: print('*stat*', repr(rets))
numMessages = int(rets[1])
sizeMessages = int(rets[2])
return (numMessages, sizeMessages)
def list(self, which=None):
"""Request listing, return result.
Result without a message number argument is in form
['response', ['mesg_num octets', ...], octets].
Result when a message number argument is given is a
single response: the "scan listing" for that message.
"""
if which is not None:
return self._shortcmd('LIST %s' % which)
return self._longcmd('LIST')
def retr(self, which):
"""Retrieve whole message number 'which'.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('RETR %s' % which)
def dele(self, which):
"""Delete message number 'which'.
Result is 'response'.
"""
return self._shortcmd('DELE %s' % which)
def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP')
def rset(self):
"""Unmark all messages marked for deletion."""
return self._shortcmd('RSET')
def quit(self):
"""Signoff: commit changes on server, unlock mailbox, close connection."""
resp = self._shortcmd('QUIT')
self.close()
return resp
def close(self):
"""Close the connection without assuming anything about it."""
if self.file is not None:
self.file.close()
if self.sock is not None:
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as e:
# The server might already have closed the connection
if e.errno != errno.ENOTCONN:
raise
finally:
self.sock.close()
self.file = self.sock = None
#__del__ = quit
# optional commands:
def rpop(self, user):
"""Not sure what this does."""
return self._shortcmd('RPOP %s' % user)
timestamp = re.compile(br'\+OK.*(<[^>]+>)')
def apop(self, user, password):
"""Authorisation
- only possible if server has supplied a timestamp in initial greeting.
Args:
user - mailbox user;
password - mailbox password.
NB: mailbox is locked by server from here to 'quit()'
"""
secret = bytes(password, self.encoding)
m = self.timestamp.match(self.welcome)
if not m:
raise error_proto('-ERR APOP not supported by server')
import hashlib
digest = m.group(1)+secret
digest = hashlib.md5(digest).hexdigest()
return self._shortcmd('APOP %s %s' % (user, digest))
def top(self, which, howmuch):
"""Retrieve message header of message number 'which'
and first 'howmuch' lines of message body.
Result is in form ['response', ['line', ...], octets].
"""
return self._longcmd('TOP %s %s' % (which, howmuch))
def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL')
def capa(self):
"""Return server capabilities (RFC 2449) as a dictionary
>>> c=poplib.POP3('localhost')
>>> c.capa()
{'IMPLEMENTATION': ['Cyrus', 'POP3', 'server', 'v2.2.12'],
'TOP': [], 'LOGIN-DELAY': ['0'], 'AUTH-RESP-CODE': [],
'EXPIRE': ['NEVER'], 'USER': [], 'STLS': [], 'PIPELINING': [],
'UIDL': [], 'RESP-CODES': []}
>>>
Really, according to RFC 2449, the cyrus folks should avoid
having the implementation split into multiple arguments...
"""
def _parsecap(line):
lst = line.decode('ascii').split()
return lst[0], lst[1:]
caps = {}
try:
resp = self._longcmd('CAPA')
rawcaps = resp[1]
for capline in rawcaps:
capnm, capargs = _parsecap(capline)
caps[capnm] = capargs
except error_proto as _err:
raise error_proto('-ERR CAPA not supported by server')
return caps
def stls(self, context=None):
"""Start a TLS session on the active connection as specified in RFC 2595.
        context - an ssl.SSLContext
"""
if not HAVE_SSL:
raise error_proto('-ERR TLS support missing')
if self._tls_established:
raise error_proto('-ERR TLS session already established')
caps = self.capa()
        if 'STLS' not in caps:
raise error_proto('-ERR STLS not supported by server')
if context is None:
context = ssl._create_stdlib_context()
resp = self._shortcmd('STLS')
server_hostname = self.host if ssl.HAS_SNI else None
self.sock = context.wrap_socket(self.sock,
server_hostname=server_hostname)
self.file = self.sock.makefile('rb')
self._tls_established = True
return resp
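    # A minimal usage sketch for the STLS upgrade (the hostname and the
    # credentials below are illustrative, and the server must advertise STLS):
    #
    #   pop = POP3('mail.example.com')
    #   pop.stls()                     # the plaintext socket is now wrapped in TLS
    #   pop.user('alice')
    #   pop.pass_('secret')
    #   pop.quit()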
if HAVE_SSL:
class POP3_SSL(POP3):
"""POP3 client class over SSL connection
Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None,
context=None)
hostname - the hostname of the pop3 over ssl server
port - port number
keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
    context - an ssl.SSLContext
See the methods of the parent class POP3 for more documentation.
"""
def __init__(self, host, port=POP3_SSL_PORT, keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
POP3.__init__(self, host, port, timeout)
def _create_socket(self, timeout):
sock = POP3._create_socket(self, timeout)
server_hostname = self.host if ssl.HAS_SNI else None
sock = self.context.wrap_socket(sock,
server_hostname=server_hostname)
return sock
def stls(self, keyfile=None, certfile=None, context=None):
"""The method unconditionally raises an exception since the
STLS command doesn't make any sense on an already established
SSL/TLS session.
"""
raise error_proto('-ERR TLS session already established')
__all__.append("POP3_SSL")
if __name__ == "__main__":
import sys
a = POP3(sys.argv[1])
print(a.getwelcome())
a.user(sys.argv[2])
a.pass_(sys.argv[3])
a.list()
(numMsgs, totalSize) = a.stat()
for i in range(1, numMsgs + 1):
(header, msg, octets) = a.retr(i)
print("Message %d:" % i)
        for line in msg:
            print('   ' + line.decode())  # message lines arrive as bytes
print('-----------------------')
a.quit()
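# Ad-hoc smoke test, e.g.:
#   python poplib.py <host> <user> <password>
# which prints the greeting and every message in the mailbox.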
|
dongguangming/pexpect
|
refs/heads/master
|
examples/script.py
|
22
|
#!/usr/bin/env python
'''This spawns a sub-shell (sh by default) and gives the user interactive
control. The entire shell session is logged to a file called script.log. This
behaves much like the classic BSD command 'script'.
./script.py [-a] [-c command] {logfilename}
logfilename : This is the name of the log file. Default is script.log.
-a : Append to log file. Default is to overwrite log file.
-c : spawn command. Default is to spawn the sh shell.
Example:
This will start a bash shell and append to the log named my_session.log:
./script.py -a -c bash my_session.log
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from __future__ import print_function
from __future__ import absolute_import
import os, sys, time, getopt
import signal, fcntl, termios, struct
import pexpect
global_pexpect_instance = None # Used by signal handler
def exit_with_usage():
print(globals()['__doc__'])
os._exit(1)
def main():
######################################################################
# Parse the options, arguments, get ready, etc.
######################################################################
try:
optlist, args = getopt.getopt(sys.argv[1:], 'h?ac:', ['help','h','?'])
except Exception as e:
print(str(e))
exit_with_usage()
options = dict(optlist)
if len(args) > 1:
exit_with_usage()
    if any(opt in ('-h', '--h', '-?', '--?', '--help') for opt in options):
print("Help:")
exit_with_usage()
if len(args) == 1:
script_filename = args[0]
else:
script_filename = "script.log"
if '-a' in options:
fout = open(script_filename, "ab")
else:
fout = open(script_filename, "wb")
if '-c' in options:
command = options['-c']
else:
command = "sh"
# Begin log with date/time in the form CCCCyymm.hhmmss
    # The log file is opened in binary mode, so encode the header line.
    fout.write(('# %4d%02d%02d.%02d%02d%02d \n' % time.localtime()[:-3]).encode('ascii'))
######################################################################
# Start the interactive session
######################################################################
p = pexpect.spawn(command)
p.logfile = fout
global global_pexpect_instance
global_pexpect_instance = p
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
print("Script recording started. Type ^] (ASCII 29) to escape from the script shell.")
p.interact(chr(29))
fout.close()
return 0
def sigwinch_passthrough(sig, data):
# Check for buggy platforms (see pexpect.setwinsize()).
if 'TIOCGWINSZ' in dir(termios):
TIOCGWINSZ = termios.TIOCGWINSZ
else:
TIOCGWINSZ = 1074295912 # assume
    s = struct.pack("HHHH", 0, 0, 0, 0)
    a = struct.unpack('HHHH', fcntl.ioctl(sys.stdout.fileno(), TIOCGWINSZ, s))
    global global_pexpect_instance
    global_pexpect_instance.setwinsize(a[0], a[1])
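# When the controlling terminal is resized the kernel sends SIGWINCH; the
# handler above reads the new size from our own stdout via ioctl(TIOCGWINSZ)
# and propagates it to the child's pty with setwinsize(), so full-screen
# programs inside the session (vi, top, ...) redraw at the right size.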
if __name__ == "__main__":
main()
|