| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
piyushroshan/xen-4.3.2
|
refs/heads/master
|
tools/python/xen/util/xsconstants.py
|
43
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2007 International Business Machines Corp.
# Author: Stefan Berger <stefanb@us.ibm.com>
#============================================================================
XS_INST_NONE = 0
XS_INST_BOOT = (1 << 0)
XS_INST_LOAD = (1 << 1)
XS_POLICY_ACM = (1 << 0)
XS_POLICY_FLASK = (1 << 1)
XS_POLICY_DUMMY = (1 << 2)
XS_POLICY_USE = 0
# Some internal variables used by the Xen-API
ACM_LABEL_VM = (1 << 0)
ACM_LABEL_RES = (1 << 1)
# Base for XS error codes for collision avoidance with other error codes
XSERR_BASE = 0x1000
# XS error codes as used by the Xen-API
XSERR_SUCCESS = 0
XSERR_GENERAL_FAILURE = 1 + XSERR_BASE
XSERR_BAD_XML = 2 + XSERR_BASE # XML is wrong (not according to schema)
XSERR_XML_PROCESSING = 3 + XSERR_BASE
XSERR_POLICY_INCONSISTENT = 4 + XSERR_BASE # i.e., bootstrap name not a VM label
XSERR_FILE_ERROR = 5 + XSERR_BASE
XSERR_BAD_RESOURCE_FORMAT = 6 + XSERR_BASE # badly formatted resource
XSERR_BAD_LABEL_FORMAT = 7 + XSERR_BASE
XSERR_RESOURCE_NOT_LABELED = 8 + XSERR_BASE
XSERR_RESOURCE_ALREADY_LABELED = 9 + XSERR_BASE
XSERR_WRONG_POLICY_TYPE = 10 + XSERR_BASE
XSERR_BOOTPOLICY_INSTALLED = 11 + XSERR_BASE
XSERR_NO_DEFAULT_BOOT_TITLE = 12 + XSERR_BASE
XSERR_POLICY_LOAD_FAILED = 13 + XSERR_BASE
XSERR_POLICY_LOADED = 14 + XSERR_BASE
XSERR_POLICY_TYPE_UNSUPPORTED = 15 + XSERR_BASE
XSERR_BAD_CONFLICTSET = 16 + XSERR_BASE
XSERR_RESOURCE_IN_USE = 17 + XSERR_BASE
XSERR_BAD_POLICY_NAME = 18 + XSERR_BASE
XSERR_VERSION_PREVENTS_UPDATE = 19 + XSERR_BASE
XSERR_BAD_LABEL = 20 + XSERR_BASE
XSERR_VM_WRONG_STATE = 21 + XSERR_BASE
XSERR_POLICY_NOT_LOADED = 22 + XSERR_BASE
XSERR_RESOURCE_ACCESS = 23 + XSERR_BASE
XSERR_HV_OP_FAILED = 24 + XSERR_BASE
XSERR_BOOTPOLICY_INSTALL_ERROR = 25 + XSERR_BASE
XSERR_VM_NOT_AUTHORIZED = 26 + XSERR_BASE
XSERR_VM_IN_CONFLICT = 27 + XSERR_BASE
XSERR_POLICY_HAS_DUPLICATES = 28 + XSERR_BASE
XSERR_LAST = 28 + XSERR_BASE ## KEEP LAST
XSERR_MESSAGES = [
'',
'General Failure',
'XML is malformed',
'Error while processing XML',
'Policy has inconsistencies',
'A file access error occurred',
'The resource format is not valid',
'The label format is not valid',
'The resource is not labeled',
'The resource is already labeled',
'The policy type is wrong',
'The system boot policy is installed',
'Could not find the default boot title',
'Loading of the policy failed',
'The policy is loaded',
'The policy type is unsupported',
'There is a bad conflict set',
'The resource is in use',
'The policy has an invalid name',
'The version of the policy prevents an update',
'The label is bad',
'Operation not permitted - the VM is in the wrong state',
'The policy is not loaded',
'Error accessing resource',
'Operation failed in hypervisor',
'Boot policy installation error',
'VM is not authorized to run',
'VM label conflicts with another VM',
'Duplicate labels or types in policy'
]
def xserr2string(err):
if err == XSERR_SUCCESS:
return "Success"
if err >= XSERR_GENERAL_FAILURE and \
err <= XSERR_LAST:
return XSERR_MESSAGES[err - XSERR_BASE]
return "Unknown XSERR code '%s'." % (hex(err))
# Policy identifiers used in labels
ACM_POLICY_ID = 'ACM'
FLASK_POLICY_ID = 'FLASK'
INVALID_POLICY_PREFIX = 'INV_'
INVALID_SSIDREF = 0xFFFFFFFFL
XS_INACCESSIBLE_LABEL = '__INACCESSIBLE__'
|
richpolis/siveinpy
|
refs/heads/master
|
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langthaimodel.py
|
2929
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
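# For example (illustrative, read off the table above): digits fall in the
# "252" bucket, while ASCII letters get an individual frequency order:
#     TIS620CharToOrderMap[ord('0')] == 252
#     TIS620CharToOrderMap[ord('A')] == 182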
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
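# A minimal sketch of how charade consumes a model dict like the one above
# (assumes charade's SBCharSetProber module; illustrative only):
#
#     from charade.sbcharsetprober import SingleByteCharSetProber
#     prober = SingleByteCharSetProber(TIS620ThaiModel)
#     prober.feed(tis620_encoded_bytes)
#     confidence = prober.get_confidence()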
# flake8: noqa
|
houssine78/addons
|
refs/heads/9.0
|
website_operational_offices/models/__init__.py
|
2
|
# -*- coding: utf-8 -*-
from . import res_company
|
dr-strange-strange-love/testselector
|
refs/heads/master
|
job_warehouse/vzt-hdd-stress/vzt-hdd-stress-win10x64-el_capitan-plain-splitted-encryption-smp.py
|
1
|
class ParamHolder(object):
def __init__(self):
self.params = {
'build': '3.8.1',
'priority': 'High',
'components': 'QA_auto',
}
def param_complement(self):
# Hook to complement self.params with additional values.
return self.params
|
QuickSander/CouchPotatoServer
|
refs/heads/master
|
libs/pyutil/benchutil.py
|
92
|
# Copyright (c) 2002-2013 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Benchmark a function for its behavior with respect to N.
How to use this module:
1. Define a function which runs the code that you want to benchmark. The
function takes a single argument which is the size of the task (i.e. the "N"
parameter). Pass this function as the first argument to rep_bench(), and N as
the second, e.g.:
>>> from pyutil.benchutil import rep_bench
>>> def fib(N):
... if N <= 1:
... return 1
... else:
... return fib(N-1) + fib(N-2)
...
>>> rep_bench(fib, 25, UNITS_PER_SECOND=1000)
best: 1.968e+00, 3th-best: 1.987e+00, mean: 2.118e+00, 3th-worst: 2.175e+00, worst: 2.503e+00 (of 10)
The output is reporting the number of milliseconds that executing the
function took, divided by N, from ten different invocations of
fib(). It reports the best, worst, M-th best, M-th worst, and mean,
where "M" is 1/4 of the number of invocations (in this case 10).
2. Now run it with different values of N and look for patterns:
>>> for N in 1, 5, 9, 13, 17, 21:
... print "%2d" % N,
... rep_bench(fib, N, UNITS_PER_SECOND=1000000)
...
1 best: 9.537e-01, 3th-best: 9.537e-01, mean: 1.121e+00, 3th-worst: 1.192e+00, worst: 2.146e+00 (of 10)
5 best: 5.722e-01, 3th-best: 6.199e-01, mean: 7.200e-01, 3th-worst: 8.106e-01, worst: 8.106e-01 (of 10)
9 best: 2.437e+00, 3th-best: 2.464e+00, mean: 2.530e+00, 3th-worst: 2.570e+00, worst: 2.676e+00 (of 10)
13 best: 1.154e+01, 3th-best: 1.168e+01, mean: 5.638e+01, 3th-worst: 1.346e+01, worst: 4.478e+02 (of 10)
17 best: 6.230e+01, 3th-best: 6.247e+01, mean: 6.424e+01, 3th-worst: 6.460e+01, worst: 7.294e+01 (of 10)
21 best: 3.376e+02, 3th-best: 3.391e+02, mean: 3.521e+02, 3th-worst: 3.540e+02, worst: 3.963e+02 (of 10)
>>> print_bench_footer(UNITS_PER_SECOND=1000000)
all results are in time units per N
time units per second: 1000000; seconds per time unit: 0.000001
(The pattern here is that as N grows, the time per N grows.)
3. If you need to do some setting up before the code can run, then put the
setting-up code into a separate function so that it won't be included in the
timing measurements. A good way to share state between the setting-up function
and the main function is to make them be methods of the same object, e.g.:
>>> import random
>>> class O:
... def __init__(self):
... self.l = []
... def setup(self, N):
... del self.l[:]
... self.l.extend(range(N))
... random.shuffle(self.l)
... def sort(self, N):
... self.l.sort()
...
>>> o = O()
>>> for N in 1000, 10000, 100000, 1000000:
... print "%7d" % N,
... rep_bench(o.sort, N, o.setup)
...
1000 best: 4.830e+02, 3th-best: 4.950e+02, mean: 5.730e+02, 3th-worst: 5.858e+02, worst: 7.451e+02 (of 10)
10000 best: 6.342e+02, 3th-best: 6.367e+02, mean: 6.678e+02, 3th-worst: 6.851e+02, worst: 7.848e+02 (of 10)
100000 best: 8.309e+02, 3th-best: 8.338e+02, mean: 8.435e+02, 3th-worst: 8.540e+02, worst: 8.559e+02 (of 10)
1000000 best: 1.327e+03, 3th-best: 1.339e+03, mean: 1.349e+03, 3th-worst: 1.357e+03, worst: 1.374e+03 (of 10)
4. Useful fact! rep_bench() returns a dict containing the numbers.
5. Things to fix:
a. I used to have it hooked up to use the "hotshot" profiler on the
code being measured. I recently tried to change it to use the newer
cProfile profiler instead, but I don't understand the interface to
cProfiler so it just gives an exception if you pass
profile=True. Please fix this and send me a patch. xxx change it to
statprof
b. Wouldn't it be great if this script emitted results in a json format that
was understood by a tool to make pretty interactive explorable graphs? The
pretty graphs could look like those on http://speed.pypy.org/ . Please make
this work and send me a patch!
"""
import cProfile, operator, time
from decimal import Decimal as D
#from pyutil import jsonutil as json
import platform
if 'windows' in platform.system().lower():
clock = time.clock
else:
clock = time.time
from assertutil import _assert
def makeg(func):
def blah(n, func=func):
for i in xrange(n):
func()
return blah
def to_decimal(x):
"""
See if D(x) returns something. If instead it raises TypeError, x must have been a float, so convert it to Decimal by way of string. (In Python >= 2.7, D(x) does this automatically.)
"""
try:
return D(x)
except TypeError:
return D("%0.54f" % (x,))
def mult(a, b):
"""
If we get TypeError from * (possibly because one is float and the other is Decimal), then promote them both to Decimal.
"""
try:
return a * b
except TypeError:
return to_decimal(a) * to_decimal(b)
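# A quick sketch of the promotion behaviour (illustrative only):
#
#     >>> mult(3, D(2))     # int * Decimal multiplies directly
#     Decimal('6')
#     >>> mult(0.5, D(4))   # float * Decimal raises TypeError, so both
#     ...                   # operands are promoted to Decimal first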
def rep_bench(func, n, runtime=1.0, initfunc=None, MAXREPS=10, MAXTIME=60.0, profile=False, profresults="pyutil-benchutil.prof", UNITS_PER_SECOND=1, quiet=False):
"""
Will run the func up to MAXREPS times, but won't start a new run if MAXTIME
(wall-clock time) has already elapsed (unless MAXTIME is None).
@param quiet Don't print anything--just return the results dict.
"""
assert isinstance(n, int), (n, type(n))
global worstemptymeasure
emsta = clock()
do_nothing(2**32)
emstop = clock()
empty = emstop - emsta
if empty > worstemptymeasure:
worstemptymeasure = empty
if (worstemptymeasure*2) >= runtime:
raise BadMeasure("Apparently simply invoking an empty Python function can take as long as %0.10f seconds, and we were running iterations for only about %0.10f seconds. So the measurement of the runtime of the code under benchmark is not reliable. Please pass a higher number for the 'runtime' argument to bench_it().")
startwallclocktime = time.time()
tls = [] # (elapsed time per iter in seconds, iters)
bmes = []
while ((len(tls) < MAXREPS) or (MAXREPS is None)) and ((MAXTIME is None) or ((time.time() - startwallclocktime) < MAXTIME)):
if initfunc:
initfunc(n)
try:
tl, iters = bench_it(func, n, runtime=runtime, profile=profile, profresults=profresults)
except BadMeasure, bme:
bmes.append(bme)
else:
tls.append((tl, iters))
if len(tls) == 0:
raise Exception("Couldn't get any measurements within time limits or number-of-attempts limits. Maybe something is wrong with your clock? %s" % (bmes,))
sumtls = sum([tl for (tl, iters) in tls])
mean = sumtls / len(tls)
tls.sort()
worst = tls[-1][0]
best = tls[0][0]
m = len(tls)/4
if m > 0:
mthbest = tls[m-1][0]
mthworst = tls[-m][0]
else:
mthbest = tls[0][0]
mthworst = tls[-1][0]
# The +/-0 index is the best/worst, the +/-1 index is the 2nd-best/worst,
# etc, so we use mp1 to name it.
mp1 = m+1
res = {
'worst': mult(worst, UNITS_PER_SECOND)/n,
'best': mult(best, UNITS_PER_SECOND)/n,
'mp1': mp1,
'mth-best': mult(mthbest, UNITS_PER_SECOND)/n,
'mth-worst': mult(mthworst, UNITS_PER_SECOND)/n,
'mean': mult(mean, UNITS_PER_SECOND)/n,
'num': len(tls),
}
if not quiet:
print "best: %(best)#8.03e, %(mp1)3dth-best: %(mth-best)#8.03e, mean: %(mean)#8.03e, %(mp1)3dth-worst: %(mth-worst)#8.03e, worst: %(worst)#8.03e (of %(num)6d)" % res
return res
MARGINOFERROR = 10
worstemptymeasure = 0
class BadMeasure(Exception):
""" Either the clock wrapped (which happens with time.clock()) or
it went backwards (which happens with time.time() on rare
occasions), (or the code being measured completed before a single
clock tick). """
def __init__(self, *args):
# Accept either a descriptive message or a (timeelapsed, iters) tuple;
# both raise sites in this module pass a single argument.
Exception.__init__(self, *args)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.args)
def do_nothing(n):
pass
def bench_it(func, n, runtime=1.0, profile=False, profresults="pyutil-benchutil.prof"):
if profile:
# Profiling support is unfinished; see item 5.a in the module docstring.
raise NotImplementedError()
else:
iters = 0
st = clock()
deadline = st + runtime
sto = clock()
while sto < deadline:
func(n)
iters += 1
sto = clock()
timeelapsed = sto - st
if (timeelapsed <= 0) or (iters == 0):
raise BadMeasure((timeelapsed, iters))
return (timeelapsed / iters, iters)
def bench(func, initfunc=None, TOPXP=21, MAXREPS=5, MAXTIME=60.0, profile=False, profresults="pyutil-benchutil.prof", outputjson=False, jsonresultsfname="pyutil-benchutil-results.json", UNITS_PER_SECOND=1):
BSIZES = []
for i in range(TOPXP-6, TOPXP+1, 2):
n = int(2 ** i)
if n < 1:
n = 1
if BSIZES and n <= BSIZES[-1]:
n *= 2
BSIZES.append(n)
res = {}
for BSIZE in BSIZES:
print "N: %7d," % BSIZE,
r = rep_bench(func, BSIZE, initfunc=initfunc, MAXREPS=MAXREPS, MAXTIME=MAXTIME, profile=profile, profresults=profresults, UNITS_PER_SECOND=UNITS_PER_SECOND)
res[BSIZE] = r
#if outputjson:
# write_file(jsonresultsfname, json.dumps(res))
return res
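# A minimal sketch of driving bench() (illustrative only): makeg() turns a
# zero-argument function into one whose "task size" N is the number of calls
# per iteration, which suits bench()'s doubling of N:
#
#     >>> bench(makeg(lambda: None), TOPXP=16, MAXREPS=3)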
def print_bench_footer(UNITS_PER_SECOND=1):
print "all results are in time units per N"
print "time units per second: %s; seconds per time unit: %s" % (UNITS_PER_SECOND, D(1)/UNITS_PER_SECOND)
|
knittledan/Location_Search_Prediction
|
refs/heads/master
|
thirdParty/amazonproduct/utils.py
|
2
|
# Copyright (C) 2009-2013 Sebastian Rahlf <basti at redtoad dot de>
#
# This program is released under the BSD License. You can find the full text of
# the license in the LICENSE file.
from ConfigParser import SafeConfigParser
import os
import sys
REQUIRED_KEYS = [
'access_key',
'secret_key',
'associate_tag',
'locale',
]
CONFIG_FILES = [
'/etc/amazon-product-api.cfg',
'~/.amazon-product-api'
]
def load_file_config(path=None):
"""
Loads configuration from file with following content::
[Credentials]
access_key = <your access key>
secret_key = <your secret key>
associate_tag = <your associate tag>
locale = us
:param path: path to config file. If not specified, locations
``/etc/amazon-product-api.cfg`` and ``~/.amazon-product-api`` are tried.
"""
config = SafeConfigParser()
if path is None:
config.read([os.path.expanduser(path) for path in CONFIG_FILES])
else:
config.read(path)
if not config.has_section('Credentials'):
return {}
return dict(
(key, val)
for key, val in config.items('Credentials')
if key in REQUIRED_KEYS
)
def load_environment_config():
"""
Loads config dict from environmental variables (if set):
* AWS_ACCESS_KEY
* AWS_SECRET_ACCESS_KEY
* AWS_ASSOCIATE_TAG
* AWS_LOCALE
"""
mapper = {
'access_key': 'AWS_ACCESS_KEY',
'secret_key': 'AWS_SECRET_ACCESS_KEY',
'associate_tag': 'AWS_ASSOCIATE_TAG',
'locale': 'AWS_LOCALE',
}
return dict(
(key, os.environ.get(val))
for key, val in mapper.items()
if val in os.environ
)
def load_config(path=None):
"""
Returns a dict with API credentials which is loaded from (in this order):
* Environment variables ``AWS_ACCESS_KEY``, ``AWS_SECRET_ACCESS_KEY``,
``AWS_ASSOCIATE_TAG`` and ``AWS_LOCALE``
* Config files ``/etc/amazon-product-api.cfg`` or ``~/.amazon-product-api``
where the latter may add or replace values of the former.
Whatever is found first counts.
The returned dictionary may look like this::
{
'access_key': '<access key>',
'secret_key': '<secret key>',
'associate_tag': 'redtoad-10',
'locale': 'uk'
}
:param path: path to config file.
"""
config = load_file_config(path)
config.update(load_environment_config())
# substitute None for all values not found
for key in REQUIRED_KEYS:
if key not in config:
config[key] = None
return config
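# A minimal usage sketch (illustrative only; assumes credentials are set in
# the environment or in one of CONFIG_FILES):
#
#     >>> cfg = load_config()
#     >>> sorted(cfg.keys())
#     ['access_key', 'associate_tag', 'locale', 'secret_key']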
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
Taken from Django's importlib module
https://code.djangoproject.com/browser/django/trunk/django/utils/importlib.py
"""
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def running_on_gae():
"""
Is this module running on Google App Engine (GAE)?
"""
return 'Google' in os.environ.get('SERVER_SOFTWARE', '')
def load_class(name):
"""
Loads class from string.
:param name: fully-qualified class name (e.g. ``processors.etree.
ItemPaginator``)
"""
module_name, class_name = name.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, class_name)
|
Kaezon/allianceauth
|
refs/heads/master
|
allianceauth/authentication/management/commands/__init__.py
|
12133432
| |
ema/conpaas
|
refs/heads/master
|
conpaas-services/src/conpaas/services/htc/manager/__init__.py
|
12133432
| |
LohithBlaze/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/tests/__init__.py
|
12133432
| |
dvliman/jaikuengine
|
refs/heads/master
|
.google_appengine/lib/django-1.5/tests/regressiontests/templates/templatetags/custom.py
|
51
|
import operator
from django import template
from django.template.defaultfilters import stringfilter
from django.template.loader import get_template
from django.utils import six
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (', '.join([six.text_type(arg) for arg in [one, two] + list(args)]))
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join([six.text_type(arg) for arg in args])
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join([six.text_type(arg) for arg in [one, two] + list(args)]),
', '.join(['%s=%s' % (k, v) for (k, v) in sorted_kwarg])
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
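# A minimal template-side sketch for the two renamed tags above
# (illustrative only; assumes this module is loadable as "custom"):
#
#     {% load custom %}
#     {% minusone 7 %}  ->  6
#     {% minustwo 7 %}  ->  5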
@register.inclusion_tag('inclusion.html')
def inclusion_no_params():
"""Expected inclusion_no_params __doc__"""
return {"result" : "inclusion_no_params - Expected result"}
inclusion_no_params.anything = "Expected inclusion_no_params __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_no_params_from_template():
"""Expected inclusion_no_params_from_template __doc__"""
return {"result" : "inclusion_no_params_from_template - Expected result"}
inclusion_no_params_from_template.anything = "Expected inclusion_no_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_param(arg):
"""Expected inclusion_one_param __doc__"""
return {"result" : "inclusion_one_param - Expected result: %s" % arg}
inclusion_one_param.anything = "Expected inclusion_one_param __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_one_param_from_template(arg):
"""Expected inclusion_one_param_from_template __doc__"""
return {"result" : "inclusion_one_param_from_template - Expected result: %s" % arg}
inclusion_one_param_from_template.anything = "Expected inclusion_one_param_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=False)
def inclusion_explicit_no_context(arg):
"""Expected inclusion_explicit_no_context __doc__"""
return {"result" : "inclusion_explicit_no_context - Expected result: %s" % arg}
inclusion_explicit_no_context.anything = "Expected inclusion_explicit_no_context __dict__"
@register.inclusion_tag(get_template('inclusion.html'), takes_context=False)
def inclusion_explicit_no_context_from_template(arg):
"""Expected inclusion_explicit_no_context_from_template __doc__"""
return {"result" : "inclusion_explicit_no_context_from_template - Expected result: %s" % arg}
inclusion_explicit_no_context_from_template.anything = "Expected inclusion_explicit_no_context_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_no_params_with_context(context):
"""Expected inclusion_no_params_with_context __doc__"""
return {"result" : "inclusion_no_params_with_context - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context.anything = "Expected inclusion_no_params_with_context __dict__"
@register.inclusion_tag(get_template('inclusion.html'), takes_context=True)
def inclusion_no_params_with_context_from_template(context):
"""Expected inclusion_no_params_with_context_from_template __doc__"""
return {"result" : "inclusion_no_params_with_context_from_template - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context_from_template.anything = "Expected inclusion_no_params_with_context_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_params_and_context(context, arg):
"""Expected inclusion_params_and_context __doc__"""
return {"result" : "inclusion_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context.anything = "Expected inclusion_params_and_context __dict__"
@register.inclusion_tag(get_template('inclusion.html'), takes_context=True)
def inclusion_params_and_context_from_template(context, arg):
"""Expected inclusion_params_and_context_from_template __doc__"""
return {"result" : "inclusion_params_and_context_from_template - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context_from_template.anything = "Expected inclusion_params_and_context_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_two_params(one, two):
"""Expected inclusion_two_params __doc__"""
return {"result": "inclusion_two_params - Expected result: %s, %s" % (one, two)}
inclusion_two_params.anything = "Expected inclusion_two_params __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_two_params_from_template(one, two):
"""Expected inclusion_two_params_from_template __doc__"""
return {"result": "inclusion_two_params_from_template - Expected result: %s, %s" % (one, two)}
inclusion_two_params_from_template.anything = "Expected inclusion_two_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_default(one, two='hi'):
"""Expected inclusion_one_default __doc__"""
return {"result": "inclusion_one_default - Expected result: %s, %s" % (one, two)}
inclusion_one_default.anything = "Expected inclusion_one_default __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_one_default_from_template(one, two='hi'):
"""Expected inclusion_one_default_from_template __doc__"""
return {"result": "inclusion_one_default_from_template - Expected result: %s, %s" % (one, two)}
inclusion_one_default_from_template.anything = "Expected inclusion_one_default_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args(one, two='hi', *args):
"""Expected inclusion_unlimited_args __doc__"""
return {"result": "inclusion_unlimited_args - Expected result: %s" % (', '.join([six.text_type(arg) for arg in [one, two] + list(args)]))}
inclusion_unlimited_args.anything = "Expected inclusion_unlimited_args __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_unlimited_args_from_template(one, two='hi', *args):
"""Expected inclusion_unlimited_args_from_template __doc__"""
return {"result": "inclusion_unlimited_args_from_template - Expected result: %s" % (', '.join([six.text_type(arg) for arg in [one, two] + list(args)]))}
inclusion_unlimited_args_from_template.anything = "Expected inclusion_unlimited_args_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_only_unlimited_args(*args):
"""Expected inclusion_only_unlimited_args __doc__"""
return {"result": "inclusion_only_unlimited_args - Expected result: %s" % (', '.join([six.text_type(arg) for arg in args]))}
inclusion_only_unlimited_args.anything = "Expected inclusion_only_unlimited_args __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_only_unlimited_args_from_template(*args):
"""Expected inclusion_only_unlimited_args_from_template __doc__"""
return {"result": "inclusion_only_unlimited_args_from_template - Expected result: %s" % (', '.join([six.text_type(arg) for arg in args]))}
inclusion_only_unlimited_args_from_template.anything = "Expected inclusion_only_unlimited_args_from_template __dict__"
@register.inclusion_tag('test_incl_tag_current_app.html', takes_context=True)
def inclusion_tag_current_app(context):
"""Expected inclusion_tag_current_app __doc__"""
return {}
inclusion_tag_current_app.anything = "Expected inclusion_tag_current_app __dict__"
@register.inclusion_tag('test_incl_tag_use_l10n.html', takes_context=True)
def inclusion_tag_use_l10n(context):
"""Expected inclusion_tag_use_l10n __doc__"""
return {}
inclusion_tag_use_l10n.anything = "Expected inclusion_tag_use_l10n __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected inclusion_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return {"result": "inclusion_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join([six.text_type(arg) for arg in [one, two] + list(args)]),
', '.join(['%s=%s' % (k, v) for (k, v) in sorted_kwarg])
)}
inclusion_unlimited_args_kwargs.anything = "Expected inclusion_unlimited_args_kwargs __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_tag_without_context_parameter(arg):
"""Expected inclusion_tag_without_context_parameter __doc__"""
return {}
inclusion_tag_without_context_parameter.anything = "Expected inclusion_tag_without_context_parameter __dict__"
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag
def assignment_one_param(arg):
"""Expected assignment_one_param __doc__"""
return "assignment_one_param - Expected result: %s" % arg
assignment_one_param.anything = "Expected assignment_one_param __dict__"
@register.assignment_tag(takes_context=False)
def assignment_explicit_no_context(arg):
"""Expected assignment_explicit_no_context __doc__"""
return "assignment_explicit_no_context - Expected result: %s" % arg
assignment_explicit_no_context.anything = "Expected assignment_explicit_no_context __dict__"
@register.assignment_tag(takes_context=True)
def assignment_no_params_with_context(context):
"""Expected assignment_no_params_with_context __doc__"""
return "assignment_no_params_with_context - Expected result (context value: %s)" % context['value']
assignment_no_params_with_context.anything = "Expected assignment_no_params_with_context __dict__"
@register.assignment_tag(takes_context=True)
def assignment_params_and_context(context, arg):
"""Expected assignment_params_and_context __doc__"""
return "assignment_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
assignment_params_and_context.anything = "Expected assignment_params_and_context __dict__"
@register.assignment_tag
def assignment_two_params(one, two):
"""Expected assignment_two_params __doc__"""
return "assignment_two_params - Expected result: %s, %s" % (one, two)
assignment_two_params.anything = "Expected assignment_two_params __dict__"
@register.assignment_tag
def assignment_one_default(one, two='hi'):
"""Expected assignment_one_default __doc__"""
return "assignment_one_default - Expected result: %s, %s" % (one, two)
assignment_one_default.anything = "Expected assignment_one_default __dict__"
@register.assignment_tag
def assignment_unlimited_args(one, two='hi', *args):
"""Expected assignment_unlimited_args __doc__"""
return "assignment_unlimited_args - Expected result: %s" % (', '.join([six.text_type(arg) for arg in [one, two] + list(args)]))
assignment_unlimited_args.anything = "Expected assignment_unlimited_args __dict__"
@register.assignment_tag
def assignment_only_unlimited_args(*args):
"""Expected assignment_only_unlimited_args __doc__"""
return "assignment_only_unlimited_args - Expected result: %s" % ', '.join([six.text_type(arg) for arg in args])
assignment_only_unlimited_args.anything = "Expected assignment_only_unlimited_args __dict__"
@register.assignment_tag
def assignment_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected assignment_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "assignment_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join([six.text_type(arg) for arg in [one, two] + list(args)]),
', '.join(['%s=%s' % (k, v) for (k, v) in sorted_kwarg])
)
assignment_unlimited_args_kwargs.anything = "Expected assignment_unlimited_args_kwargs __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
|
jeyraof/python-social-auth
|
refs/heads/master
|
social/tests/backends/test_username.py
|
92
|
from social.tests.backends.legacy import BaseLegacyTest
class UsernameTest(BaseLegacyTest):
backend_path = 'social.backends.username.UsernameAuth'
expected_username = 'foobar'
response_body = 'username=foobar'
form = """
<form method="post" action="{0}">
<input name="username" type="text">
<button>Submit</button>
</form>
"""
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
david30907d/feedback_django
|
refs/heads/master
|
spirit/core/utils/markdown/renderer.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import mistune
from django.conf import settings
from django.utils.html import escape
def sanitize_url(url):
url = escape(url) # & -> &amp; ...
parts = url.split(':', 1)
if len(parts) == 1: # No protocol (relative url)
return url
if parts[0] in settings.ST_ALLOWED_URL_PROTOCOLS:
return url
return ''
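# A quick sketch of the protocol filtering above (illustrative only; assumes
# settings.ST_ALLOWED_URL_PROTOCOLS contains 'http' and 'https'):
#
#     >>> sanitize_url('https://example.com/?a=1&b=2')
#     'https://example.com/?a=1&amp;b=2'
#     >>> sanitize_url('javascript:alert(1)')
#     ''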
class Renderer(mistune.Renderer):
# Override
def autolink(self, link, is_email=False):
link = sanitize_url(link)
text = link
if is_email:
link = 'mailto:%s' % link
if self.options['no_follow']:
return '<a rel="nofollow" href="%s">%s</a>' % (link, text)
return '<a href="%s">%s</a>' % (link, text)
# Override
def link(self, link, title, text):
link = sanitize_url(link)
if not title:
if self.options['no_follow']:
return '<a rel="nofollow" href="%s">%s</a>' % (link, text)
return '<a href="%s">%s</a>' % (link, text)
title = escape(title)
if self.options['no_follow']:
return '<a rel="nofollow" href="%s" title="%s">%s</a>' % (link, title, text)
return '<a href="%s" title="%s">%s</a>' % (link, title, text)
# Override
def image(self, src, title, text):
src = sanitize_url(src)
text = escape(text)
if title:
title = escape(title)
html = '<img src="%s" alt="%s" title="%s"' % (src, text, title)
else:
html = '<img src="%s" alt="%s"' % (src, text)
if self.options.get('use_xhtml'):
return '%s />' % html
return '%s>' % html
def audio_link(self, link):
link = sanitize_url(link)
return '<audio controls><source src="{link}">' \
'<a rel="nofollow" href="{link}">{link}</a></audio>\n'.format(link=link)
def image_link(self, src, title, text):
image = self.image(src, title, text)
return '<p>{image}</p>\n'.format(image=image)
def emoji(self, name_class, name_raw):
# todo: add no-follow to links since we are going to need migration to fix emojis
return '<i class="tw tw-{name_class}" title=":{name_raw}:"></i>'.format(
name_class=name_class,
name_raw=name_raw
)
def mention(self, username, url):
return '<a class="comment-mention" rel="nofollow" href="{url}">@{username}</a>'.format(
username=username,
url=url
)
def video_link(self, link):
link = sanitize_url(link)
return '<video controls><source src="{link}">' \
'<a rel="nofollow" href="{link}">{link}</a></video>\n'.format(link=link)
def youtube(self, video_id):
return '<span class="video"><iframe src="https://www.youtube.com/embed/{video_id}?html5=1" ' \
'allowfullscreen></iframe></span>\n'.format(video_id=video_id)
def vimeo(self, video_id):
return '<span class="video"><iframe src="https://player.vimeo.com/video/{video_id}" ' \
'allowfullscreen></iframe></span>\n'.format(video_id=video_id)
def poll(self, name):
return '<poll name={name}>\n'.format(name=name)
def poll_raw(self, poll_txt):
poll_txt = poll_txt.replace('\n', '<br>')
return '<p>{poll}</p>\n'.format(poll=poll_txt)
|
TheArchives/Nexus
|
refs/heads/master
|
core/entities/paintballgun.py
|
1
|
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have received a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
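# Note: this script appears to run inside Nexus's entity-update loop, which
# supplies names such as world, entity, x, y, z, entitylist and
# worldblockchangesdict at exec time (an assumption; they are not defined in
# this file).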
if world.blockstore.raw_blocks[world.blockstore.get_offset(x,y+1,z)] != '\x14':
block = '\x14'
world[x, y+1, z] = block
self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world)
self.client.sendBlock(x, y+1, z, block)
if entity[4] not in entities_childerenlist:
for client in worldblockchangesdict:
cx,cy,cz,var_timeofchange,userblock = worldblockchangesdict[client][0][:5]
if (cx,cy,cz) == (x,y+1,z) and time()- var_timeofchange < 2:
worldblockchangedellist.append(client)
if userblock in colorblocks:
i = world.entities_childerenlist_index
world.entities_childerenlist_index += 1
entities_childerenlist.append(i)
entity[4] = i
px,py,pz,ph,pp = worldblockchangesdict[client][1]
distancebetween = ((x-px)**2+(y+1-py)**2+(z-pz)**2)**0.5
h = math.radians(ph*360.0/256.0)
p = math.radians(pp*360.0/256.0)
rx,ry,rz = math.sin(h)*math.cos(p),-math.sin(p),-math.cos(h)*math.cos(p)
entitylist.append(["paintball",(rx*distancebetween+rx+px,ry*distancebetween+ry+py,rz*distancebetween+rz+pz),2,2,(rx,ry,rz),i,userblock])
else:
client.sendServerMessage("Please select a color block to use this Paintball Gun.")
|
lakshayg/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/math_grad_test.py
|
22
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SquaredDifferenceOpTest(test.TestCase):
def _testGrad(self, left_shape, right_shape):
if len(left_shape) > len(right_shape):
output_shape = left_shape
else:
output_shape = right_shape
l = np.random.randn(*left_shape)
r = np.random.randn(*right_shape)
with self.test_session(use_gpu=True):
left_tensor = constant_op.constant(l, shape=left_shape)
right_tensor = constant_op.constant(r, shape=right_shape)
output = math_ops.squared_difference(left_tensor, right_tensor)
left_err = gradient_checker.compute_gradient_error(
left_tensor, left_shape, output, output_shape, x_init_value=l)
right_err = gradient_checker.compute_gradient_error(
right_tensor, right_shape, output, output_shape, x_init_value=r)
self.assertLess(left_err, 1e-10)
self.assertLess(right_err, 1e-10)
def testGrad(self):
self._testGrad([1, 2, 3, 2], [3, 2])
self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(test.TestCase):
def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
"""Returns samples from a normal distribution shifted `bias` away from 0."""
value = np.random.randn(*shape) * sigma
return value + np.sign(value) * bias
def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
np.random.seed(7)
if dtype in (dtypes.complex64, dtypes.complex128):
value = math_ops.complex(
self._biasedRandN(
shape, bias=bias, sigma=sigma),
self._biasedRandN(
shape, bias=bias, sigma=sigma))
else:
value = ops.convert_to_tensor(
self._biasedRandN(
shape, bias=bias), dtype=dtype)
with self.test_session(use_gpu=True):
output = math_ops.abs(value)
error = gradient_checker.compute_gradient_error(
value, shape, output, output.get_shape().as_list())
self.assertLess(error, max_error)
def testComplexAbs(self):
# Bias random test values away from zero to avoid numeric instabilities.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)
# Ensure stability near the pole at zero.
self._testGrad(
[3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
self._testGrad(
[3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
class MinOrMaxGradientTest(test.TestCase):
def testMinGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_min(array_ops.concat([inputs, inputs], 0))
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
def testMaxGradient(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
outputs = math_ops.reduce_max(array_ops.concat([inputs, inputs], 0))
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
self.assertLess(error, 1e-4)
class MaximumOrMinimumGradientTest(test.TestCase):
def testMaximumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.maximum(inputs, 3.0)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
def testMinimumGradient(self):
inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
outputs = math_ops.minimum(inputs, 2.0)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
self.assertLess(error, 1e-4)
class ProdGradientTest(test.TestCase):
def testProdGradient(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs)
with self.test_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
def testProdGradientForNegativeAxis(self):
inputs = constant_op.constant([[1., 2.], [3., 4.]],
dtype=dtypes.float32)
outputs = math_ops.reduce_prod(inputs, -1)
with self.test_session():
error = gradient_checker.compute_gradient_error(
inputs, inputs.get_shape().as_list(),
outputs, outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
class SegmentMinOrMaxGradientTest(test.TestCase):
def testSegmentMinGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_min,
[2])
self.assertLess(error, 1e-4)
def testSegmentMaxGradient(self):
data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(data, [3], segment_max,
[2])
self.assertLess(error, 1e-4)
def testSegmentMinGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_min = math_ops.segment_min(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_min,
[1])
self.assertLess(error, 1e-4)
def testSegmentMaxGradientWithTies(self):
inputs = constant_op.constant([1.0], dtype=dtypes.float32)
data = array_ops.concat([inputs, inputs], 0)
segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
segment_max = math_ops.segment_max(data, segment_ids)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1], segment_max,
[1])
self.assertLess(error, 1e-4)
class FloorModGradientTest(test.TestCase):
def testFloorModGradient(self):
# Making sure the input is not near the discontinuity point where
# x/y == floor(x/y)
ns = constant_op.constant([17.], dtype=dtypes.float32)
inputs = constant_op.constant([131.], dtype=dtypes.float32)
floor_mod = math_ops.floormod(inputs, ns)
with self.test_session():
error = gradient_checker.compute_gradient_error(inputs, [1],
floor_mod, [1])
self.assertLess(error, 1e-4)
if __name__ == "__main__":
test.main()
|
nhicher/ansible
|
refs/heads/devel
|
test/units/errors/__init__.py
|
12133432
| |
ewjoachim/hoarse
|
refs/heads/master
|
hoarse/libs/__init__.py
|
12133432
| |
Tan0/ironic
|
refs/heads/master
|
tools/states_to_dot.py
|
3
|
#!/usr/bin/env python
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import optparse
import os
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
sys.path.insert(0, top_dir)
import pydot
from ironic.common import states
def print_header(text):
print("*" * len(text))
print(text)
print("*" * len(text))
def map_color(text):
# If the text contains 'error'/'fail' then we'll return red...
if 'error' in text or 'fail' in text:
return 'red'
else:
return None
def format_state(state):
# Changes a state (mainly NOSTATE which is the None object) into
# a nicer string...
if state == states.NOSTATE:
state = 'no-state'
return state
def main():
parser = optparse.OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write output to FILE", metavar="FILE")
parser.add_option("-T", "--format", dest="format",
help="output in given format (default: png)",
default='png')
parser.add_option("--no-labels", dest="labels",
help="do not include labels",
action='store_false', default=True)
(options, args) = parser.parse_args()
if options.filename is None:
options.filename = 'states.%s' % options.format
source = states.machine
graph_name = '"Ironic states"'
g = pydot.Dot(graph_name=graph_name, rankdir='LR',
nodesep='0.25', overlap='false',
ranksep="0.5", splines='true',
ordering='in')
node_attrs = {
'fontsize': '11',
}
nodes = {}
for (start_state, on_event, end_state) in source:
start_state = format_state(start_state)
end_state = format_state(end_state)
if start_state not in nodes:
start_node_attrs = node_attrs.copy()
text_color = map_color(start_state)
if text_color:
start_node_attrs['fontcolor'] = text_color
nodes[start_state] = pydot.Node(start_state, **start_node_attrs)
g.add_node(nodes[start_state])
if end_state not in nodes:
end_node_attrs = node_attrs.copy()
text_color = map_color(end_state)
if text_color:
end_node_attrs['fontcolor'] = text_color
nodes[end_state] = pydot.Node(end_state, **end_node_attrs)
g.add_node(nodes[end_state])
edge_attrs = {}
if options.labels:
edge_attrs['label'] = "on_%s" % on_event
edge_color = map_color(on_event)
if edge_color:
edge_attrs['fontcolor'] = edge_color
g.add_edge(pydot.Edge(nodes[start_state], nodes[end_state],
**edge_attrs))
print_header(graph_name)
print(g.to_string().strip())
g.write(options.filename, format=options.format)
print_header("Created %s at '%s'" % (options.format, options.filename))
# To make the svg more pretty use the following:
# $ xsltproc ../diagram-tools/notugly.xsl ./states.svg > pretty-states.svg
# Get diagram-tools from https://github.com/vidarh/diagram-tools.git
if __name__ == '__main__':
main()
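# --- Illustrative sketch (added commentary, not part of the original script) ---
# The loop above creates one pydot.Node per state and one pydot.Edge per
# transition. The same pattern in miniature, with hypothetical state names:
def _mini_graph_demo():
    g = pydot.Dot(graph_name='"demo"', rankdir='LR')
    for name in ('available', 'deploying', 'error'):
        attrs = {}
        color = map_color(name)
        if color:
            attrs['fontcolor'] = color
        g.add_node(pydot.Node(name, **attrs))
    g.add_edge(pydot.Edge('available', 'deploying', label='on_deploy'))
    return g.to_string()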
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/325_test_platform.py
|
54
|
import sys
import os
import unittest
import platform
import subprocess
from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
res = platform.architecture()
@support.skip_unless_symlink
def test_architecture_via_symlink(self): # issue3762
# On Windows, the EXE needs to know where pythonXY.dll is at so we have
# to add the directory to the path.
if sys.platform == "win32":
os.environ["Path"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["Path"])
def get(python):
cmd = [python, '-c',
'import platform; print(platform.architecture())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
os.remove(link)
def test_platform(self):
for aliased in (False, True):
for terse in (False, True):
res = platform.platform(aliased, terse)
def test_system(self):
res = platform.system()
def test_node(self):
res = platform.node()
def test_release(self):
res = platform.release()
def test_version(self):
res = platform.version()
def test_machine(self):
res = platform.machine()
def test_processor(self):
res = platform.processor()
def setUp(self):
self.save_version = sys.version
self.save_subversion = sys.subversion
self.save_mercurial = sys._mercurial
self.save_platform = sys.platform
def tearDown(self):
sys.version = self.save_version
sys.subversion = self.save_subversion
sys._mercurial = self.save_mercurial
sys.platform = self.save_platform
def test_sys_version(self):
# Old test.
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
):
# branch and revision are not "parsed", but fetched
# from sys.subversion. Ignore them
(name, version, branch, revision, buildno, builddate, compiler) \
= platform._sys_version(input)
self.assertEqual(
(name, version, '', '', buildno, builddate, compiler), output)
# Tests for python_implementation(), python_version(), python_branch(),
# python_revision(), python_build(), and python_compiler().
sys_versions = {
("2.6.1 (r261:67515, Dec 6 2008, 15:26:00) \n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]",
('CPython', 'tags/r261', '67515'), self.save_platform)
:
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
("PyPy", "2.5.2", "trunk", "63378", ('63378', 'Mar 26 2009'),
"")
}
for (version_tag, subversion, sys_platform), info in \
sys_versions.items():
sys.version = version_tag
if subversion is None:
if hasattr(sys, "_mercurial"):
del sys._mercurial
if hasattr(sys, "subversion"):
del sys.subversion
else:
sys._mercurial = subversion
if sys_platform is not None:
sys.platform = sys_platform
self.assertEqual(platform.python_implementation(), info[0])
self.assertEqual(platform.python_version(), info[1])
self.assertEqual(platform.python_branch(), info[2])
self.assertEqual(platform.python_revision(), info[3])
self.assertEqual(platform.python_build(), info[4])
self.assertEqual(platform.python_compiler(), info[5])
def test_system_alias(self):
res = platform.system_alias(
platform.system(),
platform.release(),
platform.version(),
)
def test_uname(self):
res = platform.uname()
self.assertTrue(any(res))
@unittest.skipUnless(sys.platform.startswith('win'), "windows only test")
def test_uname_win32_ARCHITEW6432(self):
# Issue 7860: make sure we get architecture from the correct variable
# on 64 bit Windows: if PROCESSOR_ARCHITEW6432 exists we should be
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'foo')
environ['PROCESSOR_ARCHITEW6432'] = 'bar'
platform._uname_cache = None
system, node, release, version, machine, processor = platform.uname()
self.assertEqual(machine, 'bar')
finally:
platform._uname_cache = None
def test_java_ver(self):
res = platform.java_ver()
if sys.platform == 'java':
self.assertTrue(all(res))
def test_win32_ver(self):
res = platform.win32_ver()
def test_mac_ver(self):
res = platform.mac_ver()
if platform.uname()[0] == 'Darwin':
# We're on a MacOSX system, check that
# the right version information is returned
fd = os.popen('sw_vers', 'r')
real_ver = None
for ln in fd:
if ln.startswith('ProductVersion:'):
real_ver = ln.strip().split()[-1]
break
fd.close()
self.assertFalse(real_ver is None)
result_list = res[0].split('.')
expect_list = real_ver.split('.')
len_diff = len(result_list) - len(expect_list)
# On Snow Leopard, sw_vers reports 10.6.0 as 10.6
if len_diff > 0:
expect_list.extend(['0'] * len_diff)
self.assertEqual(result_list, expect_list)
# res[1] claims to contain
# (version, dev_stage, non_release_version)
# That information is no longer available
self.assertEqual(res[1], ('', '', ''))
if sys.byteorder == 'little':
self.assertIn(res[2], ('i386', 'x86_64'))
else:
self.assertEqual(res[2], 'PowerPC')
@unittest.skipUnless(sys.platform == 'darwin', "OSX only test")
def test_mac_ver_with_fork(self):
# Issue7895: platform.mac_ver() crashes when using fork without exec
#
# This test checks that the fix for that issue works.
#
pid = os.fork()
if pid == 0:
# child
info = platform.mac_ver()
os._exit(0)
else:
# parent
cpid, sts = os.waitpid(pid, 0)
self.assertEqual(cpid, pid)
self.assertEqual(sts, 0)
def test_dist(self):
res = platform.dist()
def test_libc_ver(self):
import os
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
executable = sys.executable + '.exe'
else:
executable = sys.executable
res = platform.libc_ver(executable)
def test_parse_release_file(self):
for input, output in (
# Examples of release file contents:
('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
('CentOS release 4', ('CentOS', '4', None)),
('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
('', ('', '', '')), # If there's nothing there.
):
self.assertEqual(platform._parse_release_file(input), output)
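# --- Illustrative sketch (added commentary; an approximation, not the stdlib
# implementation) ---
# platform._parse_release_file splits lines such as
# 'Fedora Core release 5 (Bordeaux)' into (distname, version, id). A rough
# regex re-creation of that behaviour:
import re

_release_re_sketch = re.compile(
    r'(?P<name>.+?)\s+(?:release\s+)?(?P<version>[\d.]+)\s*(?:\((?P<id>[^)]*)\))?')

def _parse_release_sketch(line):
    m = _release_re_sketch.match(line)
    if not m:
        return '', '', ''
    return m.group('name'), m.group('version'), m.group('id')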
def test_main():
support.run_unittest(
PlatformTest
)
if __name__ == '__main__':
test_main()
|
vitalyisaev2/flatbuffers
|
refs/heads/master
|
tests/MyGame/Example/TestSimpleTableWithEnum.py
|
22
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: Example
import flatbuffers
class TestSimpleTableWithEnum(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsTestSimpleTableWithEnum(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TestSimpleTableWithEnum()
x.Init(buf, n + offset)
return x
# TestSimpleTableWithEnum
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TestSimpleTableWithEnum
def Color(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 2
def TestSimpleTableWithEnumStart(builder): builder.StartObject(1)
def TestSimpleTableWithEnumAddColor(builder, color): builder.PrependInt8Slot(0, color, 2)
def TestSimpleTableWithEnumEnd(builder): return builder.EndObject()
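# --- Illustrative usage sketch (added commentary, not generated code) ---
# Building a buffer with the helpers above and reading it back through the
# accessor; this follows the standard flatbuffers Python builder API:
def _roundtrip_demo():
    builder = flatbuffers.Builder(0)
    TestSimpleTableWithEnumStart(builder)
    TestSimpleTableWithEnumAddColor(builder, 1)  # non-default, so it is stored
    builder.Finish(TestSimpleTableWithEnumEnd(builder))
    obj = TestSimpleTableWithEnum.GetRootAsTestSimpleTableWithEnum(
        builder.Output(), 0)
    return obj.Color()  # -> 1; Color() falls back to the default 2 if absent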
|
trishnaguha/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py
|
25
|
#!/usr/bin/python
#
# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_mysqlfirewallrule
version_added: "2.8"
short_description: Manage MySQL firewall rule instance.
description:
- Create, update and delete instance of MySQL firewall rule.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the MySQL firewall rule.
required: True
start_ip_address:
description:
- The start IP address of the MySQL firewall rule. Must be IPv4 format.
end_ip_address:
description:
- The end IP address of the MySQL firewall rule. Must be IPv4 format.
state:
description:
- Assert the state of the MySQL firewall rule. Use 'present' to create or update a rule and 'absent' to ensure it is not present.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) MySQL firewall rule
azure_rm_mysqlfirewallrule:
resource_group: TestGroup
server_name: testserver
name: rule1
start_ip_address: 10.0.0.17
end_ip_address: 10.0.0.20
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testserver/firewallRules/rule1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from azure.mgmt.rdbms.mysql import MySQLManagementClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
NoAction, Create, Update, Delete = range(4)
class AzureRMFirewallRules(AzureRMModuleBase):
"""Configuration class for an Azure RM MySQL firewall rule resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
start_ip_address=dict(
type='str'
),
end_ip_address=dict(
type='str'
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
)
)
self.resource_group = None
self.server_name = None
self.name = None
self.start_ip_address = None
self.end_ip_address = None
self.results = dict(changed=False)
self.state = None
self.to_do = Actions.NoAction
super(AzureRMFirewallRules, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
if hasattr(self, key):
setattr(self, key, kwargs[key])
old_response = None
response = None
resource_group = self.get_resource_group(self.resource_group)
old_response = self.get_firewallrule()
if not old_response:
self.log("MySQL firewall rule instance doesn't exist")
if self.state == 'absent':
self.log("Old instance didn't exist")
else:
self.to_do = Actions.Create
else:
self.log("MySQL firewall rule instance already exists")
if self.state == 'absent':
self.to_do = Actions.Delete
elif self.state == 'present':
self.log("Need to check if MySQL firewall rule instance has to be deleted or may be updated")
if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
self.to_do = Actions.Update
if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
self.to_do = Actions.Update
if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
self.log("Need to Create / Update the MySQL firewall rule instance")
if self.check_mode:
self.results['changed'] = True
return self.results
response = self.create_update_firewallrule()
if not old_response:
self.results['changed'] = True
else:
self.results['changed'] = old_response.__ne__(response)
self.log("Creation / Update done")
elif self.to_do == Actions.Delete:
self.log("MySQL firewall rule instance deleted")
self.results['changed'] = True
if self.check_mode:
return self.results
self.delete_firewallrule()
# make sure instance is actually deleted, for some Azure resources, instance is hanging around
# for some time after deletion -- this should be really fixed in Azure
while self.get_firewallrule():
time.sleep(20)
else:
self.log("MySQL firewall rule instance unchanged")
self.results['changed'] = False
response = old_response
if response:
self.results["id"] = response["id"]
return self.results
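    # (added commentary) Decision summary for exec_module above:
    #   rule absent + state=absent  -> no action
    #   rule absent + state=present -> Create
    #   rule exists + state=absent  -> Delete
    #   rule exists + state=present -> Update only when an IP bound differs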
def create_update_firewallrule(self):
'''
Creates or updates MySQL firewall rule with the specified configuration.
:return: deserialized MySQL firewall rule instance state dictionary
'''
self.log("Creating / Updating the MySQL firewall rule instance {0}".format(self.name))
try:
response = self.mysql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name,
start_ip_address=self.start_ip_address,
end_ip_address=self.end_ip_address)
if isinstance(response, LROPoller):
response = self.get_poller_result(response)
except CloudError as exc:
self.log('Error attempting to create the MySQL firewall rule instance.')
self.fail("Error creating the MySQL firewall rule instance: {0}".format(str(exc)))
return response.as_dict()
def delete_firewallrule(self):
'''
Deletes specified MySQL firewall rule instance in the specified subscription and resource group.
:return: True
'''
self.log("Deleting the MySQL firewall rule instance {0}".format(self.name))
try:
response = self.mysql_client.firewall_rules.delete(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
except CloudError as e:
self.log('Error attempting to delete the MySQL firewall rule instance.')
self.fail("Error deleting the MySQL firewall rule instance: {0}".format(str(e)))
return True
def get_firewallrule(self):
'''
Gets the properties of the specified MySQL firewall rule.
:return: deserialized MySQL firewall rule instance state dictionary
'''
self.log("Checking if the MySQL firewall rule instance {0} is present".format(self.name))
found = False
try:
response = self.mysql_client.firewall_rules.get(resource_group_name=self.resource_group,
server_name=self.server_name,
firewall_rule_name=self.name)
found = True
self.log("Response : {0}".format(response))
self.log("MySQL firewall rule instance : {0} found".format(response.name))
except CloudError as e:
self.log('Did not find the MySQL firewall rule instance.')
if found is True:
return response.as_dict()
return False
def main():
"""Main execution"""
AzureRMFirewallRules()
if __name__ == '__main__':
main()
|
DiCarloLab-Delft/PycQED_py3
|
refs/heads/develop
|
pycqed/measurement/openql_experiments/multi_qubit_oql.py
|
1
|
import numpy as np
import openql.openql as ql
import pycqed.measurement.openql_experiments.openql_helpers as oqh
from pycqed.utilities.general import int2base, suppress_stdout
from os.path import join
from pycqed.instrument_drivers.meta_instrument.LutMans.flux_lutman import _def_lm as _def_lm_flux
def single_flux_pulse_seq(qubit_indices: tuple,
platf_cfg: str):
p = oqh.create_program("single_flux_pulse_seq", platf_cfg)
k = oqh.create_kernel("main", p)
for idx in qubit_indices:
k.prepz(idx) # to ensure enough separation in timing
k.prepz(idx) # to ensure enough separation in timing
k.prepz(idx) # to ensure enough separation in timing
k.gate("wait", [], 0)
k.gate('fl_cw_02', [qubit_indices[0], qubit_indices[1]])
p.add_kernel(k)
p = oqh.compile(p)
return p
def flux_staircase_seq(platf_cfg: str):
p = oqh.create_program("flux_staircase_seq", platf_cfg)
k = oqh.create_kernel("main", p)
for i in range(1):
k.prepz(i) # to ensure enough separation in timing
for i in range(1):
k.gate('CW_00', [i])
k.gate('CW_00', [6])
for cw in range(8):
k.gate('fl_cw_{:02d}'.format(cw), [2, 0])
k.gate('fl_cw_{:02d}'.format(cw), [3, 1])
k.gate("wait", [0, 1, 2, 3], 200) # because scheduling is wrong.
p.add_kernel(k)
p = oqh.compile(p)
return p
def multi_qubit_off_on(qubits: list, initialize: bool,
second_excited_state: bool, platf_cfg: str):
"""
Performs an 'off_on' sequence on the qubits specified.
off: (RO) - prepz - - - RO
on: (RO) - prepz - x180 - - RO
2nd (RO) - prepz - X180 - X12 - RO (if second_excited_state == True)
Will cycle through all combinations of off and on. Last qubit in the list
is considered the Least Significant Qubit (LSQ).
Args:
qubits (list) : list of integers denoting the qubits to use
initialize (bool): if True does an extra initial measurement to
allow post selecting data.
second_excited_state (bool): if True includes the 2-state in the
combinations.
platf_cfg (str) : filepath of OpenQL platform config file
"""
if second_excited_state:
base = 3
else:
base = 2
combinations = [int2base(i, base=base, fixed_length=len(qubits)) for
i in range(base**len(qubits))]
p = oqh.create_program("multi_qubit_off_on", platf_cfg)
for i, comb in enumerate(combinations):
k = oqh.create_kernel('Prep_{}'.format(comb), p)
# 1. Prepare qubits in 0
for q in qubits:
k.prepz(q)
# 2. post-selection extra init readout
if initialize:
for q in qubits:
k.measure(q)
k.gate('wait', qubits, 0)
# 3. prepare desired state
for state, target_qubit in zip(comb, qubits): # N.B. last is LSQ
if state == '0':
pass
elif state == '1':
k.gate('rx180', [target_qubit])
elif state == '2':
k.gate('rx180', [target_qubit])
k.gate('rx12', [target_qubit])
# 4. measurement of all qubits
k.gate('wait', qubits, 0)
# Used to ensure timing is aligned
for q in qubits:
k.measure(q)
k.gate('wait', qubits, 0)
p.add_kernel(k)
p = oqh.compile(p)
return p
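# --- Illustrative sketch (added commentary; assumes int2base returns
# fixed-length base-b digit strings, as its use above suggests) ---
def _int2base_sketch(x, base, fixed_length):
    digits = []
    for _ in range(fixed_length):
        digits.append(str(x % base))
        x //= base
    return ''.join(reversed(digits))
# For two qubits without the 2-state the combinations iterated above are
# ['00', '01', '10', '11'], the last qubit being least significant.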
def single_qubit_off_on(qubits: list,
qtarget,
initialize: bool,
platf_cfg: str):
n_qubits = len(qubits)
comb_0 = '0'*n_qubits
comb_1 = comb_0[:qubits.index(qtarget)] + '1' + comb_0[qubits.index(qtarget)+1:]
combinations = [comb_0, comb_1]
p = oqh.create_program("single_qubit_off_on", platf_cfg)
for i, comb in enumerate(combinations):
k = oqh.create_kernel('Prep_{}'.format(comb), p)
# 1. Prepare qubits in 0
for q in qubits:
k.prepz(q)
# 2. post-selection extra init readout
if initialize:
for q in qubits:
k.measure(q)
k.gate('wait', qubits, 0)
# 3. prepare desired state
for state, target_qubit in zip(comb, qubits): # N.B. last is LSQ
if state == '0':
pass
elif state == '1':
k.gate('rx180', [target_qubit])
elif state == '2':
k.gate('rx180', [target_qubit])
k.gate('rx12', [target_qubit])
# 4. measurement of all qubits
k.gate('wait', qubits, 0)
# Used to ensure timing is aligned
for q in qubits:
k.measure(q)
k.gate('wait', qubits, 0)
p.add_kernel(k)
p = oqh.compile(p)
return p
def targeted_off_on(qubits: list,
q_target: int,
                    pulse_comb: str,
platf_cfg: str):
"""
Performs an 'off_on' sequence on the qubits specified.
off: prepz - - RO
on: prepz - x180 - RO
Will cycle through all combinations of computational states of every
qubit in <qubits> except the target qubit. The target qubit will be
initialized according to <pulse_comb>. 'Off' initializes the qubit in
the ground state and 'On' initializes the qubit in the excited state.
Args:
qubits (list) : list of integers denoting the qubits to use
        q_target (int) : targeted qubit.
pulse_comb (str) : prepared state of target qubit.
platf_cfg (str) : filepath of OpenQL platform config file
"""
nr_qubits = len(qubits)
idx = qubits.index(q_target)
combinations = ['{:0{}b}'.format(i, nr_qubits-1) for i in range(2**(nr_qubits-1))]
for i, comb in enumerate(combinations):
        comb = list(comb)
if 'on' in pulse_comb.lower():
comb.insert(idx, '1')
elif 'off' in pulse_comb.lower():
comb.insert(idx, '0')
else:
            raise ValueError("pulse_comb must contain 'on' or 'off', got {}".format(pulse_comb))
combinations[i] = ''.join(comb)
p = oqh.create_program("Targeted_off_on", platf_cfg)
for i, comb in enumerate(combinations):
k = oqh.create_kernel('Prep_{}'.format(comb), p)
# 1. Prepare qubits in 0
for q in qubits:
k.prepz(q)
# 2. prepare desired state
for state, target_qubit in zip(comb, qubits): # N.B. last is LSQ
if state == '0':
pass
elif state == '1':
k.gate('rx180', [target_qubit])
# 3. measurement of all qubits
k.gate('wait', qubits, 0)
# Used to ensure timing is aligned
for q in qubits:
k.measure(q)
k.gate('wait', qubits, 0)
p.add_kernel(k)
p = oqh.compile(p)
return p
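# --- Illustrative sketch (added commentary) ---
# The combination construction above enumerates all states of the non-target
# qubits and splices the target qubit's bit back in at its index, e.g. for
# three qubits with the target at index 1 and pulse_comb='on':
#
#     ['{:0{}b}'.format(i, 2) for i in range(4)]   # ['00', '01', '10', '11']
#     # after inserting '1' at index 1:  ['010', '011', '110', '111']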
def Ramsey_msmt_induced_dephasing(qubits: list, angles: list, platf_cfg: str,
target_qubit_excited: bool=False, wait_time=0,
extra_echo=False):
"""
Ramsey sequence that varies azimuthal phase instead of time. Works for
a single qubit or multiple qubits. The coherence of the LSQ is measured,
while the whole list of qubits is measured.
Writes output files to the directory specified in openql.
Output directory is set as an attribute to the program for convenience.
note: executes the measurement between gates to measure the measurement
induced dephasing
Input pars:
qubits: list specifying the targeted qubit MSQ, and the qubit
of which the coherence is measured LSQ.
angles: the list of angles for each Ramsey element
platf_cfg: filename of the platform config file
Returns:
        p: OpenQL Program object
"""
p = oqh.create_program("Ramsey_msmt_induced_dephasing", platf_cfg)
for i, angle in enumerate(angles[:-4]):
cw_idx = angle//20 + 9
k = oqh.create_kernel("Ramsey_azi_"+str(angle), p)
for qubit in qubits:
k.prepz(qubit)
if len(qubits)>1 and target_qubit_excited:
for qubit in qubits[:-1]:
k.gate('rx180', [qubit])
k.gate('rx90', [qubits[-1]])
k.gate("wait", [], 0) #alignment workaround
for qubit in qubits:
k.measure(qubit)
k.gate("wait", [], 0) #alignment workaround
if extra_echo:
k.gate('rx180', [qubits[-1]])
k.gate("wait", qubits, round(wait_time*1e9))
k.gate("wait", [], 0) #alignment workaround
if len(qubits)>1 and target_qubit_excited:
for qubit in qubits[:-1]:
k.gate('rx180', [qubit])
if angle == 90:
# special because the cw phase pulses go in mult of 20 deg
k.gate('ry90', [qubits[-1]])
elif angle == 0:
k.gate('rx90', [qubits[-1]])
else:
k.gate('cw_{:02}'.format(cw_idx), [qubits[-1]])
p.add_kernel(k)
# adding the calibration points
oqh.add_single_qubit_cal_points(p, qubit_idx=qubits[-1], measured_qubits=qubits)
p = oqh.compile(p)
return p
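# --- Illustrative sketch (added commentary) ---
# Azimuthal phases are realised with pre-calibrated codeword pulses in steps of
# 20 degrees, which is what the cw_idx arithmetic above encodes:
def _angle_to_codeword_sketch(angle):
    # 20 -> 'cw_10', 40 -> 'cw_11', ..., 180 -> 'cw_18'; the 0 and 90 degree
    # cases are special-cased to rx90/ry90 in the sequence itself.
    return 'cw_{:02}'.format(angle // 20 + 9)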
def echo_msmt_induced_dephasing(qubits: list, angles: list, platf_cfg: str,
wait_time: float=0, target_qubit_excited: bool=False,
extra_echo: bool=False):
"""
Ramsey sequence that varies azimuthal phase instead of time. Works for
a single qubit or multiple qubits. The coherence of the LSQ is measured,
while the whole list of qubits is measured.
Writes output files to the directory specified in openql.
Output directory is set as an attribute to the program for convenience.
note: executes the measurement between gates to measure the measurement
induced dephasing
Input pars:
qubits: list specifying the targeted qubit MSQ, and the qubit
of which the coherence is measured LSQ.
angles: the list of angles for each Ramsey element
platf_cfg: filename of the platform config file
        wait_time       wait time to account for the measurement time in parts
                        of the echo sequence without a measurement pulse
Circuit looks as follows:
qubits[:-1] -----------------------(x180)[variable msmt](x180)
qubits[-1] - x90-wait-(x180)-wait- x180-wait-(x180)-wait-x90 - [strong mmt]
Returns:
        p: OpenQL Program object
"""
p = oqh.create_program('echo_msmt_induced_dephasing', platf_cfg)
for i, angle in enumerate(angles[:-4]):
cw_idx = angle//20 + 9
k = oqh.create_kernel('echo_azi_{}'.format(angle), p)
for qubit in qubits:
k.prepz(qubit)
k.gate('rx90', [qubits[-1]])
k.gate("wait", qubits, round(wait_time*1e9))
k.gate("wait", [], 0) #alignment workaround
if extra_echo:
k.gate('rx180', [qubits[-1]])
k.gate("wait", qubits, round(wait_time*1e9))
k.gate('rx180', [qubits[-1]])
if len(qubits)>1 and target_qubit_excited:
for qubit in qubits[:-1]:
k.gate('rx180', [qubit])
k.gate("wait", [], 0) #alignment workaround
for qubit in qubits:
k.measure(qubit)
k.gate("wait", [], 0) #alignment workaround
if extra_echo:
k.gate('rx180', [qubits[-1]])
k.gate("wait", qubits, round(wait_time*1e9))
if len(qubits)>1 and target_qubit_excited:
for qubit in qubits[:-1]:
k.gate('rx180', [qubit])
if angle == 90:
# special because the cw phase pulses go in mult of 20 deg
k.gate('ry90', [qubits[-1]])
elif angle == 0:
k.gate('rx90', [qubits[-1]])
else:
k.gate('cw_{:02}'.format(cw_idx), [qubits[-1]])
k.gate("wait", [], 0) #alignment workaround
p.add_kernel(k)
# adding the calibration points
p = oqh.add_single_qubit_cal_points(p, qubit_idx=qubits[-1], measured_qubits=qubits)
p = oqh.compile(p)
return p
def two_qubit_off_on(q0: int, q1: int, platf_cfg: str):
'''
off_on sequence on two qubits.
# FIXME: input arg should be "qubits" as a list
Args:
q0, q1 (int) : target qubits for the sequence
platf_cfg: str
'''
p = oqh.create_program('two_qubit_off_on', platf_cfg)
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
p = oqh.compile(p)
return p
def two_qubit_tomo_cardinal(q0: int, q1: int, cardinal: int, platf_cfg: str):
'''
Cardinal tomography for two qubits.
Args:
cardinal (int) : index of prep gate
q0, q1 (int) : target qubits for the sequence
'''
tomo_pulses = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
tomo_list_q0 = tomo_pulses
tomo_list_q1 = tomo_pulses
prep_index_q0 = int(cardinal % len(tomo_list_q0))
prep_index_q1 = int(((cardinal - prep_index_q0) / len(tomo_list_q0) %
len(tomo_list_q1)))
prep_pulse_q0 = tomo_list_q0[prep_index_q0]
prep_pulse_q1 = tomo_list_q1[prep_index_q1]
p = oqh.create_program('two_qubit_tomo_cardinal', platf_cfg)
# Tomography pulses
i = 0
for p_q1 in tomo_list_q1:
for p_q0 in tomo_list_q0:
i += 1
kernel_name = '{}_{}_{}'.format(i, p_q0, p_q1)
k = oqh.create_kernel(kernel_name, p)
k.prepz(q0)
k.prepz(q1)
k.gate(prep_pulse_q0, [q0])
k.gate(prep_pulse_q1, [q1])
k.gate(p_q0, [q0])
k.gate(p_q1, [q1])
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
# every calibration point is repeated 7 times. This is copied from the
# script for Tektronix driven qubits. I do not know if this repetition
# is important or even necessary here.
p = oqh.add_two_q_cal_points(p, q0=q1, q1=q0, reps_per_cal_pt=7)
p = oqh.compile(p)
return p
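# --- Illustrative sketch (added commentary) ---
# The cardinal index is a base-6 encoding of the two preparation pulses, with
# q0 as the least significant digit; the modular arithmetic above unpacks it:
def _decode_cardinal_sketch(cardinal, n_pulses=6):
    prep_index_q0 = cardinal % n_pulses
    prep_index_q1 = (cardinal // n_pulses) % n_pulses
    return prep_index_q0, prep_index_q1
# e.g. cardinal 7 -> (1, 1), i.e. 'rx180' on both qubits.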
def two_qubit_AllXY(q0: int, q1: int, platf_cfg: str,
sequence_type='sequential',
replace_q1_pulses_with: str = None,
repetitions: int = 1):
"""
AllXY sequence on two qubits.
Has the option of replacing pulses on q1 with pi pulses
Args:
q0, q1 (str) : target qubits for the sequence
sequence_type (str) : Describes the timing/order of the pulses.
options are: sequential | interleaved | simultaneous | sandwiched
q0|q0|q1|q1 q0|q1|q0|q1 q01|q01 q1|q0|q0|q1
describes the order of the AllXY pulses
        replace_q1_pulses_with (str) : if not None, replaces all pulses on q1
            with this gate (e.g. 'rx180').
        repetitions (int) : number of times each AllXY pulse pair is repeated
"""
p = oqh.create_program('two_qubit_AllXY', platf_cfg)
pulse_combinations = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'],
['rx180', 'ry180'], ['ry180', 'rx180'],
['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'],
['ry90', 'rx90'], ['rx90', 'ry180'],
['ry90', 'rx180'],
['rx180', 'ry90'], ['ry180', 'rx90'],
['rx90', 'rx180'],
['rx180', 'rx90'], ['ry90', 'ry180'],
['ry180', 'ry90'],
['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'],
['ry90', 'ry90']]
pulse_combinations_q0 = np.repeat(pulse_combinations, repetitions, axis=0)
if replace_q1_pulses_with is not None:
# pulse_combinations_q1 = [[replace_q1_pulses_with]*2 for val in pulse_combinations]
pulse_combinations_q1 = np.repeat(
[[replace_q1_pulses_with] * 2], len(pulse_combinations_q0), axis=0)
else:
pulse_combinations_q1 = np.tile(pulse_combinations, [repetitions, 1])
i = 0
for pulse_comb_q0, pulse_comb_q1 in zip(pulse_combinations_q0,
pulse_combinations_q1):
i += 1
k = oqh.create_kernel('AllXY_{}'.format(i), p)
k.prepz(q0)
k.prepz(q1)
# N.B. The identity gates are there to ensure proper timing
if sequence_type == 'interleaved':
k.gate(pulse_comb_q0[0], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[1], [q1])
elif sequence_type == 'sandwiched':
k.gate('i', [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate(pulse_comb_q0[0], [q0])
k.gate('i', [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[1], [q1])
elif sequence_type == 'sequential':
k.gate(pulse_comb_q0[0], [q0])
k.gate('i', [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[1], [q1])
elif sequence_type == 'simultaneous':
k.gate(pulse_comb_q0[0], [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate(pulse_comb_q1[1], [q1])
else:
raise ValueError("sequence_type {} ".format(sequence_type) +
"['interleaved', 'simultaneous', " +
"'sequential', 'sandwiched']")
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
p = oqh.compile(p)
return p
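# --- Illustrative sketch (added commentary) ---
# For repetitions > 1 the two pulse lists above are expanded differently:
# np.repeat duplicates each q0 pulse pair in place, while np.tile repeats the
# whole q1 list. With two pairs and repetitions=2:
#
#     np.repeat([['i', 'i'], ['rx180', 'rx180']], 2, axis=0).tolist()
#     # [['i', 'i'], ['i', 'i'], ['rx180', 'rx180'], ['rx180', 'rx180']]
#     np.tile([['i', 'i'], ['rx180', 'rx180']], [2, 1]).tolist()
#     # [['i', 'i'], ['rx180', 'rx180'], ['i', 'i'], ['rx180', 'rx180']]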
def residual_coupling_sequence(times, q0: int, q_spectator_idx: list,
spectator_state: str, platf_cfg: str):
"""
Sequence to measure the residual (ZZ) interaction between two qubits.
Procedure is described in M18TR.
(q0) --X90----(tau)---Y180-(tau)-Y90--RO
(qs) --[X180]-(tau)-[X180]-(tau)-------RO
Input pars:
times: the list of waiting times in s for each Echo element
q0 Phase measurement is performed on q0
q_spectator_idx Excitation is put in and removed on these qubits
as indicated
spectator_state Indicates on which qubit to put excitations.
platf_cfg: filename of the platform config file
Returns:
        p: OpenQL Program object
"""
p = oqh.create_program("residual_coupling_sequence", platf_cfg)
all_qubits = [q0]+q_spectator_idx
n_qubits = len(all_qubits)
gate_spec = [s.replace('0', 'i').replace('1', 'rx180') for s in spectator_state]
for i, time in enumerate(times[:-2]):
k = oqh.create_kernel("residual_coupling_seq_{}".format(i), p)
k.prepz(q0)
for q_s in q_spectator_idx:
k.prepz(q_s)
wait_nanoseconds = int(round(time/1e-9))
k.gate('rx90', [q0])
for i_s, q_s in enumerate(q_spectator_idx):
k.gate(gate_spec[i_s], [q_s])
k.gate("wait", all_qubits, wait_nanoseconds)
k.gate('rx180', [q0])
for i_s, q_s in enumerate(q_spectator_idx):
k.gate(gate_spec[i_s], [q_s])
k.gate("wait", all_qubits, wait_nanoseconds)
# k.gate('rxm90', [q0])
k.gate('ry90', [q0])
k.measure(q0)
for q_s in q_spectator_idx:
k.measure(q_s)
k.gate("wait", all_qubits, 0)
p.add_kernel(k)
# adding the calibration points
p = oqh.add_multi_q_cal_points(p, qubits=all_qubits,
combinations=['0'*n_qubits,'1'*n_qubits])
p = oqh.compile(p)
return p
def Cryoscope(
qubit_idx: int,
buffer_time1=0,
buffer_time2=0,
flux_cw: str = 'fl_cw_06',
twoq_pair=[2, 0],
platf_cfg: str = '',
cc: str = 'CCL',
double_projections: bool = True,
):
"""
    Cryoscope sequence: a Ramsey-style experiment with a flux pulse inserted
    between the two pi/2 pulses, projected on X and Y (and, if
    double_projections is True, also on -X and -Y).
    Writes output files to the directory specified in openql.
    Output directory is set as an attribute to the program for convenience.
    Input pars:
        qubit_idx:      int specifying the target qubit (starting at 0)
        buffer_time1/2: buffer times (in s) before and after the flux pulse
        platf_cfg:      filename of the platform config file
    Returns:
        p:              OpenQL Program object
"""
p = oqh.create_program("Cryoscope", platf_cfg)
buffer_nanoseconds1 = int(round(buffer_time1 / 1e-9))
buffer_nanoseconds2 = int(round(buffer_time2 / 1e-9))
if cc.upper() == 'CCL':
flux_target = twoq_pair
elif cc.upper() == 'QCC' or cc.upper() =='CC':
flux_target = [qubit_idx]
cw_idx = int(flux_cw[-2:])
flux_cw = 'sf_{}'.format(_def_lm_flux[cw_idx]['name'].lower())
else:
raise ValueError('CC type not understood: {}'.format(cc))
k = oqh.create_kernel("RamZ_X", p)
k.prepz(qubit_idx)
k.gate('rx90', [qubit_idx])
k.gate("wait", [qubit_idx], buffer_nanoseconds1)
k.gate("wait", [], 0) # alignment workaround
k.gate(flux_cw, flux_target)
# k.gate(flux_cw, [10, 8])
k.gate("wait", [], 0) # alignment workaround
k.gate("wait", [qubit_idx], buffer_nanoseconds2)
k.gate('rx90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
k = oqh.create_kernel("RamZ_Y", p)
k.prepz(qubit_idx)
k.gate('rx90', [qubit_idx])
k.gate("wait", [qubit_idx], buffer_nanoseconds1)
k.gate("wait", [], 0) # alignment workaround
k.gate(flux_cw, flux_target)
# k.gate(flux_cw, [10, 8])
k.gate("wait", [], 0) # alignment workaround
k.gate("wait", [qubit_idx], buffer_nanoseconds2)
k.gate('ry90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
if double_projections:
k = oqh.create_kernel("RamZ_mX", p)
k.prepz(qubit_idx)
k.gate('rx90', [qubit_idx])
k.gate("wait", [qubit_idx], buffer_nanoseconds1)
k.gate("wait", [], 0) # alignment workaround
k.gate(flux_cw, flux_target)
# k.gate(flux_cw, [10, 8])
k.gate("wait", [], 0) # alignment workaround
k.gate("wait", [qubit_idx], buffer_nanoseconds2)
k.gate('rxm90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
k = oqh.create_kernel("RamZ_mY", p)
k.prepz(qubit_idx)
k.gate('rx90', [qubit_idx])
k.gate("wait", [qubit_idx], buffer_nanoseconds1)
k.gate("wait", [], 0) # alignment workaround
k.gate(flux_cw, flux_target)
# k.gate(flux_cw, [10, 8])
k.gate("wait", [], 0) # alignment workaround
k.gate("wait", [qubit_idx], buffer_nanoseconds2)
k.gate('rym90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
p = oqh.compile(p)
return p
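# --- Illustrative analysis sketch (added commentary; assumes the standard
# cryoscope post-processing, which is not part of this sequence file) ---
# The X and Y projections measured above estimate <sigma_x> and <sigma_y>;
# the accumulated Ramsey phase follows from their ratio (np is imported at the
# top of this module):
def _cryoscope_phase_sketch(x_proj, y_proj):
    # Unwrapped equatorial-plane phase of the Bloch vector over the series of
    # flux-pulse truncations; its time derivative gives the qubit detuning.
    return np.unwrap(np.arctan2(np.asarray(y_proj), np.asarray(x_proj)))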
def CryoscopeGoogle(qubit_idx: int, buffer_time1, times, platf_cfg: str):
"""
A Ramsey sequence with varying waiting times `times` around a flux pulse.
    Generates 2*len(times) measurements (t1-x, t1-y, t2-x, t2-y, etc.)
"""
p = oqh.create_program("CryoscopeGoogle", platf_cfg)
buffer_nanoseconds1 = int(round(buffer_time1/1e-9))
for i_t,t in enumerate(times):
t_nanoseconds = int(round(t/1e-9))
k = oqh.create_kernel("RamZ_X_{}".format(i_t), p)
k.prepz(qubit_idx)
k.gate('rx90', [qubit_idx])
k.gate("wait", [], 0) #alignment workaround
k.gate("wait", [qubit_idx], buffer_nanoseconds1)
k.gate('fl_cw_02', [2, 0])
k.gate("wait", [qubit_idx], t_nanoseconds)
k.gate("wait", [], 0) #alignment workaround
k.gate('rx90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
k = oqh.create_kernel("RamZ_Y_{}".format(i_t), p)
k.prepz(qubit_idx)
k.gate('rx90', [qubit_idx])
k.gate("wait", [], 0) #alignment workaround
k.gate("wait", [qubit_idx], buffer_nanoseconds1)
k.gate('fl_cw_02', [2, 0])
k.gate("wait", [qubit_idx], t_nanoseconds)
k.gate("wait", [], 0) #alignment workaround
k.gate('ry90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
p = oqh.compile(p)
return p
def fluxed_ramsey(qubit_idx: int, wait_time: float,
flux_cw: str='fl_cw_02',
platf_cfg: str=''):
"""
Single qubit Ramsey sequence.
Writes output files to the directory specified in openql.
Output directory is set as an attribute to the program for convenience.
    Input pars:
        wait_time:      wait time after the flux pulse (in s)
        qubit_idx:      int specifying the target qubit (starting at 0)
        platf_cfg:      filename of the platform config file
    Returns:
        p:              OpenQL Program object
"""
    p = oqh.create_program("fluxed_ramsey", platf_cfg)
    wait_time = int(round(wait_time/1e-9))  # the wait gate expects integer nanoseconds
k = oqh.create_kernel("fluxed_ramsey_1", p)
k.prepz(qubit_idx)
    k.gate('rx90', [qubit_idx])
k.gate("wait", [], 0) #alignment workaround
    k.gate(flux_cw, [2, 0])
k.gate("wait", [qubit_idx], wait_time)
k.gate("wait", [], 0) #alignment workaround
    k.gate('rx90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
k = oqh.create_kernel("fluxed_ramsey_2", p)
k.prepz(qubit_idx)
k.gate("wait", [], 0) #alignment workaround
    k.gate('rx90', [qubit_idx])
    k.gate(flux_cw, [2, 0])
k.gate("wait", [qubit_idx], wait_time)
k.gate("wait", [], 0) #alignment workaround
    k.gate('ry90', [qubit_idx])
k.measure(qubit_idx)
p.add_kernel(k)
# adding the calibration points
# add_single_qubit_cal_points(p, platf=platf, qubit_idx=qubit_idx)
p = oqh.compile(p)
return p
# FIXME: merge into the real chevron seq
def Chevron_hack(qubit_idx: int, qubit_idx_spec,
buffer_time, buffer_time2, platf_cfg: str):
"""
    Chevron-style sequence (hack variant): pi pulses and a flux pulse on the
    target qubit, with a pi/2 pulse on a spectator qubit.
    Writes output files to the directory specified in openql.
    Output directory is set as an attribute to the program for convenience.
    Input pars:
        qubit_idx:      int specifying the target qubit (starting at 0)
        qubit_idx_spec: int specifying the spectator qubit
        buffer_time(2): buffer times (in s) around the flux pulse
        platf_cfg:      filename of the platform config file
    Returns:
        p:              OpenQL Program object
"""
p = oqh.create_program("Chevron_hack", platf_cfg)
buffer_nanoseconds = int(round(buffer_time/1e-9))
    buffer_nanoseconds2 = int(round(buffer_time2/1e-9))
k = oqh.create_kernel("Chevron_hack", p)
k.prepz(qubit_idx)
k.gate('rx90', [qubit_idx_spec])
k.gate('rx180', [qubit_idx])
k.gate("wait", [], 0) #alignment workaround
k.gate("wait", [qubit_idx], buffer_nanoseconds)
k.gate('fl_cw_02', [2, 0])
k.gate('wait', [qubit_idx], buffer_nanoseconds2)
k.gate("wait", [], 0) #alignment workaround
k.gate('rx180', [qubit_idx])
k.measure(qubit_idx)
k.measure(qubit_idx_spec)
p.add_kernel(k)
p = oqh.compile(p)
return p
def Chevron(qubit_idx: int, qubit_idx_spec: int, qubit_idx_park: int,
buffer_time, buffer_time2, flux_cw: int, platf_cfg: str,
measure_parked_qubit: bool = False,
target_qubit_sequence: str = 'ramsey', cc: str = 'CCL',
recover_q_spec: bool = False):
"""
Writes output files to the directory specified in openql.
Output directory is set as an attribute to the program for convenience.
Input pars:
qubit_idx: int specifying the target qubit (starting at 0)
qubit_idx_spec: int specifying the spectator qubit
buffer_time :
buffer_time2 :
measure_parked_qubit (bool): Whether we set a measurement on the parked qubit
platf_cfg: filename of the platform config file
target_qubit_sequence: selects whether to run a ramsey sequence on
            a target qubit ('ramsey'), keep it in the ground state ('ground')
            or excite it at the beginning of the sequence ('excited')
recover_q_spec (bool): applies the first gate of qspec at the end
as well if `True`
Returns:
        p: OpenQL Program object
Circuit:
q0 -x180-flux-x180-RO-
qspec --x90-----(x90)-RO- (target_qubit_sequence='ramsey')
q0 -x180-flux-x180-RO-
qspec -x180----(x180)-RO- (target_qubit_sequence='excited')
q0 -x180-flux-x180-RO-
qspec ----------------RO- (target_qubit_sequence='ground')
"""
p = oqh.create_program("Chevron", platf_cfg)
buffer_nanoseconds = int(round(buffer_time / 1e-9))
buffer_nanoseconds2 = int(round(buffer_time2 / 1e-9))
if flux_cw is None:
flux_cw = 2
flux_cw_name = _def_lm_flux[flux_cw]['name'].lower()
k = oqh.create_kernel("Chevron", p)
k.prepz(qubit_idx)
k.prepz(qubit_idx_spec)
if (qubit_idx_park is not None):
k.prepz(qubit_idx_park)
spec_gate_dict = {
"ramsey": "rx90",
"excited": "rx180",
"ground": "i"
}
spec_gate = spec_gate_dict[target_qubit_sequence]
k.gate(spec_gate, [qubit_idx_spec])
k.gate('rx180', [qubit_idx])
if buffer_nanoseconds > 0:
k.gate("wait", [qubit_idx], buffer_nanoseconds)
# For CCLight
if cc.upper() == 'CCL':
k.gate("wait", [], 0) # alignment workaround
k.gate('fl_cw_{:02}'.format(flux_cw), [2, 0])
if qubit_idx_park is not None:
k.gate('fl_cw_05', [qubit_idx_park]) # square pulse
k.gate("wait", [], 0) # alignment workaround
elif cc.upper() == 'QCC' or cc.upper() == 'CC':
k.gate("wait", [], 0) # alignment workaround
if qubit_idx_park is not None:
k.gate('sf_square', [qubit_idx_park])
k.gate('sf_{}'.format(flux_cw_name), [qubit_idx])
k.gate("wait", [], 0) # alignment workaround
else:
raise ValueError('CC type not understood: {}'.format(cc))
if buffer_nanoseconds2 > 0:
k.gate('wait', [qubit_idx], buffer_nanoseconds2)
k.gate('rx180', [qubit_idx])
if recover_q_spec:
k.gate(spec_gate, [qubit_idx_spec])
k.gate("wait", [], 0) # alignment workaround
k.measure(qubit_idx)
k.measure(qubit_idx_spec)
if (qubit_idx_park is not None) and measure_parked_qubit:
k.measure(qubit_idx_park)
p.add_kernel(k)
p = oqh.compile(p)
return p
def two_qubit_ramsey(times, qubit_idx: int, qubit_idx_spec: int,
platf_cfg: str, target_qubit_sequence: str='excited'):
"""
Writes output files to the directory specified in openql.
Output directory is set as an attribute to the program for convenience.
Input pars:
times: the list of waiting times for each Ramsey element
qubit_idx: int specifying the target qubit (starting at 0)
qubit_idx_spec: int specifying the spectator qubit
platf_cfg: filename of the platform config file
target_qubit_sequence: selects whether to run a ramsey sequence on
            a target qubit ('ramsey'), keep it in the ground state ('ground')
            or excite it at the beginning of the sequence ('excited')
Returns:
        p: OpenQL Program object
Circuit:
q0 --x90-wait-x90-RO-
qspec --x90----------RO- (target_qubit_sequence='ramsey')
q0 --x90-wait-x90-RO-
qspec -x180----------RO- (target_qubit_sequence='excited')
q0 --x90-wait-x90-RO-
qspec ---------------RO- (target_qubit_sequence='ground')
"""
p = oqh.create_program("two_qubit_ramsey", platf_cfg)
for i, time in enumerate(times):
k = oqh.create_kernel("two_qubit_ramsey_{}".format(i), p)
k.prepz(qubit_idx)
if target_qubit_sequence == 'ramsey':
k.gate('rx90', [qubit_idx_spec])
elif target_qubit_sequence == 'excited':
k.gate('rx180', [qubit_idx_spec])
elif target_qubit_sequence == 'ground':
k.gate('i', [qubit_idx_spec])
else:
raise ValueError('target_qubit_sequence not recognized.')
k.gate('rx90', [qubit_idx])
wait_nanoseconds = int(round(time/1e-9))
k.gate("wait", [qubit_idx, qubit_idx_spec], wait_nanoseconds)
k.gate('i', [qubit_idx_spec])
k.gate('rx90', [qubit_idx])
k.measure(qubit_idx)
k.measure(qubit_idx_spec)
k.gate("wait", [qubit_idx, qubit_idx_spec], 0)
p.add_kernel(k)
# adding the calibration points
oqh.add_two_q_cal_points(p, qubit_idx, qubit_idx_spec, reps_per_cal_pt=2)
p = oqh.compile(p)
return p
def two_qubit_tomo_bell(bell_state, q0, q1,
                        platf_cfg, wait_after_flux: float=None,
                        flux_codeword: str='cz'):
'''
Two qubit bell state tomography.
Args:
bell_state (int): index of prepared bell state
q0, q1 (str): names of the target qubits
wait_after_flux (float): wait time after the flux pulse and
after-rotation before tomographic rotations
'''
tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
# Choose a bell state and set the corresponding preparation pulses
if bell_state == 0: # |Phi_m>=|00>-|11>
prep_pulse_q0, prep_pulse_q1 = 'ry90', 'ry90'
elif bell_state % 10 == 1: # |Phi_p>=|00>+|11>
prep_pulse_q0, prep_pulse_q1 = 'rym90', 'ry90'
elif bell_state % 10 == 2: # |Psi_m>=|01>-|10>
prep_pulse_q0, prep_pulse_q1 = 'ry90', 'rym90'
elif bell_state % 10 == 3: # |Psi_p>=|01>+|10>
prep_pulse_q0, prep_pulse_q1 = 'rym90', 'rym90'
else:
raise ValueError('Bell state {} is not defined.'.format(bell_state))
# Recovery pulse is the same for all Bell states
after_pulse_q1 = 'rym90'
# # Define compensation pulses
# # FIXME: needs to be added
# print('Warning: not using compensation pulses.')
p = oqh.create_program("two_qubit_tomo_bell_{}_{}".format(q1, q0), platf_cfg)
for p_q1 in tomo_gates:
for p_q0 in tomo_gates:
k = oqh.create_kernel(
"BellTomo_{}{}_{}{}".format(q1, p_q1, q0, p_q0), p)
# next experiment
k.prepz(q0) # to ensure enough separation in timing
k.prepz(q1) # to ensure enough separation in timing
# pre-rotations
k.gate(prep_pulse_q0, [q0])
k.gate(prep_pulse_q1, [q1])
# FIXME hardcoded edge because of
            # brainless "directed edge resources" in compiler
            k.gate("wait", [], 0)  # Empty list generates barrier for all qubits in platf. Only works with 0.8.0
# k.gate('cz', [q0, q1])
k.gate(flux_codeword, [q0, q1])
k.gate("wait", [], 0)
# after-rotations
k.gate(after_pulse_q1, [q1])
# possibly wait
if wait_after_flux is not None:
k.gate("wait", [q0, q1], round(wait_after_flux*1e9))
# tomo pulses
k.gate(p_q0, [q1])
k.gate(p_q1, [q0])
# measure
k.measure(q0)
k.measure(q1)
# sync barrier before tomo
# k.gate("wait", [q0, q1], 0)
# k.gate("wait", [2, 0], 0)
p.add_kernel(k)
# 7 repetitions is because of assumptions in tomo analysis
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
p = oqh.compile(p)
return p
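# --- Illustrative summary (added commentary) ---
# The Bell-state preparation branches above in compact form (bell_state % 10):
#
#     index   target state      prep q0   prep q1
#     0       |00> - |11>       ry90      ry90
#     1       |00> + |11>       rym90     ry90
#     2       |01> - |10>       ry90      rym90
#     3       |01> + |10>       rym90     rym90
#
# In all cases the same rym90 recovery pulse is applied to q1 after the CZ.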
def two_qubit_tomo_bell_by_waiting(bell_state, q0, q1,
platf_cfg, wait_time: int=20):
'''
Two qubit (bell) state tomography. There are no flux pulses applied,
only waiting time. It is supposed to take advantage of residual ZZ to
generate entanglement.
Args:
bell_state (int): index of prepared bell state
q0, q1 (str): names of the target qubits
wait_time (int): waiting time in which residual ZZ acts
on qubits
'''
tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
# Choose a bell state and set the corresponding preparation pulses
if bell_state == 0: # |Phi_m>=|00>-|11>
prep_pulse_q0, prep_pulse_q1 = 'ry90', 'ry90'
elif bell_state % 10 == 1: # |Phi_p>=|00>+|11>
prep_pulse_q0, prep_pulse_q1 = 'rym90', 'ry90'
elif bell_state % 10 == 2: # |Psi_m>=|01>-|10>
prep_pulse_q0, prep_pulse_q1 = 'ry90', 'rym90'
elif bell_state % 10 == 3: # |Psi_p>=|01>+|10>
prep_pulse_q0, prep_pulse_q1 = 'rym90', 'rym90'
else:
raise ValueError('Bell state {} is not defined.'.format(bell_state))
# Recovery pulse is the same for all Bell states
after_pulse_q1 = 'rym90'
p = oqh.create_program("two_qubit_tomo_bell_by_waiting", platf_cfg)
for p_q1 in tomo_gates:
for p_q0 in tomo_gates:
k = oqh.create_kernel("BellTomo_{}{}_{}{}".format(
q1, p_q1, q0, p_q0), p)
# next experiment
k.prepz(q0) # to ensure enough separation in timing
k.prepz(q1) # to ensure enough separation in timing
# pre-rotations
k.gate(prep_pulse_q0, [q0])
k.gate(prep_pulse_q1, [q1])
if wait_time > 0:
k.wait([q0, q1], wait_time)
k.gate(after_pulse_q1, [q1])
# tomo pulses
k.gate(p_q1, [q0])
k.gate(p_q0, [q1])
# measure
k.measure(q0)
k.measure(q1)
# sync barrier before tomo
# k.gate("wait", [q0, q1], 0)
k.gate("wait", [2, 0], 0)
p.add_kernel(k)
# 7 repetitions is because of assumptions in tomo analysis
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
p = oqh.compile(p)
return p
def two_qubit_DJ(q0, q1, platf_cfg):
'''
Two qubit Deutsch-Josza.
Args:
q0, q1 (str): names of the target qubits
'''
p = oqh.create_program("two_qubit_DJ", platf_cfg)
# experiments
# 1
k = oqh.create_kernel("DJ1", p)
k.prepz(q0) # to ensure enough separation in timing
k.prepz(q1) # to ensure enough separation in timing
# prerotations
k.gate('ry90', [q0])
k.gate('rym90', [q1])
# post rotations
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# measure
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
# 2
k = oqh.create_kernel("DJ2", p)
k.prepz(q0) # to ensure enough separation in timing
k.prepz(q1) # to ensure enough separation in timing
# prerotations
k.gate('ry90', [q0])
k.gate('rym90', [q1])
# rotations
k.gate('rx180', [q1])
# post rotations
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# measure
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
# 3
k = oqh.create_kernel("DJ3", p)
k.prepz(q0) # to ensure enough separation in timing
k.prepz(q1) # to ensure enough separation in timing
# prerotations
k.gate('ry90', [q0])
k.gate('rym90', [q1])
# rotations
k.gate('ry90', [q1])
k.gate('rx180', [q0])
k.gate('rx180', [q1])
# Hardcoded flux pulse, FIXME use actual CZ
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], 100)
k.gate('fl_cw_01', [2, 0])
# FIXME hardcoded extra delays
k.gate('wait', [2, 0], 200)
k.gate("wait", [], 0) #alignment workaround
k.gate('rx180', [q0])
k.gate('ry90', [q1])
# post rotations
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# measure
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
# 4
k = oqh.create_kernel("DJ4", p)
k.prepz(q0) # to ensure enough separation in timing
k.prepz(q1) # to ensure enough separation in timing
# prerotations
k.gate('ry90', [q0])
k.gate('rym90', [q1])
# rotations
k.gate('rym90', [q1])
# Hardcoded flux pulse, FIXME use actual CZ
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], 100)
k.gate('fl_cw_01', [2, 0])
# FIXME hardcoded extra delays
k.gate('wait', [2, 0], 200)
k.gate("wait", [], 0) #alignment workaround
k.gate('rx180', [q1])
k.gate('rym90', [q1])
# post rotations
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# measure
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
# 7 repetitions is because of assumptions in tomo analysis
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
p = oqh.compile(p)
return p
def single_qubit_parity_check(qD: int, qA: int, platf_cfg: str,
number_of_repetitions: int = 10,
initialization_msmt: bool=False,
initial_states=['0', '1'],
flux_codeword: str = 'cz',
parity_axis='Z'):
"""
Implements a circuit for repeated parity checks.
    Circuit looks as follows:

        Data    (M)|------0-------|^N - M
                          |
        Ancilla (M)|-my90-0-y90-M-|    - M

    The initial "M" measurement is optional; the circuit is repeated N times.
    At the end both qubits are measured.
Arguments:
qD : Data qubit, this is the qubit that the repeated parity
check will be performed on.
qA : Ancilla qubit, qubit that the parity will be mapped onto.
platf_cfg: filename of the platform config file
number_of_repetitions: number of times to repeat the circuit
initialization_msmt : whether to start with an initial measurement
to prepare the starting state.
"""
p = oqh.create_program("single_qubit_repeated_parity_check", platf_cfg)
for k, initial_state in enumerate(initial_states):
k = oqh.create_kernel(
'repeated_parity_check_{}'.format(k), p)
k.prepz(qD)
k.prepz(qA)
if initialization_msmt:
k.measure(qA)
k.measure(qD)
k.gate("wait", []) #wait on all
if initial_state == '1':
k.gate('ry180', [qD])
elif initial_state == '+':
k.gate('ry90', [qD])
elif initial_state == '-':
k.gate('rym90', [qD])
elif initial_state == 'i':
k.gate('rx90', [qD])
elif initial_state == '-i':
k.gate('rxm90', [qD])
elif initial_state == '0':
pass
else:
raise ValueError('initial_state= '+initial_state+' not recognized')
for i in range(number_of_repetitions):
k.gate('rym90', [qA])
if parity_axis=='X':
k.gate('rym90', [qD])
k.gate("wait", [], 0) #alignment workaround
k.gate(flux_codeword, [qA, qD])
k.gate("wait", [], 0) #alignment workaround
k.gate('ry90', [qA])
k.gate('wait', [qA, qD], 0)
if parity_axis=='X':
k.gate('ry90', [qD])
k.measure(qA)
k.measure(qD)
# hardcoded barrier because of openQL #104
# k.gate('wait', [2, 0], 0)
k.gate('wait', [qA, qD], 0)
p.add_kernel(k)
p = oqh.compile(p)
return p
def two_qubit_parity_check(qD0: int, qD1: int, qA: int, platf_cfg: str,
echo: bool=False,
number_of_repetitions: int = 10,
initialization_msmt: bool=False,
                           initial_states=[['0','0'], ['0','1'], ['1','1'], ['1','0']],
flux_codeword: str = 'cz',
# flux_codeword1: str = 'cz',
parity_axes=['ZZ'], tomo=False,
tomo_after=False,
ro_time=500e-9,
echo_during_ancilla_mmt: bool=False,
idling_time: float=40e-9,
idling_time_echo: float=20e-9,
idling_rounds: int=0):
"""
Implements a circuit for repeated parity checks on two qubits.
    Circuit looks as follows (the parity block is repeated N times):

        Data0   ----prep.|------0-------(wait) (echo) (wait)| (tomo) -MMMMMMMMMMMMMMMMMMMM
                                |
        Ancilla (M)------|-my90-0-0-y90-MMMMMMMMMMMMMMMMMMMM|
                                  |
        Data1   ----prep.|--------0-----(wait) (echo) (wait)| (tomo) -MMMMMMMMMMMMMMMMMMMM

    The initial "M" measurement is optional; the circuit is repeated N times.
    At the end all qubits are measured.
Arguments:
qD0 : Data qubit, this is the qubit that the repeated parity
check will be performed on.
qD1 : Data qubit, this is the qubit that the repeated parity
check will be performed on.
        echo: additional pi-pulse between the CZs
qA : Ancilla qubit, qubit that the parity will be mapped onto.
platf_cfg: filename of the platform config file
number_of_repetitions: number of times to repeat the circuit
initialization_msmt : whether to start with an initial measurement
to prepare the starting state.
"""
p = oqh.create_program("two_qubit_parity_check", platf_cfg)
data_qubits=[qD0,qD1]
if tomo:
tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
else:
tomo_gates = ['False']
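        # single dummy entry so the tomography loops below run exactly once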
for p_q1 in tomo_gates:
for p_q0 in tomo_gates:
for initial_state in initial_states:
k = oqh.create_kernel(
'repeated_parity_check_'+initial_state[0]+initial_state[1]+'_tomo0_'+p_q0+'_tomo1_'+p_q1,p)
k.prepz(qD0)
k.prepz(qD1)
k.prepz(qA)
#initialization
if initialization_msmt:
k.gate("wait", [], 0) #alignment workaround
# k.measure(qD0)
# k.measure(qD1)
k.measure(qA)
if echo_during_ancilla_mmt:
k.gate('wait', [qA, qD0, qD1], int(ro_time*1e9))
k.gate('wait', [qD0, qD1, qA], int(100)) #adding additional wait time to ensure good initialization
k.gate("wait", [], 0) #alignment workaround
#state preparation
for i, initial_state_q in enumerate(initial_state):
if initial_state_q == '1':
k.gate('ry180', [data_qubits[i]])
elif initial_state_q == '+':
k.gate('ry90', [data_qubits[i]])
elif initial_state_q == '-':
k.gate('rym90', [data_qubits[i]])
elif initial_state_q == 'i':
k.gate('rx90', [data_qubits[i]])
elif initial_state_q == '-i':
k.gate('rxm90', [data_qubits[i]])
elif initial_state_q == '0':
pass
else:
raise ValueError('initial_state_q= '+initial_state_q+' not recognized')
#parity measurement(s)
for i in range(number_of_repetitions):
for parity_axis in parity_axes:
k.gate("wait", [], 0) #alignment workaround
if parity_axis=='XX':
k.gate('rym90', [qD0])
k.gate('rym90', [qD1])
k.gate("wait", [], 0) #alignment workaround
if parity_axis=='YY':
k.gate('rxm90', [qD0])
k.gate('rxm90', [qD1])
k.gate("wait", [], 0) #alignment workaround
k.gate('rym90', [qA])
# k.gate('ry90', [qD0])
# k.gate('ry90', [qD1])
k.gate("wait", [], 0) #alignment workaround
# k.gate(flux_codeword, [qA, qD1])
k.gate(flux_codeword, [qA, qD0])
k.gate("wait", [], 0)
# if echo:
# k.gate('ry180', [qA])
k.gate(flux_codeword, [qA, qD1])
k.gate("wait", [], 0) #alignment workaround
k.gate('ry90', [qA])
# k.gate('rym90', [qD0])
# k.gate('rym90', [qD1])
k.gate("wait", [], 0)
if parity_axis=='XX':
k.gate('ry90', [qD0])
k.gate('ry90', [qD1])
k.gate("wait", [], 0) #alignment workaround
elif parity_axis=='YY':
k.gate('rx90', [qD0])
k.gate('rx90', [qD1])
k.gate("wait", [], 0) #alignment workaround
                    if (i != number_of_repetitions-1) or (tomo_after):  # last mmt can be multiplexed
k.gate("wait", [], 0)
k.measure(qA)
if echo_during_ancilla_mmt:
k.gate('ry180', [qD0])
k.gate('ry180', [qD1])
k.gate('wait', [qA, qD0, qD1], int(ro_time*1e9))
k.gate("wait", [], 0) #separating parity from tomo
if idling_rounds!=0:
for j in np.arange(idling_rounds):
k.gate("wait", [], int(idling_time_echo*1e9)) #alignment workaround
if echo_during_ancilla_mmt:
k.gate('ry180', [qD0])
k.gate('ry180', [qD1])
k.gate("wait", [], int((idling_time-idling_time_echo-20e-9)*1e9)) #alignment workaround
#tomography
if tomo:
k.gate("wait", [qD1, qD0], 0) #alignment workaround
k.gate(p_q0, [qD1])
k.gate(p_q1, [qD0])
k.gate("wait", [qD1, qD0], 0) #alignment workaround
# measure
if not tomo_after:
k.gate("wait", [], 0) #alignment workaround
k.measure(qA)
k.measure(qD0)
k.measure(qD1)
p.add_kernel(k)
if tomo:
        # only add calibration points when doing tomography
        interleaved_delay = ro_time  # same delay with or without echo pulses
if tomo_after:
p = oqh.add_two_q_cal_points(p, q0=qD0, q1=qD1, reps_per_cal_pt=7, measured_qubits=[qD0, qD1],
interleaved_measured_qubits=[qA],
interleaved_delay=interleaved_delay,
nr_of_interleaves=initialization_msmt+number_of_repetitions*len(parity_axes))
else:
p = oqh.add_two_q_cal_points(p, q0=qD0, q1=qD1, reps_per_cal_pt=7, measured_qubits=[qD0, qD1, qA],
interleaved_measured_qubits=[qA],
interleaved_delay=interleaved_delay, nr_of_interleaves=initialization_msmt+number_of_repetitions*len(parity_axes)-1)
p = oqh.compile(p)
return p
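# Sketch of a typical call of the two-qubit parity check; the indices,
# config path and keyword values are placeholders, not from a real setup:
#
#     p = two_qubit_parity_check(qD0=0, qD1=1, qA=2,
#                                platf_cfg='platform_config.json',
#                                number_of_repetitions=3,
#                                parity_axes=['ZZ'], tomo=True)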
def conditional_oscillation_seq(q0: int, q1: int,
q2: int = None, q3: int = None,
platf_cfg: str = None,
disable_cz: bool = False,
disabled_cz_duration: int = 60,
cz_repetitions: int = 1,
angles=np.arange(0, 360, 20),
wait_time_before_flux: int = 0,
wait_time_after_flux: int = 0,
add_cal_points: bool = True,
cases: list = ('no_excitation', 'excitation'),
flux_codeword: str = 'cz',
flux_codeword_park: str = None,
parked_qubit_seq: str = 'ground',
disable_parallel_single_q_gates: bool = False):
    '''
    Sequence used to calibrate flux pulses for CZ gates.

    q0 is the oscillating qubit
    q1 is the spectator qubit

    Timing of the sequence:
    q0:   X90   --  C-Phase (repeated C-Phase)  --  Rphi90  --  RO
    q1:  X180/I --  C-Phase                     --  X180    --  RO
    q2:   X90   --  PARK/C-Phase                --  Rphi90  --  RO
    q3:  X180/I --  C-Phase                     --  X180    --  RO

    Args:
        q0, q1 (int): indices of the addressed qubits
        q2, q3 (int): indices of optional extra qubits to either park or
            apply a CZ to.
        flux_codeword (str):
            the gate to be applied to the qubit pair q0, q1
        flux_codeword_park (str):
            optionally park qubits q2 (and q3) with either a 'park' pulse
            (single qubit operation on q2) or a 'cz' pulse on q2-q3.
        disable_cz (bool): disable the CZ gate
        cz_repetitions (int): how many CZ gates to apply consecutively
        angles (array): angles of the recovery pulse
        wait_time_before_flux (int): wait time in ns before triggering the
            flux pulses
        wait_time_after_flux (int): wait time in ns after triggering all
            flux pulses
    '''
assert parked_qubit_seq in {"ground", "ramsey"}
p = oqh.create_program("conditional_oscillation_seq", platf_cfg)
# These angles correspond to special pi/2 pulses in the lutman
for i, angle in enumerate(angles):
for case in cases:
k = oqh.create_kernel("{}_{}".format(case, angle), p)
k.prepz(q0)
k.prepz(q1)
if q2 is not None:
k.prepz(q2)
if q3 is not None:
k.prepz(q3)
k.gate("wait", [], 0) # alignment workaround
# #################################################################
# Single qubit ** parallel ** gates before flux pulses
# #################################################################
control_qubits = [q1]
if q3 is not None:
# In case of parallel cz
control_qubits.append(q3)
ramsey_qubits = [q0]
if q2 is not None and parked_qubit_seq == "ramsey":
# For parking and parallel cz
ramsey_qubits.append(q2)
if case == "excitation":
# implicit identities otherwise
for q in control_qubits:
k.gate("rx180", [q])
if disable_parallel_single_q_gates:
k.gate("wait", [], 0)
for q in ramsey_qubits:
k.gate("rx90", [q])
if disable_parallel_single_q_gates:
k.gate("wait", [], 0)
k.gate("wait", [], 0) # alignment workaround
# #################################################################
# Flux pulses
# #################################################################
k.gate('wait', [], wait_time_before_flux)
for dummy_i in range(cz_repetitions):
if not disable_cz:
# Parallel flux pulses below
k.gate(flux_codeword, [q0, q1])
# in case of parking and parallel cz
if flux_codeword_park == 'cz':
k.gate(flux_codeword_park, [q2, q3])
elif flux_codeword_park == 'park':
k.gate(flux_codeword_park, [q2])
if q3 is not None:
raise ValueError("Expected q3 to be None")
elif flux_codeword_park is None:
pass
else:
raise ValueError(
'flux_codeword_park "{}" not allowed'.format(
flux_codeword_park))
else:
k.gate("wait", [], 0) #alignment workaround
# k.gate('wait', [q0,q1], wait_time_between + CZ_duration)
k.gate('wait', [q0,q1], 50)
k.gate("wait", [], 0) #alignment workaround
k.gate("wait", [], 0)
k.gate('wait', [], wait_time_after_flux)
# #################################################################
# Single qubit ** parallel ** gates post flux pulses
# #################################################################
if case == "excitation":
for q in control_qubits:
k.gate("rx180", [q])
if disable_parallel_single_q_gates:
k.gate("wait", [], 0)
# cw_idx corresponds to special hardcoded angles in the lutman
# special because the cw phase pulses go in mult of 20 deg
cw_idx = angle // 20 + 9
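            # e.g. angle=20 -> cw_10, angle=180 -> cw_18, angle=340 -> cw_26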
phi_gate = None
if angle == 90:
phi_gate = 'ry90'
elif angle == 0:
phi_gate = 'rx90'
else:
phi_gate = 'cw_{:02}'.format(cw_idx)
for q in ramsey_qubits:
k.gate(phi_gate, [q])
if disable_parallel_single_q_gates:
k.gate("wait", [], 0)
k.gate('wait', [], 0)
# #################################################################
# Measurement
# #################################################################
if case == 'excitation':
gate = 'rx180'
# if single_q_gates_replace is None else single_q_gates_replace
k.gate("wait", [], 0) #alignment workaround
k.gate(gate, [q1])
# k.gate('i', [q0])
# k.gate("wait", [], 0)
k.measure(q0)
k.measure(q1)
if q2 is not None:
k.measure(q2)
if q3 is not None:
k.measure(q3)
k.gate('wait', [], 0)
p.add_kernel(k)
if add_cal_points:
if q2 is None:
states = ["00", "01", "10", "11"]
else:
states = ["000", "010", "101", "111"]
qubits = [q0, q1] if q2 is None else [q0, q1, q2]
oqh.add_multi_q_cal_points(
p, qubits=qubits, f_state_cal_pt_cw=31,
combinations=states, return_comb=False)
p = oqh.compile(p)
# [2020-06-24] parallel cz not supported (yet)
if add_cal_points:
cal_pts_idx = [361, 362, 363, 364]
else:
cal_pts_idx = []
p.sweep_points = np.concatenate(
[np.repeat(angles, len(cases)), cal_pts_idx])
p.set_sweep_points(p.sweep_points)
return p
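# Sketch of a typical call (all values are placeholders):
#
#     p = conditional_oscillation_seq(q0=0, q1=2,
#                                     platf_cfg='platform_config.json',
#                                     angles=np.arange(0, 360, 20),
#                                     flux_codeword='cz')
#     # p.sweep_points then holds each angle once per case plus the
#     # four calibration indices 361..364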
def grovers_two_qubit_all_inputs(q0: int, q1: int, platf_cfg: str,
precompiled_flux: bool=True,
second_CZ_delay: int=0,
CZ_duration: int=260,
add_echo_pulses: bool=False,
cal_points: bool=True):
"""
Writes the QASM sequence for Grover's algorithm on two qubits.
Sequence:
q0: G0 - - mY90 - - mY90 - RO
CZ_ij CZ
q1: G1 - - mY90 - - mY90 - RO
whit all combinations of (ij) = omega.
G0 and G1 are Y90 or Y90, depending on the (ij).
Args:
q0_name, q1_name (string):
Names of the qubits to which the sequence is applied.
RO_target (string):
Readout target. Can be a qubit name or 'all'.
precompiled_flux (bool):
Determies if the full waveform for the flux pulses is
precompiled, thus only needing one trigger at the start,
or if every flux pulse should be triggered individually.
add_echo_pulses (bool): if True add's echo pulses before the
second CZ gate.
cal_points (bool):
Whether to add calibration points.
Returns:
qasm_file: a reference to the new QASM file object.
"""
if not precompiled_flux:
raise NotImplementedError('Currently only precompiled flux pulses '
'are supported.')
p = oqh.create_program("grovers_two_qubit_all_inputs", platf_cfg)
for G0 in ['ry90', 'rym90']:
for G1 in ['ry90', 'rym90']:
k = oqh.create_kernel('Gr{}_{}'.format(G0, G1), p)
k.prepz(q0)
k.prepz(q1)
k.gate(G0, [q0])
k.gate(G1, [q1])
k.gate('fl_cw_03', [2, 0]) # flux cw03 is the multi_cz pulse
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# k.gate('fl_cw_00', 2,0)
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], second_CZ_delay//2)
k.gate("wait", [], 0) #alignment workaround
if add_echo_pulses:
k.gate('rx180', [q0])
k.gate('rx180', [q1])
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], second_CZ_delay//2)
k.gate("wait", [], 0) #alignment workaround
if add_echo_pulses:
k.gate('rx180', [q0])
k.gate('rx180', [q1])
k.gate('wait', [2, 0], CZ_duration)
k.gate('ry90', [q0])
k.gate('ry90', [q1])
k.measure(q0)
k.measure(q1)
k.gate('wait', [2, 0], 0)
p.add_kernel(k)
if cal_points:
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
p = oqh.compile(p)
return p
def grovers_two_qubits_repeated(qubits, platf_cfg: str,
nr_of_grover_iterations: int):
"""
Writes the QASM sequence for Grover's algorithm on two qubits.
Sequence:
q0: G0 - - mY90 - - mY90 - RO
CZ CZ
q1: G1 - - mY90 - - mY90 - RO
    G0 and G1 are state preparation gates. Here G0 = G1 = 'ry90'.
Parameters:
-----------
qubits: list of int
List of the qubits (indices) to which the sequence is applied.
"""
p = oqh.create_program("grovers_two_qubits_repeated", platf_cfg)
q0 = qubits[-1]
q1 = qubits[-2]
G0 = {"phi": 90, "theta": 90}
G1 = {"phi": 90, "theta": 90}
for i in range(nr_of_grover_iterations):
# k = p.new_kernel('Grover_iteration_{}'.format(i))
k = oqh.create_kernel('Grover_iteration_{}'.format(i), p)
k.prepz(q0)
k.prepz(q1)
# k.prepz()
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# k.rotate(q0, **G0)
# k.rotate(q1, **G1)
for j in range(i):
# Oracle stage
k.gate('cz', [2, 0]) #hardcoded fixme
# k.cz(q0, q1)
# Tagging stage
if (j % 2 == 0):
k.gate('rym90', [q0])
k.gate('rym90', [q1])
# k.ry(q0, -90)
# k.ry(q1, -90)
else:
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# k.ry(q0, 90)
# k.ry(q1, 90)
k.gate('cz', [2, 0]) #hardcoded fixme
# k.cz(q0, q1)
if (j % 2 == 0):
k.gate('ry90', [q0])
k.gate('ry90', [q1])
else:
k.gate('rym90', [q0])
k.gate('rym90', [q1])
# if (j % 2 == 0):
# k.ry(q0, 90)
# k.ry(q1, 90)
# else:
# k.ry(q0, -90)
# k.ry(q1, -90)
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
p = oqh.compile(p)
# p.compile()
return p
def grovers_tomography(q0: int, q1: int, omega: int, platf_cfg: str,
precompiled_flux: bool=True,
cal_points: bool=True, second_CZ_delay: int=260,
CZ_duration: int=260,
add_echo_pulses: bool=False):
"""
Tomography sequence for Grover's algorithm.
omega: int denoting state that the oracle prepares.
"""
if not precompiled_flux:
raise NotImplementedError('Currently only precompiled flux pulses '
'are supported.')
p = oqh.create_program("grovers_tomography",
platf_cfg)
tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
if omega == 0:
G0 = 'ry90'
G1 = 'ry90'
elif omega == 1:
G0 = 'ry90'
G1 = 'rym90'
elif omega == 2:
G0 = 'rym90'
G1 = 'ry90'
elif omega == 3:
G0 = 'rym90'
G1 = 'rym90'
else:
raise ValueError('omega must be in [0, 3]')
for p_q1 in tomo_gates:
for p_q0 in tomo_gates:
k = oqh.create_kernel('Gr{}_{}_tomo_{}_{}'.format(
G0, G1, p_q0, p_q1), p)
k.prepz(q0)
k.prepz(q1)
# Oracle
k.gate(G0, [q0])
k.gate(G1, [q1])
k.gate('fl_cw_03', [2, 0]) # flux cw03 is the multi_cz pulse
# Grover's search
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# k.gate('fl_cw_00', 2[,0])
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], second_CZ_delay//2)
k.gate("wait", [], 0) #alignment workaround
if add_echo_pulses:
k.gate('rx180', [q0])
k.gate('rx180', [q1])
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], second_CZ_delay//2)
k.gate("wait", [], 0) #alignment workaround
if add_echo_pulses:
k.gate('rx180', [q0])
k.gate('rx180', [q1])
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], CZ_duration)
k.gate("wait", [], 0) #alignment workaround
k.gate('ry90', [q0])
k.gate('ry90', [q1])
# tomo pulses
k.gate(p_q1, [q0])
k.gate(p_q0, [q1])
k.measure(q0)
k.measure(q1)
k.gate('wait', [2, 0], 0)
p.add_kernel(k)
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=7)
p = oqh.compile(p)
return p
def CZ_poisoned_purity_seq(q0, q1, platf_cfg: str,
nr_of_repeated_gates: int,
cal_points: bool=True):
"""
Creates the |00> + |11> Bell state and does a partial tomography in
order to determine the purity of both qubits.
"""
p = oqh.create_program("CZ_poisoned_purity_seq",
platf_cfg)
tomo_list = ['rxm90', 'rym90', 'i']
for p_pulse in tomo_list:
k = oqh.create_kernel("{}".format(p_pulse), p)
k.prepz(q0)
k.prepz(q1)
# Create a Bell state: |00> + |11>
k.gate('rym90', [q0])
k.gate('ry90', [q1])
k.gate("wait", [], 0) #alignment workaround
for i in range(nr_of_repeated_gates):
k.gate('fl_cw_01', [2, 0])
k.gate("wait", [], 0) #alignment workaround
k.gate('rym90', [q1])
# Perform pulses to measure the purity of both qubits
k.gate(p_pulse, [q0])
k.gate(p_pulse, [q1])
k.measure(q0)
k.measure(q1)
# Implements a barrier to align timings
# k.gate('wait', [q0, q1], 0)
# hardcoded because of openQL #104
k.gate('wait', [2, 0], 0)
p.add_kernel(k)
if cal_points:
# FIXME: replace with standard add cal points function
k = oqh.create_kernel("Cal 00", p)
k.prepz(q0)
k.prepz(q1)
k.measure(q0)
k.measure(q1)
k.gate('wait', [2, 0], 0)
p.add_kernel(k)
k = oqh.create_kernel("Cal 11", p)
k.prepz(q0)
k.prepz(q1)
k.gate("rx180", [q0])
k.gate("rx180", [q1])
k.measure(q0)
k.measure(q1)
k.gate('wait', [2, 0], 0)
p.add_kernel(k)
p = oqh.compile(p)
return p
def CZ_state_cycling_light(q0: str, q1: str, N: int=1):
"""
Implements a circuit that performs a permutation over all computational
states. This light version performs this experiment for all 4 possible
input states.
Expected operation:
U (|00>) -> |01>
U (|01>) -> |11>
U (|10>) -> |00>
U (|11>) -> |10>
Args:
q0 (str): name of qubit q0
q1 (str): name of qubit q1
N (int): number of times to apply U
"""
raise NotImplementedError()
# filename = join(base_qasm_path, 'CZ_state_cycling_light.qasm')
# qasm_file = mopen(filename, mode='w')
# qasm_file.writelines('qubit {} \nqubit {} \n'.format(q0, q1))
# U = ''
# U += 'Y90 {} | mY90 {}\n'.format(q0, q1)
# U += 'CZ {} {}\n'.format(q0, q1)
# U += 'Y90 {} | Y90 {}\n'.format(q0, q1)
# U += 'CZ {} {}\n'.format(q0, q1)
# U += 'Y90 {} | Y90 {}\n'.format(q0, q1)
# # Input |00>
# qasm_file.writelines('init_all \n')
# qasm_file.writelines('qwg_trigger_0 {}\n'.format(q0))
# for n in range(N):
# qasm_file.writelines(U)
# qasm_file.writelines('RO {}\n'.format(q0))
# # Input |01>
# qasm_file.writelines('init_all \n')
# qasm_file.writelines('qwg_trigger_0 {}\n'.format(q0))
# qasm_file.writelines('X180 {}\n'.format(q0))
# for n in range(N):
# qasm_file.writelines(U)
# qasm_file.writelines('RO {}\n'.format(q0))
# # Input |10>
# qasm_file.writelines('init_all \n')
# qasm_file.writelines('qwg_trigger_0 {}\n'.format(q0))
# qasm_file.writelines('X180 {}\n'.format(q1))
# for n in range(N):
# qasm_file.writelines(U)
# qasm_file.writelines('RO {}\n'.format(q0))
# # Input |11>
# qasm_file.writelines('init_all \n')
# qasm_file.writelines('qwg_trigger_0 {}\n'.format(q0))
# qasm_file.writelines('X180 {} | X180 {}\n'.format(q0, q1))
# for n in range(N):
# qasm_file.writelines(U)
# qasm_file.writelines('RO {}\n'.format(q0))
# qasm_file.close()
# return qasm_file
def CZ_restless_state_cycling(q0: str, q1: str, N: int=1):
"""
Implements a circuit that performs a permutation over all computational
states.
Expected operation:
U (|00>) -> |01>
U (|01>) -> |11>
U (|10>) -> |00>
U (|11>) -> |10>
Args:
q0 (str): name of qubit q0
q1 (str): name of qubit q1
N (int): number of times to apply U
"""
raise NotImplementedError()
# filename = join(base_qasm_path, 'CZ_state_cycling_light.qasm')
# qasm_file = mopen(filename, mode='w')
# qasm_file.writelines('qubit {} \nqubit {} \n'.format(q0, q1))
# U = ''
# U += 'Y90 {} | mY90 {}\n'.format(q0, q1)
# U += 'CZ {} {}\n'.format(q0, q1)
# U += 'Y90 {} | Y90 {}\n'.format(q0, q1)
# U += 'CZ {} {}\n'.format(q0, q1)
# U += 'Y90 {} | Y90 {}\n'.format(q0, q1)
# for n in range(N):
# qasm_file.writelines(U)
# qasm_file.writelines('RO {}\n'.format(q0))
def Chevron_first_manifold(qubit_idx: int, qubit_idx_spec: int,
buffer_time, buffer_time2, flux_cw: int, platf_cfg: str):
"""
Writes output files to the directory specified in openql.
Output directory is set as an attribute to the program for convenience.
Input pars:
qubit_idx: int specifying the target qubit (starting at 0)
qubit_idx_spec: int specifying the spectator qubit
        buffer_time : time (s) between the initial pi-pulse and the flux pulse
        buffer_time2 : time (s) between the flux pulse and the readout
platf_cfg: filename of the platform config file
Returns:
        p: OpenQL Program object
"""
p = oqh.create_program("Chevron_first_manifold", platf_cfg)
buffer_nanoseconds = int(round(buffer_time/1e-9))
buffer_nanoseconds2 = int(round(buffer_time2/1e-9))
if flux_cw is None:
flux_cw = 2
k = oqh.create_kernel("Chevron", p)
k.prepz(qubit_idx)
k.gate('rx180', [qubit_idx])
k.gate("wait", [qubit_idx], buffer_nanoseconds)
k.gate("wait", [], 0) #alignment workaround
k.gate('fl_cw_{:02}'.format(flux_cw), [2, 0])
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [qubit_idx], buffer_nanoseconds2)
k.measure(qubit_idx)
k.measure(qubit_idx_spec)
k.gate("wait", [qubit_idx, qubit_idx_spec], 0)
p.add_kernel(k)
p = oqh.compile(p)
return p
def partial_tomography_cardinal(q0: int, q1: int, cardinal: int, platf_cfg: str,
precompiled_flux: bool=True,
cal_points: bool=True, second_CZ_delay: int=260,
CZ_duration: int=260,
add_echo_pulses: bool=False):
"""
    Partial tomography sequence for a two-qubit cardinal state.
    cardinal: int denoting the cardinal state prepared.
"""
if not precompiled_flux:
raise NotImplementedError('Currently only precompiled flux pulses '
'are supported.')
p = oqh.create_program("partial_tomography_cardinal",
platf_cfg)
cardinal_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
if (cardinal > 35 or cardinal < 0):
raise ValueError('cardinal must be in [0, 35]')
idx_p0 = cardinal % 6
idx_p1 = ((cardinal - idx_p0)//6) % 6
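    # base-6 decoding of the cardinal index, e.g. cardinal=13 gives
    # idx_p0=1 ('rx180' on q0) and idx_p1=2 ('ry90' on q1)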
# cardinal_gates[]
#k.gate(string_of_the_gate, integer_from_qubit)
tomo_gates = [('i', 'i'), ('i', 'rx180'), ('rx180', 'i'), ('rx180', 'rx180'),
('ry90', 'ry90'), ('rym90', 'rym90'), ('rx90', 'rx90'), ('rxm90', 'rxm90')]
for i_g, gates in enumerate(tomo_gates):
idx_g0 = i_g % 6
idx_g1 = ((i_g - idx_g0)//6) % 6
# strings denoting the gates
SP0 = cardinal_gates[idx_p0]
SP1 = cardinal_gates[idx_p1]
t_q0 = gates[1]
t_q1 = gates[0]
k = oqh.create_kernel(
'PT_{}_tomo_{}_{}'.format(cardinal, idx_g0, idx_g1), p)
k.prepz(q0)
k.prepz(q1)
# Cardinal state preparation
k.gate(SP0, [q0])
k.gate(SP1, [q1])
# tomo pulses
# to be taken from list of tuples
k.gate(t_q1, [q0])
k.gate(t_q0, [q1])
k.measure(q0)
k.measure(q1)
k.gate("wait", [], 0) #alignment workaround
k.gate('wait', [2, 0], 0)
k.gate("wait", [], 0) #alignment workaround
p.add_kernel(k)
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1, reps_per_cal_pt=2)
p = oqh.compile(p)
return p
def two_qubit_VQE(q0: int, q1: int, platf_cfg: str):
"""
VQE tomography for two qubits.
Args:
        q0, q1 (int) : target qubits for the sequence
        platf_cfg (str) : filename of the platform config file
"""
tomo_pulses = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
tomo_list_q0 = tomo_pulses
tomo_list_q1 = tomo_pulses
p = oqh.create_program("two_qubit_VQE", platf_cfg)
# Tomography pulses
i = 0
for p_q1 in tomo_list_q1:
for p_q0 in tomo_list_q0:
i += 1
kernel_name = '{}_{}_{}'.format(i, p_q0, p_q1)
k = oqh.create_kernel(kernel_name, p)
k.prepz(q0)
k.prepz(q1)
            k.gate('ry180', [q0])  # Y180 gate without compilation
            k.gate('i', [q0])      # identity to keep the timing consistent
k.gate("wait", [q1], 40)
k.gate("wait", [], 0) #alignment workaround
k.gate('fl_cw_02', [2, 0])
k.gate("wait", [], 0) #alignment workaround
k.gate("wait", [q1], 40)
k.gate(p_q0, [q0]) # compiled z gate+pre_rotation
k.gate(p_q1, [q1]) # pre_rotation
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
# every calibration point is repeated 7 times. This is copied from the
# script for Tektronix driven qubits. I do not know if this repetition
# is important or even necessary here.
p = oqh.add_two_q_cal_points(p, q0=q1, q1=q0, reps_per_cal_pt=7)
p = oqh.compile(p)
return p
def sliding_flux_pulses_seq(
qubits: list, platf_cfg: str,
angles=np.arange(0, 360, 20), wait_time: int=0,
flux_codeword_a: str='fl_cw_01', flux_codeword_b: str='fl_cw_01',
ramsey_axis: str='x',
add_cal_points: bool=True):
"""
    Experiment to measure the effect of flux pulses on each other.
Timing of the sequence:
q0: -- flux_a -- wait -- X90 -- flux_b -- Rphi90 -- RO
q1: -- flux_a -- -- -- flux_b -- -- RO
    N.B.  q1 only exists to satisfy the flux-tuple notation in the CCL.
    N.B.2 flux tuples are currently hardcoded to [2, 0]
          because of OpenQL.
Args:
qubits : list of qubits, LSQ (q0) is last entry in list
platf_cfg : openQL platform config
angles : angles along which to do recovery pulses
wait_time : time in ns after the first flux pulse and before the
first microwave pulse.
flux_codeword_a : flux codeword of the stimulus (1st) pulse
flux_codeword_b : flux codeword of the spectator (2nd) pulse
ramsey_axis : chooses between doing x90 or y90 rotation at the
beginning of Ramsey sequence
add_cal_points : if True adds calibration points at the end
"""
p = oqh.create_program("sliding_flux_pulses_seq", platf_cfg)
k = oqh.create_kernel("sliding_flux_pulses_seq", p)
q0 = qubits[-1]
q1 = qubits[-2]
for i, angle in enumerate(angles):
cw_idx = angle//20 + 9
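        # e.g. angle=40 -> cw_11, angle=340 -> cw_26 (lutman phase pulses)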
k.prepz(q0)
k.gate(flux_codeword_a, [2, 0]) # edge hardcoded because of openql
k.gate("wait", [], 0) # alignment workaround
# hardcoded because of flux_tuples, [q1, q0])
k.gate('wait', [q0, q1], wait_time)
if ramsey_axis == 'x':
k.gate('rx90', [q0])
elif ramsey_axis == 'y':
k.gate('ry90', [q0])
else:
raise ValueError('ramsey_axis must be "x" or "y"')
k.gate("wait", [], 0) # alignment workaround
k.gate(flux_codeword_b, [2, 0]) # edge hardcoded because of openql
k.gate("wait", [], 0) # alignment workaround
k.gate('wait', [q0, q1], 60)
# hardcoded because of flux_tuples, [q1, q0])
# hardcoded angles, must be uploaded to AWG
if angle == 90:
# special because the cw phase pulses go in mult of 20 deg
k.gate('ry90', [q0])
else:
k.gate('cw_{:02}'.format(cw_idx), [q0])
k.measure(q0)
k.measure(q1)
# Implements a barrier to align timings
# k.gate('wait', [q0, q1], 0)
# hardcoded barrier because of openQL #104
k.gate('wait', [2, 0], 0)
p.add_kernel(k)
if add_cal_points:
p = oqh.add_two_q_cal_points(p, q0=q0, q1=q1)
p = oqh.compile(p)
if add_cal_points:
cal_pts_idx = [361, 362, 363, 364]
else:
cal_pts_idx = []
p.sweep_points = np.concatenate([angles, cal_pts_idx])
    # FIXME: remove try-except once we depend firmly on >=openql-0.6
try:
p.set_sweep_points(p.sweep_points)
except TypeError:
# openql-0.5 compatibility
p.set_sweep_points(p.sweep_points, len(p.sweep_points))
return p
def two_qubit_state_tomography(qubit_idxs,
bell_state,
product_state,
platf_cfg,
wait_after_flux: float=None,
flux_codeword: str='cz'):
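    """
    State tomography on two qubits: prepares either a Bell state
    (bell_state in 0..3) or a two-qubit product state (e.g. '0+'), then
    measures in all 9 two-qubit basis combinations, each combined with the
    four readout-flip patterns (II, IF, FI, FF), followed by the four
    computational-basis calibration points.
    Supplying both bell_state and product_state raises a ValueError.
    """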
p = oqh.create_program("state_tomography_2Q_{}_{}_{}".format(product_state,qubit_idxs[0], qubit_idxs[1]), platf_cfg)
q0 = qubit_idxs[0]
q1 = qubit_idxs[1]
calibration_points = ['00', '01', '10', '11']
measurement_pre_rotations = ['II', 'IF', 'FI', 'FF']
bases = ['X', 'Y', 'Z']
    # all 9 two-qubit measurement-basis combinations: 'XX', 'XY', ..., 'ZZ'
bases_comb = [basis_0+basis_1 for basis_0 in bases for basis_1 in bases]
combinations = []
combinations += [b+'-'+c for b in bases_comb for c in measurement_pre_rotations]
combinations += calibration_points
state_strings = ['0', '1', '+', '-', 'i', 'j']
state_gate = ['i', 'rx180', 'ry90', 'rym90', 'rxm90', 'rx90']
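    # '+'/'-' are the X eigenstates and 'i'/'j' the +Y/-Y eigenstates; each
    # symbol maps to the preparation gate at the same index in state_gate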
product_gate = ['0', '0', '0', '0']
for basis in bases_comb:
for pre_rot in measurement_pre_rotations: # tomographic pre-rotation
k = oqh.create_kernel('TFD_{}-basis_{}'.format(basis, pre_rot), p)
for q_idx in qubit_idxs:
k.prepz(q_idx)
# Choose a bell state and set the corresponding preparation pulses
if bell_state is not None:
#
# Q1 |0> --- P1 --o-- A1 -- R1 -- M
# |
# Q0 |0> --- P0 --o-- I -- R0 -- M
if bell_state == 0: # |Phi_m>=|00>-|11>
prep_pulse_q0, prep_pulse_q1 = 'ry90', 'ry90'
elif bell_state % 10 == 1: # |Phi_p>=|00>+|11>
prep_pulse_q0, prep_pulse_q1 = 'rym90', 'ry90'
elif bell_state % 10 == 2: # |Psi_m>=|01>-|10>
prep_pulse_q0, prep_pulse_q1 = 'ry90', 'rym90'
elif bell_state % 10 == 3: # |Psi_p>=|01>+|10>
prep_pulse_q0, prep_pulse_q1 = 'rym90', 'rym90'
else:
raise ValueError('Bell state {} is not defined.'.format(bell_state))
# Recovery pulse is the same for all Bell states
after_pulse_q1 = 'rym90'
k.gate(prep_pulse_q0, [q0])
k.gate(prep_pulse_q1, [q1])
k.gate("wait", [], 0)# Empty list generates barrier for all qubits in platf. only works with 0.8.0
# k.gate('cz', [q0, q1])
k.gate(flux_codeword, [q0, q1])
k.gate("wait", [], 0)
# after-rotations
k.gate(after_pulse_q1, [q1])
# possibly wait
if wait_after_flux is not None:
k.gate("wait", [q0, q1], round(wait_after_flux*1e9))
k.gate("wait", [], 0)
if product_state is not None:
for i, string in enumerate(product_state):
product_gate[i] = state_gate[state_strings.index(string)]
k.gate(product_gate[0], [q0])
k.gate(product_gate[1], [q1])
k.gate('wait', [], 0)
if (product_state is not None) and (bell_state is not None):
raise ValueError('Confusing requirements, both state {} and bell-state {}'.format(product_state,bell_state))
# tomographic pre-rotations
for rot_idx in range(2):
q_idx = qubit_idxs[rot_idx]
flip = pre_rot[rot_idx]
qubit_basis = basis[rot_idx]
# Basis rotations take the operator Z onto (Ri* Z Ri):
# Z -Z X -X -Y Y
# FLIPS I F I F I F
# BASIS Z Z X X Y Y
# tomo_gates = ['i', 'rx180', 'ry90', 'rym90', 'rx90', 'rxm90']
prerot_Z = 'i'
prerot_mZ = 'rx180'
prerot_X = 'rym90'
prerot_mX = 'ry90'
prerot_Y = 'rx90'
prerot_mY = 'rxm90'
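                # e.g. basis 'XY' with pre_rot 'IF' gives q0 -> prerot_X
                # ('rym90') and q1 -> prerot_mY ('rxm90')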
if flip == 'I' and qubit_basis == 'Z':
k.gate(prerot_Z, [q_idx])
elif flip == 'F' and qubit_basis == 'Z':
k.gate(prerot_mZ, [q_idx])
elif flip == 'I' and qubit_basis == 'X':
k.gate(prerot_X, [q_idx])
elif flip == 'F' and qubit_basis == 'X':
k.gate(prerot_mX, [q_idx])
elif flip == 'I' and qubit_basis == 'Y':
k.gate(prerot_Y, [q_idx])
elif flip == 'F' and qubit_basis == 'Y':
k.gate(prerot_mY, [q_idx])
else:
raise ValueError("flip {} and basis {} not understood".format(flip,basis))
k.gate('i', [q_idx])
k.gate('wait', [], 0)
for q_idx in qubit_idxs:
k.measure(q_idx)
k.gate('wait', [], 0)
p.add_kernel(k)
for cal_pt in calibration_points:
k = oqh.create_kernel('Cal_{}'.format(cal_pt), p)
for q_idx in qubit_idxs:
k.prepz(q_idx)
k.gate('wait', [], 0)
for cal_idx, state in enumerate(cal_pt):
q_idx = qubit_idxs[cal_idx]
if state == '1':
k.gate('rx180', [q_idx])
k.gate('wait', [], 0) # barrier guarantees allignment
for q_idx in qubit_idxs:
k.measure(q_idx)
k.gate('wait', [], 0)
p.add_kernel(k)
p = oqh.compile(p)
p.combinations = combinations
return p
def multi_qubit_Depletion(qubits: list, platf_cfg: str,
time: float):
"""
Performs a measurement pulse and wait time followed by a simultaneous ALLXY on the
specified qubits:
|q0> - RO <--wait--> P0 - P1 - RO
|q1> - RO <--time--> P0 - P1 - RO
.
.
.
args:
qubits : List of qubits numbers.
time : wait time (s) after readout pulse.
"""
p = oqh.create_program('multi_qubit_Depletion', platf_cfg)
pulse_combinations = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'],
['rx180', 'ry180'], ['ry180', 'rx180'],
['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'],
['ry90', 'rx90'], ['rx90', 'ry180'],
['ry90', 'rx180'],
['rx180', 'ry90'], ['ry180', 'rx90'],
['rx90', 'rx180'],
['rx180', 'rx90'], ['ry90', 'ry180'],
['ry180', 'ry90'],
['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'],
['ry90', 'ry90']]
for i, pulse_comb in enumerate(pulse_combinations):
for j in range(2): #double points
k = oqh.create_kernel('Depletion_{}_{}'.format(j, i), p)
for qubit in qubits:
k.prepz(qubit)
k.measure(qubit)
wait_nanoseconds = int(round(time/1e-9))
for qubit in qubits:
k.gate("wait", [qubit], wait_nanoseconds)
            # apply the ALLXY pulse pair simultaneously on all qubits,
            # then read them out
            for qubit in qubits:
                k.gate(pulse_comb[0], [qubit])
                k.gate(pulse_comb[1], [qubit])
                k.measure(qubit)
p.add_kernel(k)
p = oqh.compile(p)
return p
def two_qubit_Depletion(q0: int, q1: int, platf_cfg: str,
time: float,
sequence_type='sequential',
double_points: bool=False):
"""
"""
p = oqh.create_program('two_qubit_Depletion', platf_cfg)
pulse_combinations = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'],
['rx180', 'ry180'], ['ry180', 'rx180'],
['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'],
['ry90', 'rx90'], ['rx90', 'ry180'],
['ry90', 'rx180'],
['rx180', 'ry90'], ['ry180', 'rx90'],
['rx90', 'rx180'],
['rx180', 'rx90'], ['ry90', 'ry180'],
['ry180', 'ry90'],
['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'],
['ry90', 'ry90']]
pulse_combinations_tiled = pulse_combinations + pulse_combinations
if double_points:
pulse_combinations = [val for val in pulse_combinations
for _ in (0, 1)]
pulse_combinations_q0 = pulse_combinations
pulse_combinations_q1 = pulse_combinations_tiled
i = 0
for pulse_comb_q0, pulse_comb_q1 in zip(pulse_combinations_q0,
pulse_combinations_q1):
i += 1
k = oqh.create_kernel('AllXY_{}'.format(i), p)
k.prepz(q0)
k.prepz(q1)
k.measure(q0)
k.measure(q1)
wait_nanoseconds = int(round(time/1e-9))
k.gate("wait", [q0], wait_nanoseconds)
k.gate("wait", [q1], wait_nanoseconds)
# N.B. The identity gates are there to ensure proper timing
if sequence_type == 'interleaved':
k.gate(pulse_comb_q0[0], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[1], [q1])
elif sequence_type == 'sandwiched':
k.gate('i', [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate(pulse_comb_q0[0], [q0])
k.gate('i', [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[1], [q1])
elif sequence_type == 'sequential':
k.gate(pulse_comb_q0[0], [q0])
k.gate('i', [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate('i', [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate('i', [q0])
k.gate(pulse_comb_q1[1], [q1])
elif sequence_type == 'simultaneous':
k.gate(pulse_comb_q0[0], [q0])
k.gate(pulse_comb_q1[0], [q1])
k.gate(pulse_comb_q0[1], [q0])
k.gate(pulse_comb_q1[1], [q1])
else:
raise ValueError("sequence_type {} ".format(sequence_type) +
"['interleaved', 'simultaneous', " +
"'sequential', 'sandwiched']")
k.measure(q0)
k.measure(q1)
p.add_kernel(k)
p = oqh.compile(p)
return p
def Two_qubit_RTE(QX: int , QZ: int, platf_cfg: str,
measurements: int, net='i', start_states: list = ['0'],
ramsey_time_1: int = 120, ramsey_time_2: int = 120,
echo: bool = False):
"""
"""
p = oqh.create_program('RTE', platf_cfg)
for state in start_states:
k = oqh.create_kernel('RTE start state {}'.format(state), p)
k.prepz(QX)
k.prepz(QZ)
if state == '1':
k.gate('rx180', [QX])
k.gate('rx180', [QZ])
k.gate('wait', [QX, QZ], 0)
######################
# Parity check
######################
for m in range(measurements):
# Superposition
k.gate('rx90', [QX])
k.gate('i', [QZ])
# CZ emulation
if echo:
k.gate('wait', [QX, QZ], int((ramsey_time_1-20)/2) )
k.gate('rx180', [QX])
k.gate('i', [QZ])
k.gate('wait', [QX, QZ], int((ramsey_time_1-20)/2) )
else:
k.gate('wait', [QX, QZ], ramsey_time_1)
            # intermediate step: recover QX, then put QZ in superposition
if net == 'pi' or echo:
k.gate('rx90', [QX])
else:
k.gate('rxm90', [QX])
k.gate('i', [QZ])
k.gate('i', [QX])
k.gate('rx90', [QZ])
# CZ emulation
if echo:
k.gate('wait', [QX, QZ], int((ramsey_time_2-20)/2) )
k.gate('rx180', [QZ])
k.gate('i', [QX])
k.gate('wait', [QX, QZ], int((ramsey_time_2-20)/2) )
else:
k.gate('wait', [QX, QZ], ramsey_time_2)
# Recovery pulse
k.gate('i', [QX])
if net == 'pi' or echo:
k.gate('rx90', [QZ])
else:
k.gate('rxm90', [QZ])
k.gate('wait', [QX, QZ], 0)
# Measurement
k.measure(QX)
k.measure(QZ)
p.add_kernel(k)
p = oqh.compile(p)
return p
def Two_qubit_RTE_pipelined(QX:int, QZ:int, QZ_d:int, platf_cfg: str,
measurements:int, start_states:list = ['0'],
ramsey_time: int = 120, echo:bool = False):
"""
"""
p = oqh.create_program('RTE_pipelined', platf_cfg)
for state in start_states:
k = oqh.create_kernel('RTE pip start state {}'.format(state), p)
k.prepz(QX)
k.prepz(QZ)
if state == '1':
k.gate('rx180', [QX])
k.gate('rx180', [QZ])
k.gate('wait', [QX, QZ, QZ_d], 0)
# k.gate('wait', [QX], 0)
######################
# Parity check
#####################
for m in range(measurements):
k.measure(QZ_d)
if echo is True:
k.gate('wait', [QZ_d], ramsey_time+60)
else:
k.gate('wait', [QZ_d], ramsey_time+40)
k.gate('rx90', [QZ])
if echo is True:
k.gate('wait', [QZ], ramsey_time/2)
k.gate('rx180', [QZ])
k.gate('wait', [QZ], ramsey_time/2)
k.gate('rx90', [QZ])
else:
k.gate('wait', [QZ], ramsey_time)
k.gate('rxm90', [QZ])
k.gate('wait', [QZ], 500)
k.measure(QX)
k.gate('rx90', [QX])
if echo is True:
k.gate('wait', [QX], ramsey_time/2)
k.gate('rx180', [QX])
k.gate('wait', [QX], ramsey_time/2)
k.gate('rx90', [QX])
else:
k.gate('wait', [QX], ramsey_time)
k.gate('rxm90', [QX])
k.gate('wait', [QX, QZ, QZ_d], 0)
p.add_kernel(k)
p = oqh.compile(p)
return p
def Ramsey_cross(wait_time: int,
angles: list,
q_rams: int,
q_meas: int,
echo: bool,
platf_cfg: str,
initial_state: str = '0'):
"""
    q_rams is the qubit that undergoes the Ramsey (or echo) sequence
    q_meas is the qubit that is measured in the meantime
"""
p = oqh.create_program("Ramsey_msmt_induced_dephasing", platf_cfg)
for i, angle in enumerate(angles[:-4]):
cw_idx = angle//20 + 9
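        # phase-pulse codeword, e.g. angle=20 -> cw_10, angle=340 -> cw_26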
k = oqh.create_kernel("Ramsey_azi_"+str(angle), p)
k.prepz(q_rams)
k.prepz(q_meas)
k.gate("wait", [], 0)
k.gate('rx90', [q_rams])
# k.gate("wait", [], 0)
k.measure(q_rams)
if echo:
k.gate("wait", [q_rams], round(wait_time/2)-20)
k.gate('rx180', [q_rams])
k.gate("wait", [q_rams], round(wait_time/2))
else:
k.gate("wait", [q_rams], wait_time-20)
if angle == 90:
k.gate('ry90', [q_rams])
elif angle == 0:
k.gate('rx90', [q_rams])
else:
k.gate('cw_{:02}'.format(cw_idx), [q_rams])
# k.measure(q_rams)
if initial_state == '1':
k.gate('rx180', [q_meas])
k.measure(q_meas)
if echo:
k.gate("wait", [q_meas], wait_time+20)
else:
k.gate("wait", [q_meas], wait_time)
k.gate("wait", [], 0)
p.add_kernel(k)
# adding the calibration points
oqh.add_single_qubit_cal_points(p, qubit_idx=q_rams)
p = oqh.compile(p)
return p
def TEST_RTE(QX:int , QZ:int, platf_cfg: str,
measurements:int):
"""
"""
p = oqh.create_program('Multi_RTE', platf_cfg)
k = oqh.create_kernel('Multi_RTE', p)
k.prepz(QX)
k.prepz(QZ)
######################
# Parity check
######################
for m in range(measurements):
# Superposition
k.gate('ry90', [QX])
k.gate('i', [QZ])
# CZ emulation
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
# CZ emulation
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
        # intermediate step: recover QX, then put QZ in superposition
k.gate('rym90', [QX])
k.gate('i', [QZ])
k.gate('i', [QX])
k.gate('ry90', [QZ])
# CZ emulation
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
# CZ emulation
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
k.gate('i', [QZ, QX])
# Recovery pulse
k.gate('i', [QX])
k.gate('rym90', [QZ])
# Measurement
k.measure(QX)
k.measure(QZ)
p.add_kernel(k)
p = oqh.compile(p)
return p
def multi_qubit_AllXY(qubits_idx: list, platf_cfg: str, double_points: bool = True,analyze = True):
"""
Used for AllXY measurement and calibration for multiple qubits simultaneously.
args:
qubits_idx: list of qubit indeces
qubits: list of qubit names
platf_cfg:
double_points: measure each gate combination twice
analyze:
"""
p = oqh.create_program("Multi_qubit_AllXY", platf_cfg)
allXY = [['i', 'i'], ['rx180', 'rx180'], ['ry180', 'ry180'],
['rx180', 'ry180'], ['ry180', 'rx180'],
['rx90', 'i'], ['ry90', 'i'], ['rx90', 'ry90'],
['ry90', 'rx90'], ['rx90', 'ry180'], ['ry90', 'rx180'],
['rx180', 'ry90'], ['ry180', 'rx90'], ['rx90', 'rx180'],
['rx180', 'rx90'], ['ry90', 'ry180'], ['ry180', 'ry90'],
['rx180', 'i'], ['ry180', 'i'], ['rx90', 'rx90'],
['ry90', 'ry90']]
# this should be implicit
# FIXME: remove try-except, when we depend hard on >=openql-0.6
try:
p.set_sweep_points(np.arange(len(allXY), dtype=float))
except TypeError:
# openql-0.5 compatibility
p.set_sweep_points(np.arange(len(allXY), dtype=float), len(allXY))
for i, xy in enumerate(allXY):
if double_points:
js = 2
else:
js = 1
for j in range(js):
k = oqh.create_kernel("AllXY_{}_{}".format(i, j), p)
for qubit in qubits_idx:
k.prepz(qubit)
k.gate(xy[0], [qubit])
k.gate(xy[1], [qubit])
k.measure(qubit)
p.add_kernel(k)
p = oqh.compile(p)
return p
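# Example use (sketch; the indices and config path are placeholders):
#
#     p = multi_qubit_AllXY(qubits_idx=[0, 2],
#                           platf_cfg='platform_config.json')
#     # 21 gate pairs, each measured twice when double_points is True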
|
SOM-st/RTruffleSOM
|
refs/heads/master
|
src/som/vm/universe.py
|
2
|
from rpython.rlib.debug import make_sure_not_resized
from rpython.rlib.rbigint import rbigint
from rpython.rlib.rrandom import Random
from rpython.rlib import jit
from som.interp_type import is_ast_interpreter
if is_ast_interpreter():
from som.vmobjects.object_with_layout import ObjectWithLayout as Object
else:
from som.vmobjects.object import Object
from som.vmobjects.clazz import Class
from som.vmobjects.array_strategy import Array
from som.vmobjects.object_without_fields import ObjectWithoutFields
from som.vmobjects.symbol import Symbol
from som.vmobjects.integer import Integer
from som.vmobjects.string import String
from som.vmobjects.block_ast import block_evaluation_primitive
from som.vmobjects.biginteger import BigInteger
from som.vmobjects.double import Double
from som.vm.globals import nilObject, trueObject, falseObject
import som.compiler.sourcecode_compiler as sourcecode_compiler
from som.vm.ast.shell import Shell as AstShell
# from som.vm.bc.shell import Shell as BcShell
import os
import time
from rlib.exit import Exit
from rlib.osext import path_split
class Assoc(object):
_immutable_fields_ = ["_global_name", "_value?"]
def __init__(self, global_name, value):
self._global_name = global_name
self._value = value
def get_value(self):
return self._value
def set_value(self, value):
self._value = value
def __str__(self):
return "(%s => %s)" % (self._global_name, self._value)
class Universe(object):
_immutable_fields_ = [
"objectClass",
"classClass",
"metaclassClass",
"nilClass",
"integerClass",
"arrayClass",
"methodClass",
"symbolClass",
"primitiveClass",
"systemClass",
"blockClass",
"blockClasses[*]",
"stringClass",
"doubleClass",
"_symbol_table",
"_globals",
"_object_system_initialized"]
def __init__(self, avoid_exit = False):
self._symbol_table = {}
self._globals = {}
self.objectClass = None
self.classClass = None
self.metaclassClass = None
self.nilClass = None
self.integerClass = None
self.arrayClass = None
self.methodClass = None
self.symbolClass = None
self.primitiveClass = None
self.systemClass = None
self.blockClass = None
self.blockClasses = None
self.stringClass = None
self.doubleClass = None
self._last_exit_code = 0
self._avoid_exit = avoid_exit
self._dump_bytecodes = False
self.classpath = None
self.start_time = time.time() # a float of the time in seconds
self.random = Random(abs(int(time.clock() * time.time())))
self._object_system_initialized = False
def exit(self, error_code):
if self._avoid_exit:
self._last_exit_code = error_code
else:
raise Exit(error_code)
def last_exit_code(self):
return self._last_exit_code
def execute_method(self, class_name, selector):
self._initialize_object_system()
clazz = self.load_class(self.symbol_for(class_name))
if clazz is None:
raise Exception("Class " + class_name + " could not be loaded.")
# Lookup the invokable on class
invokable = clazz.get_class(self).lookup_invokable(self.symbol_for(selector))
if invokable is None:
raise Exception("Lookup of " + selector + " failed in class " + class_name)
return self._start_method_execution(clazz, invokable)
def interpret(self, arguments):
# Check for command line switches
arguments = self.handle_arguments(arguments)
# Initialize the known universe
system_object = self._initialize_object_system()
# Start the shell if no filename is given
if len(arguments) == 0:
return self._start_shell()
else:
arguments_array = self.new_array_with_strings(arguments)
initialize = self.systemClass.lookup_invokable(self.symbol_for("initialize:"))
return self._start_execution(system_object, initialize, arguments_array)
def handle_arguments(self, arguments):
got_classpath = False
remaining_args = []
i = 0
while i < len(arguments):
if arguments[i] == "-cp":
if i + 1 >= len(arguments):
self._print_usage_and_exit()
self.setup_classpath(arguments[i + 1])
i += 1 # skip class path
got_classpath = True
elif arguments[i] == "-d":
self._dump_bytecodes = True
elif arguments[i] in ["-h", "--help", "-?"]:
self._print_usage_and_exit()
else:
remaining_args.append(arguments[i])
i += 1
if not got_classpath:
# Get the default class path of the appropriate size
self.classpath = self._default_classpath()
# check remaining args for class paths, and strip file extension
i = 0
while i < len(remaining_args):
split = self._get_path_class_ext(remaining_args[i])
if split[0] != "": # there was a path
self.classpath.insert(0, split[0])
remaining_args[i] = split[1]
i += 1
return remaining_args
def setup_classpath(self, cp):
self.classpath = cp.split(os.pathsep)
@staticmethod
def _default_classpath():
return ['.']
# take argument of the form "../foo/Test.som" and return
# "../foo", "Test", "som"
@staticmethod
def _get_path_class_ext(path):
return path_split(path)
def _print_usage_and_exit(self):
# Print the usage
std_println("Usage: som [-options] [args...] ")
std_println(" ")
std_println("where options include: ")
std_println(" -cp <directories separated by " + os.pathsep + ">")
std_println(" set search path for application classes")
std_println(" -d enable disassembling")
std_println(" -h print this help")
# Exit
self.exit(0)
def _initialize_object_system(self):
# Allocate the Metaclass classes
self.metaclassClass = self.new_metaclass_class()
# Allocate the rest of the system classes
self.objectClass = self.new_system_class()
self.nilClass = self.new_system_class()
self.classClass = self.new_system_class()
self.arrayClass = self.new_system_class()
self.symbolClass = self.new_system_class()
self.methodClass = self.new_system_class()
self.integerClass = self.new_system_class()
self.primitiveClass = self.new_system_class()
self.stringClass = self.new_system_class()
self.doubleClass = self.new_system_class()
# Setup the class reference for the nil object
nilObject.set_class(self.nilClass)
# Initialize the system classes
self._initialize_system_class(self.objectClass, None, "Object")
self._initialize_system_class(self.classClass, self.objectClass, "Class")
self._initialize_system_class(self.metaclassClass, self.classClass, "Metaclass")
self._initialize_system_class(self.nilClass, self.objectClass, "Nil")
self._initialize_system_class(self.arrayClass, self.objectClass, "Array")
self._initialize_system_class(self.methodClass, self.objectClass, "Method")
self._initialize_system_class(self.integerClass, self.objectClass, "Integer")
self._initialize_system_class(self.primitiveClass, self.objectClass, "Primitive")
self._initialize_system_class(self.stringClass, self.objectClass, "String")
self._initialize_system_class(self.symbolClass, self.stringClass, "Symbol")
self._initialize_system_class(self.doubleClass, self.objectClass, "Double")
# Load methods and fields into the system classes
self._load_system_class(self.objectClass)
self._load_system_class(self.classClass)
self._load_system_class(self.metaclassClass)
self._load_system_class(self.nilClass)
self._load_system_class(self.arrayClass)
self._load_system_class(self.methodClass)
self._load_system_class(self.symbolClass)
self._load_system_class(self.integerClass)
self._load_system_class(self.primitiveClass)
self._load_system_class(self.stringClass)
self._load_system_class(self.doubleClass)
# Load the generic block class
self.blockClass = self.load_class(self.symbol_for("Block"))
# Setup the true and false objects
trueClassName = self.symbol_for("True")
trueClass = self.load_class(trueClassName)
trueObject.set_class(trueClass)
falseClassName = self.symbol_for("False")
falseClass = self.load_class(falseClassName)
falseObject.set_class(falseClass)
# Load the system class and create an instance of it
self.systemClass = self.load_class(self.symbol_for("System"))
system_object = self.new_instance(self.systemClass)
# Put special objects and classes into the dictionary of globals
self.set_global(self.symbol_for("nil"), nilObject)
self.set_global(self.symbol_for("true"), trueObject)
self.set_global(self.symbol_for("false"), falseObject)
self.set_global(self.symbol_for("system"), system_object)
self.set_global(self.symbol_for("System"), self.systemClass)
self.set_global(self.symbol_for("Block"), self.blockClass)
self.set_global(self.symbol_for("Nil"), self.nilClass)
self.set_global( trueClassName, trueClass)
self.set_global(falseClassName, falseClass)
self.blockClasses = [self.blockClass] + \
[self._make_block_class(i) for i in [1, 2, 3]]
self._object_system_initialized = True
return system_object
def is_object_system_initialized(self):
return self._object_system_initialized
@jit.elidable
def symbol_for(self, string):
# Lookup the symbol in the symbol table
result = self._symbol_table.get(string, None)
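        # symbols are interned: repeated calls with equal strings return the
        # identical Symbol object, which is what makes @jit.elidable sound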
if result is not None:
return result
# Create a new symbol and return it
result = self._new_symbol(string)
return result
@staticmethod
def new_array_with_length(length):
return Array.from_size(length)
@staticmethod
def new_array_from_list(values):
make_sure_not_resized(values)
return Array.from_values(values)
@staticmethod
def new_array_with_strings(strings):
values = [Universe.new_string(s) for s in strings]
return Array.from_objects(values)
def new_class(self, class_class):
# Allocate a new class and set its class to be the given class class
return Class(self, class_class.get_number_of_instance_fields(), class_class)
@staticmethod
def new_instance(instance_class):
num_fields = instance_class.get_number_of_instance_fields()
if num_fields == 0:
return ObjectWithoutFields(instance_class)
else:
return Object(instance_class, num_fields)
@staticmethod
def new_integer(value):
assert isinstance(value, int)
return Integer(value)
@staticmethod
def new_biginteger(value):
assert isinstance(value, rbigint)
return BigInteger(value)
@staticmethod
def new_double(value):
return Double(value)
def new_metaclass_class(self):
# Allocate the metaclass classes
class_class = Class(self, 0, None)
result = Class(self, 0, class_class)
# Setup the metaclass hierarchy
result.get_class(self).set_class(result)
return result
@staticmethod
def new_string(embedded_string):
return String(embedded_string)
def _new_symbol(self, string):
result = Symbol(string)
# Insert the new symbol into the symbol table
self._symbol_table[string] = result
return result
def new_system_class(self):
# Allocate the new system class
system_class_class = Class(self, 0, None)
system_class = Class(self, 0, system_class_class)
# Setup the metaclass hierarchy
system_class.get_class(self).set_class(self.metaclassClass)
return system_class
def _initialize_system_class(self, system_class, super_class, name):
# Initialize the superclass hierarchy
if super_class:
system_class.set_super_class(super_class)
system_class.get_class(self).set_super_class(
super_class.get_class(self))
else:
system_class.get_class(self).set_super_class(self.classClass)
# Initialize the array of instance fields
system_class.set_instance_fields(self.new_array_with_length(0))
system_class.get_class(self).set_instance_fields(self.new_array_with_length(0))
# Initialize the array of instance invokables
system_class.set_instance_invokables(self.new_array_with_length(0))
system_class.get_class(self).set_instance_invokables(self.new_array_with_length(0))
# Initialize the name of the system class
system_class.set_name(self.symbol_for(name))
system_class.get_class(self).set_name(self.symbol_for(name + " class"))
# Insert the system class into the dictionary of globals
self.set_global(system_class.get_name(), system_class)
def get_global(self, name):
# Return the global with the given name if it's in the dictionary of globals
# if not, return None
jit.promote(self)
assoc = self._get_global(name)
if assoc:
return assoc.get_value()
else:
return None
@jit.elidable
def _get_global(self, name):
return self._globals.get(name, None)
def set_global(self, name, value):
self.get_globals_association(name).set_value(value)
@jit.elidable_promote("all")
def has_global(self, name):
return name in self._globals
@jit.elidable_promote("all")
def get_globals_association(self, name):
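        # globals live behind Assoc cells: the JIT can constant-fold this
        # lookup while later set_value() updates stay visible via the cell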
assoc = self._globals.get(name, None)
if assoc is None:
assoc = Assoc(name, nilObject)
self._globals[name] = assoc
return assoc
def _get_block_class(self, number_of_arguments):
return self.blockClasses[number_of_arguments]
def _make_block_class(self, number_of_arguments):
# Compute the name of the block class with the given number of
# arguments
name = self.symbol_for("Block" + str(number_of_arguments))
# Get the block class for blocks with the given number of arguments
result = self._load_class(name, None)
# Add the appropriate value primitive to the block class
result.add_instance_primitive(
block_evaluation_primitive(number_of_arguments, self), True)
# Insert the block class into the dictionary of globals
self.set_global(name, result)
# Return the loaded block class
return result
def load_class(self, name):
# Check if the requested class is already in the dictionary of globals
result = self.get_global(name)
if result is not None:
return result
# Load the class
result = self._load_class(name, None)
self._load_primitives(result, False)
self.set_global(name, result)
return result
@staticmethod
def _load_primitives(clazz, is_system_class):
if not clazz: return
if clazz.has_primitives() or is_system_class:
clazz.load_primitives(not is_system_class)
def _load_system_class(self, system_class):
# Load the system class
result = self._load_class(system_class.get_name(), system_class)
if not result:
error_println(system_class.get_name().get_embedded_string()
+ " class could not be loaded. It is likely that the"
+ " class path has not been initialized properly."
+ " Please make sure that the '-cp' parameter is given on the command-line.")
self.exit(200)
self._load_primitives(result, True)
def _load_class(self, name, system_class):
# Try loading the class from all different paths
for cpEntry in self.classpath:
try:
# Load the class from a file and return the loaded class
result = sourcecode_compiler.compile_class_from_file(
cpEntry, name.get_embedded_string(), system_class, self)
if self._dump_bytecodes:
from som.compiler.disassembler import dump
dump(result.get_class(self))
dump(result)
return result
except IOError:
# Continue trying different paths
pass
# The class could not be found.
return None
def load_shell_class(self, stmt):
# Load the class from a stream and return the loaded class
result = sourcecode_compiler.compile_class_from_string(stmt, None, self)
if self._dump_bytecodes:
from som.compiler.disassembler import dump
dump(result)
return result
class _ASTUniverse(Universe):
def _start_shell(self):
shell = AstShell(self)
return shell.start()
def _start_execution(self, system_object, initialize, arguments_array):
return initialize.invoke(system_object, [arguments_array])
def _start_method_execution(self, clazz, invokable):
return invokable.invoke(clazz, [])
class _BCUniverse(Universe):
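    # Bytecode-interpreter variant; Interpreter, BcShell and the
    # create_bootstrap_* helpers belong to the BC build of the VM
    # (their imports are disabled in this AST-focused source).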
def __init__(self, avoid_exit = False):
self._interpreter = Interpreter(self)
Universe.__init__(self, avoid_exit)
def get_interpreter(self):
return self._interpreter
def _start_shell(self):
bootstrap_method = create_bootstrap_method(self)
shell = BcShell(self, self._interpreter, bootstrap_method)
return shell.start()
def _start_execution(self, system_object, initialize, arguments_array):
bootstrap_method = create_bootstrap_method(self)
bootstrap_frame = create_bootstrap_frame(bootstrap_method, system_object, arguments_array)
# Lookup the initialize invokable on the system class
return initialize.invoke(bootstrap_frame, self._interpreter)
def _start_method_execution(self, clazz, invokable):
bootstrap_method = create_bootstrap_method(self)
bootstrap_frame = create_bootstrap_frame(bootstrap_method, clazz)
invokable.invoke(bootstrap_frame, self._interpreter)
return bootstrap_frame.pop()
def _initialize_object_system(self):
system_object = Universe._initialize_object_system(self)
self._interpreter.initialize_known_quick_sends()
return system_object
def create_universe(avoid_exit = False):
if is_ast_interpreter():
return _ASTUniverse(avoid_exit)
else:
return _BCUniverse(avoid_exit)
_current = create_universe()
def error_print(msg):
os.write(2, msg or "")
def error_println(msg = ""):
os.write(2, msg + "\n")
def std_print(msg):
os.write(1, msg or "")
def std_println(msg = ""):
os.write(1, msg + "\n")
def main(args):
jit.set_param(None, 'trace_limit', 15000)
u = _current
u.interpret(args[1:])
u.exit(0)
def get_current():
return _current
if __name__ == '__main__':
raise RuntimeError("Universe should not be used as main anymore")
|
herove/dotfiles
|
refs/heads/master
|
sublime/Packages/SublimeLinter/sublimelinter/modules/puppet.py
|
5
|
import re
from base_linter import BaseLinter, INPUT_METHOD_TEMP_FILE
CONFIG = {
'language': 'Puppet',
'executable': 'puppet',
'lint_args': ['parser', 'validate', '--color=false', '{filename}'],
'test_existence_args': '-V',
'input_method': INPUT_METHOD_TEMP_FILE
}
class Linter(BaseLinter):
def parse_errors(self, view, errors, lines, errorUnderlines, violationUnderlines, warningUnderlines, errorMessages, violationMessages, warningMessages):
for line in errors.splitlines():
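            # Try three patterns in order: parser syntax errors
            # (e.g. "Error: Syntax error at '}'; expected '{' at /x.pp:3"),
            # "Could not match ..." errors, and plain
            # "ERROR/WARNING: ... on line N" messages; these examples are
            # illustrative, not verbatim puppet output.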
match = re.match(r'[Ee]rr(or)?: (?P<error>.+?(Syntax error at \'(?P<near>.+?)\'; expected \'.+\')) at /.+?:(?P<line>\d+)?', line)
if not match:
match = re.match(r'[Ee]rr(or)?: (?P<error>.+?(Could not match (?P<near>.+?))?) at /.+?:(?P<line>\d+)?', line)
if not match:
match = re.match(r'(ERROR|WARNING): (?P<error>.+?) on line (?P<line>\d+)?', line)
if match:
error, line = match.group('error'), match.group('line')
lineno = int(line)
try:
near = match.group('near')
except IndexError:
near = ''
if near:
error = '{0}, near "{1}"'.format(error, near)
self.underline_regex(view, lineno, '(?P<underline>{0})'.format(re.escape(near)), lines, errorUnderlines)
self.add_message(lineno, lines, error, errorMessages)
|
mohittahiliani/adaptive-RED-ns3
|
refs/heads/master
|
waf-tools/relocation.py
|
78
|
#! /usr/bin/env python
# encoding: utf-8
"""
Waf 1.6
Try to detect if the project directory was relocated, and if it was,
change the node representing the project directory. Just call:
waf configure build
Note that if the project directory name changes, the signatures for the tasks using
files in that directory will change, causing a partial build.
"""
from __future__ import print_function
import os
from waflib import Build, ConfigSet, Task, Utils, Errors
from waflib.TaskGen import feature, before_method, after_method
EXTRA_LOCK = '.old_srcdir'
old1 = Build.BuildContext.store
def store(self):
old1(self)
db = os.path.join(self.variant_dir, EXTRA_LOCK)
env = ConfigSet.ConfigSet()
env.SRCDIR = self.srcnode.abspath()
env.store(db)
Build.BuildContext.store = store
old2 = Build.BuildContext.init_dirs
def init_dirs(self):
if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: run "waf configure" first!')
srcdir = None
db = os.path.join(self.variant_dir, EXTRA_LOCK)
env = ConfigSet.ConfigSet()
try:
env.load(db)
srcdir = env.SRCDIR
except:
pass
if srcdir:
d = self.root.find_node(srcdir)
if d and srcdir != self.top_dir and getattr(d, 'children', ''):
srcnode = self.root.make_node(self.top_dir)
			print("relocating the source directory %r -> %r" % (srcdir, self.top_dir))
srcnode.children = {}
for (k, v) in list(d.children.items()):
srcnode.children[k] = v
v.parent = srcnode
d.children = {}
old2(self)
Build.BuildContext.init_dirs = init_dirs
def uid(self):
try:
return self.uid_
except AttributeError:
# this is not a real hot zone, but we want to avoid surprises here
m = Utils.md5()
up = m.update
up(self.__class__.__name__.encode())
for x in self.inputs + self.outputs:
up(x.path_from(x.ctx.srcnode).encode())
self.uid_ = m.digest()
return self.uid_
Task.Task.uid = uid
@feature('c', 'cxx', 'd', 'go', 'asm', 'fc', 'includes')
@after_method('propagate_uselib_vars', 'process_source')
def apply_incpaths(self):
lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES'])
self.includes_nodes = lst
bld = self.bld
self.env['INCPATHS'] = [x.is_child_of(bld.srcnode) and x.path_from(bld.bldnode) or x.abspath() for x in lst]
|
AlanZatarain/pacupdate
|
refs/heads/master
|
src/pacupdate/__init__.py
|
12133432
| |
HybridF5/jacket
|
refs/heads/master
|
jacket/common/compute/__init__.py
|
12133432
| |
foolcage/foolcage
|
refs/heads/master
|
foolspider/foolspider/cmds/__init__.py
|
12133432
| |
sodafree/backend
|
refs/heads/master
|
tests/regressiontests/localflavor/co/__init__.py
|
12133432
| |
ingokegel/intellij-community
|
refs/heads/master
|
python/testData/packaging/PyPackageUtil/AbsentSetupCallUpdating/setup.py
|
70
|
from distutils.core import setup
|
linea-it/qlf
|
refs/heads/master
|
backend/framework/qlf/dashboard/migrations/0008_processcomment.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-12 20:06
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('dashboard', '0007_auto_20180607_1930'),
]
operations = [
migrations.CreateModel(
name='ProcessComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True, null=True)),
('date', models.DateField(default=datetime.date.today)),
('process', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='process_comments', to='dashboard.Process')),
('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
itandt/unisportr
|
refs/heads/master
|
temp/sitemapgenerator/sitemap_gen.py
|
9
|
#!/usr/bin/env python
#
# Copyright (c) 2004, 2005 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Google nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The sitemap_gen.py script is written in Python 2.2 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
__usage__ = \
"""A simple script to automatically produce sitemaps for a webserver,
in the Google Sitemap Protocol (GSP).
Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
--config=config.xml, specifies config file location
--help, displays usage message
--testing, specified when user is experimenting
"""
# Please be careful that all syntax used in this file can be parsed on
# Python 1.5 -- this version check is not evaluated until after the
# entire file has been parsed.
import sys
if sys.hexversion < 0x02020000:
print 'This script requires Python 2.2 or later.'
print 'Currently run with version: %s' % sys.version
sys.exit(1)
import fnmatch
import glob
import gzip
import md5
import os
import re
import stat
import time
import types
import urllib
import urlparse
import xml.sax
# True and False were introduced in Python2.2.2
try:
testTrue=True
del testTrue
except NameError:
True=1
False=0
# Text encodings
ENC_ASCII = 'ASCII'
ENC_UTF8 = 'UTF-8'
ENC_IDNA = 'IDNA'
ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US',
'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
'ANSI_X3.4-1986', 'CPASCII' ]
ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
# Available Sitemap types
SITEMAP_TYPES = ['web', 'mobile', 'news']
# General Sitemap tags
GENERAL_SITEMAP_TAGS = ['loc', 'changefreq', 'priority', 'lastmod']
# News specific tags
NEWS_SPECIFIC_TAGS = ['keywords', 'publication_date', 'stock_tickers']
# News Sitemap tags
NEWS_SITEMAP_TAGS = GENERAL_SITEMAP_TAGS + NEWS_SPECIFIC_TAGS
# Maximum number of urls in each sitemap, before next Sitemap is created
MAXURLS_PER_SITEMAP = 50000
# Suffix on a Sitemap index file
SITEINDEX_SUFFIX = '_index.xml'
# Regular expressions tried for extracting URLs from access logs.
ACCESSLOG_CLF_PATTERN = re.compile(
r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+.*'
)
# Match patterns for lastmod attributes
DATE_PATTERNS = map(re.compile, [
r'^\d\d\d\d$',
r'^\d\d\d\d-\d\d$',
r'^\d\d\d\d-\d\d-\d\d$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
])
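# For example, '2004-11-02', '2004-11-02T14:30+01:00' and
# '2004-11-02T14:30:00.500Z' all match one of the patterns above, while a
# value like '11/02/2004' does not and will draw a VerifyDate() warning.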
# Match patterns for changefreq attributes
CHANGEFREQ_PATTERNS = [
'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
]
# XML formats
GENERAL_SITEINDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'siteindex.xsd">\n'
NEWS_SITEINDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'siteindex.xsd">\n'
SITEINDEX_FOOTER = '</sitemapindex>\n'
SITEINDEX_ENTRY = \
' <sitemap>\n' \
' <loc>%(loc)s</loc>\n' \
' <lastmod>%(lastmod)s</lastmod>\n' \
' </sitemap>\n'
GENERAL_SITEMAP_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<urlset\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'sitemap.xsd">\n'
NEWS_SITEMAP_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<urlset\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'sitemap.xsd">\n'
SITEMAP_FOOTER = '</urlset>\n'
SITEURL_XML_PREFIX = ' <url>\n'
SITEURL_XML_SUFFIX = ' </url>\n'
NEWS_TAG_XML_PREFIX = ' <news:news>\n'
NEWS_TAG_XML_SUFFIX = ' </news:news>\n'
# Search engines to notify with the updated sitemaps
#
# This list is very non-obvious in what's going on. Here's the gist:
# Each item in the list is a 6-tuple of items. The first 5 are "almost"
# the same as the input arguments to urlparse.urlunsplit():
# 0 - schema
# 1 - netloc
# 2 - path
# 3 - query <-- EXCEPTION: specify a query map rather than a string
# 4 - fragment
# Additionally, add item 5:
# 5 - query attribute that should be set to the new Sitemap URL
# Clear as mud, I know.
NOTIFICATION_SITES = [
('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap'),
]
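# Illustrative expansion (see NotifySearch() below): for the Google entry
# above and a sitemap at http://www.example.com/sitemap.xml, the ping URL
# works out to
#   http://www.google.com/webmasters/sitemaps/ping?sitemap=http%3A%2F%2Fwww.example.com%2Fsitemap.xml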
class Error(Exception):
"""
Base exception class. In this module we tend not to use our own exception
types for very much, but they come in very handy on XML parsing with SAX.
"""
pass
#end class Error
class SchemaError(Error):
"""Failure to process an XML file according to the schema we know."""
pass
#end class SchemaError
class Encoder:
"""
Manages wide-character/narrow-character conversions for just about all
text that flows into or out of the script.
You should always use this class for string coercion, as opposed to
letting Python handle coercions automatically. Reason: Python
usually assumes ASCII (7-bit) as a default narrow character encoding,
which is not the kind of data we generally deal with.
General high-level methodologies used in sitemap_gen:
[PATHS]
File system paths may be wide or narrow, depending on platform.
This works fine, just be aware of it and be very careful to not
mix them. That is, if you have to pass several file path arguments
into a library call, make sure they are all narrow or all wide.
This class has MaybeNarrowPath() which should be called on every
file system path you deal with.
[URLS]
URL locations are stored in Narrow form, already escaped. This has the
benefit of keeping escaping and encoding as close as possible to the format
we read them in. The downside is we may end up with URLs that have
intermingled encodings -- the root path may be encoded in one way
while the filename is encoded in another. This is obviously wrong, but
it should hopefully be an issue hit by very few users. The workaround
from the user level (assuming they notice) is to specify a default_encoding
parameter in their config file.
[OTHER]
Other text, such as attributes of the URL class, configuration options,
etc, are generally stored in Unicode for simplicity.
"""
def __init__(self):
self._user = None # User-specified default encoding
self._learned = [] # Learned default encodings
self._widefiles = False # File system can be wide
# Can the file system be Unicode?
try:
self._widefiles = os.path.supports_unicode_filenames
except AttributeError:
try:
        self._widefiles = sys.getwindowsversion()[3] == 2  # VER_PLATFORM_WIN32_NT
except AttributeError:
pass
# Try to guess a working default
try:
encoding = sys.getfilesystemencoding()
if encoding and not (encoding.upper() in ENC_ASCII_LIST):
self._learned = [ encoding ]
except AttributeError:
pass
if not self._learned:
encoding = sys.getdefaultencoding()
if encoding and not (encoding.upper() in ENC_ASCII_LIST):
self._learned = [ encoding ]
# If we had no guesses, start with some European defaults
if not self._learned:
self._learned = ENC_DEFAULT_LIST
#end def __init__
def SetUserEncoding(self, encoding):
self._user = encoding
#end def SetUserEncoding
def NarrowText(self, text, encoding):
""" Narrow a piece of arbitrary text """
if type(text) != types.UnicodeType:
return text
# Try the passed in preference
if encoding:
try:
result = text.encode(encoding)
if not encoding in self._learned:
self._learned.append(encoding)
return result
except UnicodeError:
pass
except LookupError:
output.Warn('Unknown encoding: %s' % encoding)
# Try the user preference
if self._user:
try:
return text.encode(self._user)
except UnicodeError:
pass
except LookupError:
temp = self._user
self._user = None
output.Warn('Unknown default_encoding: %s' % temp)
# Look through learned defaults, knock any failing ones out of the list
while self._learned:
try:
return text.encode(self._learned[0])
except:
del self._learned[0]
# When all other defaults are exhausted, use UTF-8
try:
return text.encode(ENC_UTF8)
except UnicodeError:
pass
# Something is seriously wrong if we get to here
return text.encode(ENC_ASCII, 'ignore')
#end def NarrowText
def MaybeNarrowPath(self, text):
""" Paths may be allowed to stay wide """
if self._widefiles:
return text
return self.NarrowText(text, None)
#end def MaybeNarrowPath
def WidenText(self, text, encoding):
""" Widen a piece of arbitrary text """
if type(text) != types.StringType:
return text
# Try the passed in preference
if encoding:
try:
result = unicode(text, encoding)
if not encoding in self._learned:
self._learned.append(encoding)
return result
except UnicodeError:
pass
except LookupError:
output.Warn('Unknown encoding: %s' % encoding)
# Try the user preference
if self._user:
try:
return unicode(text, self._user)
except UnicodeError:
pass
except LookupError:
temp = self._user
self._user = None
output.Warn('Unknown default_encoding: %s' % temp)
# Look through learned defaults, knock any failing ones out of the list
while self._learned:
try:
return unicode(text, self._learned[0])
except:
del self._learned[0]
# When all other defaults are exhausted, use UTF-8
try:
return unicode(text, ENC_UTF8)
except UnicodeError:
pass
# Getting here means it wasn't UTF-8 and we had no working default.
# We really don't have anything "right" we can do anymore.
output.Warn('Unrecognized encoding in text: %s' % text)
if not self._user:
output.Warn('You may need to set a default_encoding in your '
'configuration file.')
return text.decode(ENC_ASCII, 'ignore')
#end def WidenText
#end class Encoder
encoder = Encoder()
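# Usage sketch (illustrative; names below are placeholders): the module-level
# singleton above is used throughout the script roughly like this:
#   narrow = encoder.NarrowText(u'caf\xe9', None)  # best-effort byte string
#   fspath = encoder.MaybeNarrowPath(somepath)     # narrowed unless FS is wide
#   wide = encoder.WidenText(narrow, None)         # back to unicode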
class Output:
"""
Exposes logging functionality, and tracks how many errors
we have thus output.
Logging levels should be used as thus:
Fatal -- extremely sparingly
Error -- config errors, entire blocks of user 'intention' lost
Warn -- individual URLs lost
Log(,0) -- Un-suppressable text that's not an error
Log(,1) -- touched files, major actions
Log(,2) -- parsing notes, filtered or duplicated URLs
Log(,3) -- each accepted URL
"""
def __init__(self):
self.num_errors = 0 # Count of errors
self.num_warns = 0 # Count of warnings
self._errors_shown = {} # Shown errors
self._warns_shown = {} # Shown warnings
self._verbose = 0 # Level of verbosity
#end def __init__
def Log(self, text, level):
""" Output a blurb of diagnostic text, if the verbose level allows it """
if text:
text = encoder.NarrowText(text, None)
if self._verbose >= level:
print text
#end def Log
def Warn(self, text):
""" Output and count a warning. Suppress duplicate warnings. """
if text:
text = encoder.NarrowText(text, None)
hash = md5.new(text).digest()
if not self._warns_shown.has_key(hash):
self._warns_shown[hash] = 1
print '[WARNING] ' + text
else:
self.Log('(suppressed) [WARNING] ' + text, 3)
self.num_warns = self.num_warns + 1
#end def Warn
def Error(self, text):
""" Output and count an error. Suppress duplicate errors. """
if text:
text = encoder.NarrowText(text, None)
hash = md5.new(text).digest()
if not self._errors_shown.has_key(hash):
self._errors_shown[hash] = 1
print '[ERROR] ' + text
else:
self.Log('(suppressed) [ERROR] ' + text, 3)
self.num_errors = self.num_errors + 1
#end def Error
def Fatal(self, text):
""" Output an error and terminate the program. """
if text:
text = encoder.NarrowText(text, None)
print '[FATAL] ' + text
else:
print 'Fatal error.'
sys.exit(1)
#end def Fatal
def SetVerbose(self, level):
""" Sets the verbose level. """
try:
if type(level) != types.IntType:
level = int(level)
if (level >= 0) and (level <= 3):
self._verbose = level
return
except ValueError:
pass
self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
#end def SetVerbose
#end class Output
output = Output()
class URL(object):
""" URL is a smart structure grouping together the properties we
care about for a single web reference. """
__slots__ = 'loc', 'lastmod', 'changefreq', 'priority'
def __init__(self):
self.loc = None # URL -- in Narrow characters
self.lastmod = None # ISO8601 timestamp of last modify
self.changefreq = None # Text term for update frequency
self.priority = None # Float between 0 and 1 (inc)
#end def __init__
def __cmp__(self, other):
if self.loc < other.loc:
return -1
if self.loc > other.loc:
return 1
return 0
#end def __cmp__
def TrySetAttribute(self, attribute, value):
""" Attempt to set the attribute to the value, with a pretty try
block around it. """
if attribute == 'loc':
self.loc = self.Canonicalize(value)
else:
try:
setattr(self, attribute, value)
except AttributeError:
output.Warn('Unknown URL attribute: %s' % attribute)
#end def TrySetAttribute
def IsAbsolute(loc):
""" Decide if the URL is absolute or not """
if not loc:
return False
narrow = encoder.NarrowText(loc, None)
(scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
if (not scheme) or (not netloc):
return False
return True
#end def IsAbsolute
IsAbsolute = staticmethod(IsAbsolute)
def Canonicalize(loc):
""" Do encoding and canonicalization on a URL string """
if not loc:
return loc
# Let the encoder try to narrow it
narrow = encoder.NarrowText(loc, None)
# Escape components individually
(scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
unr = '-._~'
sub = '!$&\'()*+,;='
netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
path = urllib.quote(path, unr + sub + '%:@/')
query = urllib.quote(query, unr + sub + '%:@/?')
frag = urllib.quote(frag, unr + sub + '%:@/?')
# Try built-in IDNA encoding on the netloc
try:
(ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
for c in widenetloc:
if c >= unichr(128):
netloc = widenetloc.encode(ENC_IDNA)
netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
break
except UnicodeError:
# urlsplit must have failed, based on implementation differences in the
# library. There is not much we can do here, except ignore it.
pass
except LookupError:
output.Warn('An International Domain Name (IDN) is being used, but this '
'version of Python does not have support for IDNA encoding. '
' (IDNA support was introduced in Python 2.3) The encoding '
'we have used instead is wrong and will probably not yield '
'valid URLs.')
bad_netloc = False
if '%' in netloc:
bad_netloc = True
# Put it all back together
narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))
# I let '%' through. Fix any that aren't pre-existing escapes.
HEXDIG = '0123456789abcdefABCDEF'
list = narrow.split('%')
narrow = list[0]
del list[0]
for item in list:
if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
narrow = narrow + '%' + item
else:
narrow = narrow + '%25' + item
# Issue a warning if this is a bad URL
if bad_netloc:
output.Warn('Invalid characters in the host or domain portion of a URL: '
+ narrow)
return narrow
#end def Canonicalize
Canonicalize = staticmethod(Canonicalize)
def VerifyDate(self, date, metatag):
"""Verify the date format is valid"""
match = False
if date:
date = date.upper()
for pattern in DATE_PATTERNS:
match = pattern.match(date)
if match:
return True
if not match:
output.Warn('The value for %s does not appear to be in ISO8601 '
'format on URL: %s' % (metatag, self.loc))
return False
  #end def VerifyDate
def Validate(self, base_url, allow_fragment):
""" Verify the data in this URL is well-formed, and override if not. """
assert type(base_url) == types.StringType
# Test (and normalize) the ref
if not self.loc:
output.Warn('Empty URL')
return False
if allow_fragment:
self.loc = urlparse.urljoin(base_url, self.loc)
if not self.loc.startswith(base_url):
output.Warn('Discarded URL for not starting with the base_url: %s' %
self.loc)
self.loc = None
return False
# Test the lastmod
if self.lastmod:
if not self.VerifyDate(self.lastmod, "lastmod"):
self.lastmod = None
# Test the changefreq
if self.changefreq:
match = False
self.changefreq = self.changefreq.lower()
for pattern in CHANGEFREQ_PATTERNS:
if self.changefreq == pattern:
match = True
break
if not match:
output.Warn('Changefreq "%s" is not a valid change frequency on URL '
': %s' % (self.changefreq, self.loc))
self.changefreq = None
# Test the priority
if self.priority:
priority = -1.0
try:
priority = float(self.priority)
except ValueError:
pass
if (priority < 0.0) or (priority > 1.0):
output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
'on URL: %s' % (self.priority, self.loc))
self.priority = None
return True
#end def Validate
def MakeHash(self):
""" Provides a uniform way of hashing URLs """
if not self.loc:
return None
if self.loc.endswith('/'):
return md5.new(self.loc[:-1]).digest()
return md5.new(self.loc).digest()
#end def MakeHash
def Log(self, prefix='URL', level=3):
""" Dump the contents, empty or not, to the log. """
out = prefix + ':'
for attribute in self.__slots__:
value = getattr(self, attribute)
if not value:
value = ''
out = out + (' %s=[%s]' % (attribute, value))
output.Log('%s' % encoder.NarrowText(out, None), level)
#end def Log
def WriteXML(self, file):
""" Dump non-empty contents to the output file, in XML format. """
if not self.loc:
return
out = SITEURL_XML_PREFIX
for attribute in self.__slots__:
value = getattr(self, attribute)
if value:
if type(value) == types.UnicodeType:
value = encoder.NarrowText(value, None)
elif type(value) != types.StringType:
value = str(value)
value = xml.sax.saxutils.escape(value)
out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))
out = out + SITEURL_XML_SUFFIX
file.write(out)
#end def WriteXML
#end class URL
class NewsURL(URL):
""" NewsURL is a subclass of URL with News-Sitemap specific properties. """
__slots__ = 'loc', 'lastmod', 'changefreq', 'priority', 'publication_date', \
'keywords', 'stock_tickers'
def __init__(self):
URL.__init__(self)
self.publication_date = None # ISO8601 timestamp of publication date
self.keywords = None # Text keywords
self.stock_tickers = None # Text stock
#end def __init__
def Validate(self, base_url, allow_fragment):
""" Verify the data in this News URL is well-formed, and override if not. """
assert type(base_url) == types.StringType
if not URL.Validate(self, base_url, allow_fragment):
return False
if not URL.VerifyDate(self, self.publication_date, "publication_date"):
self.publication_date = None
return True
#end def Validate
def WriteXML(self, file):
""" Dump non-empty contents to the output file, in XML format. """
if not self.loc:
return
out = SITEURL_XML_PREFIX
# printed_news_tag indicates if news-specific metatags are present
printed_news_tag = False
for attribute in self.__slots__:
value = getattr(self, attribute)
if value:
if type(value) == types.UnicodeType:
value = encoder.NarrowText(value, None)
elif type(value) != types.StringType:
value = str(value)
value = xml.sax.saxutils.escape(value)
if attribute in NEWS_SPECIFIC_TAGS:
if not printed_news_tag:
printed_news_tag = True
out = out + NEWS_TAG_XML_PREFIX
out = out + (' <news:%s>%s</news:%s>\n' % (attribute, value, attribute))
else:
out = out + (' <%s>%s</%s>\n' % (attribute, value, attribute))
if printed_news_tag:
out = out + NEWS_TAG_XML_SUFFIX
out = out + SITEURL_XML_SUFFIX
file.write(out)
#end def WriteXML
#end class NewsURL
class Filter:
"""
A filter on the stream of URLs we find. A filter is, in essence,
a wildcard applied to the stream. You can think of this as an
operator that returns a tri-state when given a URL:
True -- this URL is to be included in the sitemap
None -- this URL is undecided
False -- this URL is to be dropped from the sitemap
"""
def __init__(self, attributes):
self._wildcard = None # Pattern for wildcard match
self._regexp = None # Pattern for regexp match
self._pass = False # "Drop" filter vs. "Pass" filter
if not ValidateAttributes('FILTER', attributes,
('pattern', 'type', 'action')):
return
# Check error count on the way in
num_errors = output.num_errors
# Fetch the attributes
pattern = attributes.get('pattern')
type = attributes.get('type', 'wildcard')
action = attributes.get('action', 'drop')
if type:
type = type.lower()
if action:
action = action.lower()
# Verify the attributes
if not pattern:
output.Error('On a filter you must specify a "pattern" to match')
elif (not type) or ((type != 'wildcard') and (type != 'regexp')):
output.Error('On a filter you must specify either \'type="wildcard"\' '
'or \'type="regexp"\'')
elif (action != 'pass') and (action != 'drop'):
output.Error('If you specify a filter action, it must be either '
'\'action="pass"\' or \'action="drop"\'')
# Set the rule
if action == 'drop':
self._pass = False
elif action == 'pass':
self._pass = True
if type == 'wildcard':
self._wildcard = pattern
elif type == 'regexp':
try:
self._regexp = re.compile(pattern)
except re.error:
output.Error('Bad regular expression: %s' % pattern)
# Log the final results iff we didn't add any errors
if num_errors == output.num_errors:
output.Log('Filter: %s any URL that matches %s "%s"' %
(action, type, pattern), 2)
#end def __init__
def Apply(self, url):
""" Process the URL, as above. """
if (not url) or (not url.loc):
return None
if self._wildcard:
if fnmatch.fnmatchcase(url.loc, self._wildcard):
return self._pass
return None
if self._regexp:
if self._regexp.search(url.loc):
return self._pass
return None
assert False # unreachable
#end def Apply
#end class Filter
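# Illustrative sketch: a configuration entry such as
#   <filter action="drop" type="wildcard" pattern="*.gif"/>
# yields Filter({'action': 'drop', 'type': 'wildcard', 'pattern': '*.gif'}),
# whose Apply() returns False for any URL whose loc matches *.gif and None
# (undecided) for everything else.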
class InputURL:
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles a single URL, manually specified in the config file.
"""
def __init__(self, attributes):
self._url = None # The lonely URL
if not ValidateAttributes('URL', attributes,
('href', 'lastmod', 'changefreq', 'priority')):
return
url = URL()
for attr in attributes.keys():
if attr == 'href':
url.TrySetAttribute('loc', attributes[attr])
else:
url.TrySetAttribute(attr, attributes[attr])
if not url.loc:
output.Error('Url entries must have an href attribute.')
return
self._url = url
output.Log('Input: From URL "%s"' % self._url.loc, 2)
#end def __init__
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
if self._url:
consumer(self._url, True)
#end def ProduceURLs
#end class InputURL
class InputURLList:
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles a text file with a list of URLs
"""
def __init__(self, attributes):
self._path = None # The file path
self._encoding = None # Encoding of that file
if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
return
self._path = attributes.get('path')
self._encoding = attributes.get('encoding', ENC_UTF8)
if self._path:
self._path = encoder.MaybeNarrowPath(self._path)
if os.path.isfile(self._path):
output.Log('Input: From URLLIST "%s"' % self._path, 2)
else:
output.Error('Can not locate file: %s' % self._path)
self._path = None
else:
output.Error('Urllist entries must have a "path" attribute.')
#end def __init__
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
# Open the file
(frame, file) = OpenFileForRead(self._path, 'URLLIST')
if not file:
return
# Iterate lines
linenum = 0
for line in file.readlines():
linenum = linenum + 1
# Strip comments and empty lines
if self._encoding:
line = encoder.WidenText(line, self._encoding)
line = line.strip()
if (not line) or line[0] == '#':
continue
# Split the line on space
url = URL()
cols = line.split(' ')
for i in range(0,len(cols)):
cols[i] = cols[i].strip()
url.TrySetAttribute('loc', cols[0])
# Extract attributes from the other columns
for i in range(1,len(cols)):
if cols[i]:
try:
(attr_name, attr_val) = cols[i].split('=', 1)
url.TrySetAttribute(attr_name, attr_val)
except ValueError:
output.Warn('Line %d: Unable to parse attribute: %s' %
(linenum, cols[i]))
# Pass it on
consumer(url, False)
file.close()
if frame:
frame.close()
#end def ProduceURLs
#end class InputURLList
class InputNewsURLList:
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles a text file with a list of News URLs and their metadata
"""
def __init__(self, attributes):
self._path = None # The file path
self._encoding = None # Encoding of that file
self._tag_order = [] # Order of URL metadata
if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding', \
'tag_order')):
return
self._path = attributes.get('path')
self._encoding = attributes.get('encoding', ENC_UTF8)
self._tag_order = attributes.get('tag_order')
if self._path:
self._path = encoder.MaybeNarrowPath(self._path)
if os.path.isfile(self._path):
output.Log('Input: From URLLIST "%s"' % self._path, 2)
else:
output.Error('Can not locate file: %s' % self._path)
self._path = None
else:
output.Error('Urllist entries must have a "path" attribute.')
# parse tag_order into an array
# tag_order_ascii created for more readable logging
tag_order_ascii = []
if self._tag_order:
self._tag_order = self._tag_order.split(",")
for i in range(0, len(self._tag_order)):
element = self._tag_order[i].strip().lower()
self._tag_order[i]= element
tag_order_ascii.append(element.encode('ascii'))
output.Log('Input: From URLLIST tag order is "%s"' % tag_order_ascii, 0)
else:
output.Error('News Urllist configuration file must contain tag_order '
'to define Sitemap metatags.')
# verify all tag_order inputs are valid
tag_order_dict = {}
for tag in self._tag_order:
tag_order_dict[tag] = ""
if not ValidateAttributes('URLLIST', tag_order_dict, \
NEWS_SITEMAP_TAGS):
return
# loc tag must be present
loc_tag = False
for tag in self._tag_order:
if tag == 'loc':
loc_tag = True
break
if not loc_tag:
output.Error('News Urllist tag_order in configuration file '
'does not contain "loc" value: %s' % tag_order_ascii)
#end def __init__
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
# Open the file
(frame, file) = OpenFileForRead(self._path, 'URLLIST')
if not file:
return
# Iterate lines
linenum = 0
for line in file.readlines():
linenum = linenum + 1
# Strip comments and empty lines
if self._encoding:
line = encoder.WidenText(line, self._encoding)
line = line.strip()
if (not line) or line[0] == '#':
continue
# Split the line on tabs
url = NewsURL()
cols = line.split('\t')
for i in range(0,len(cols)):
cols[i] = cols[i].strip()
for i in range(0,len(cols)):
if cols[i]:
attr_value = cols[i]
if i < len(self._tag_order):
attr_name = self._tag_order[i]
try:
url.TrySetAttribute(attr_name, attr_value)
except ValueError:
output.Warn('Line %d: Unable to parse attribute: %s' %
(linenum, cols[i]))
# Pass it on
consumer(url, False)
file.close()
if frame:
frame.close()
#end def ProduceURLs
#end class InputNewsURLList
class InputDirectory:
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles a directory that acts as base for walking the filesystem.
"""
def __init__(self, attributes, base_url):
self._path = None # The directory
self._url = None # The URL equivalent
self._default_file = None
self._remove_empty_directories = False
if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
'default_file', 'remove_empty_directories')):
return
# Prep the path -- it MUST end in a sep
path = attributes.get('path')
if not path:
output.Error('Directory entries must have both "path" and "url" '
'attributes')
return
path = encoder.MaybeNarrowPath(path)
if not path.endswith(os.sep):
path = path + os.sep
if not os.path.isdir(path):
output.Error('Can not locate directory: %s' % path)
return
# Prep the URL -- it MUST end in a sep
url = attributes.get('url')
if not url:
output.Error('Directory entries must have both "path" and "url" '
'attributes')
return
url = URL.Canonicalize(url)
if not url.endswith('/'):
url = url + '/'
if not url.startswith(base_url):
url = urlparse.urljoin(base_url, url)
if not url.startswith(base_url):
output.Error('The directory URL "%s" is not relative to the '
'base_url: %s' % (url, base_url))
return
# Prep the default file -- it MUST be just a filename
file = attributes.get('default_file')
if file:
file = encoder.MaybeNarrowPath(file)
if os.sep in file:
output.Error('The default_file "%s" can not include path information.'
% file)
file = None
# Prep the remove_empty_directories -- default is false
remove_empty_directories = attributes.get('remove_empty_directories')
if remove_empty_directories:
if (remove_empty_directories == '1') or \
(remove_empty_directories.lower() == 'true'):
remove_empty_directories = True
elif (remove_empty_directories == '0') or \
(remove_empty_directories.lower() == 'false'):
remove_empty_directories = False
# otherwise the user set a non-default value
else:
output.Error('Configuration file remove_empty_directories '
'value is not recognized. Value must be true or false.')
return
else:
remove_empty_directories = False
self._path = path
self._url = url
self._default_file = file
self._remove_empty_directories = remove_empty_directories
if file:
output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
% (path, url, file), 2)
else:
output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
% (path, url), 2)
#end def __init__
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
if not self._path:
return
root_path = self._path
root_URL = self._url
root_file = self._default_file
remove_empty_directories = self._remove_empty_directories
def HasReadPermissions(path):
""" Verifies a given path has read permissions. """
stat_info = os.stat(path)
mode = stat_info[stat.ST_MODE]
if mode & stat.S_IREAD:
return True
else:
return None
def PerFile(dirpath, name):
"""
Called once per file.
Note that 'name' will occasionally be None -- for a directory itself
"""
# Pull a timestamp
url = URL()
isdir = False
try:
if name:
path = os.path.join(dirpath, name)
else:
path = dirpath
isdir = os.path.isdir(path)
time = None
if isdir and root_file:
file = os.path.join(path, root_file)
try:
            time = os.stat(file)[stat.ST_MTIME]
except OSError:
pass
if not time:
          time = os.stat(path)[stat.ST_MTIME]
url.lastmod = TimestampISO8601(time)
except OSError:
pass
except ValueError:
pass
# Build a URL
middle = dirpath[len(root_path):]
if os.sep != '/':
middle = middle.replace(os.sep, '/')
if middle:
middle = middle + '/'
if name:
middle = middle + name
if isdir:
middle = middle + '/'
url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))
# Suppress default files. (All the way down here so we can log it.)
if name and (root_file == name):
url.Log(prefix='IGNORED (default file)', level=2)
return
# Suppress directories when remove_empty_directories="true"
try:
if isdir:
if HasReadPermissions(path):
            if remove_empty_directories and len(os.listdir(path)) == 0:
output.Log('IGNORED empty directory %s' % str(path), level=1)
return
elif path == self._path:
output.Error('IGNORED configuration file directory input %s due '
'to file permissions' % self._path)
else:
output.Log('IGNORED files within directory %s due to file '
'permissions' % str(path), level=0)
except OSError:
pass
except ValueError:
pass
consumer(url, False)
#end def PerFile
def PerDirectory(ignore, dirpath, namelist):
"""
Called once per directory with a list of all the contained files/dirs.
"""
ignore = ignore # Avoid warnings of an unused parameter
if not dirpath.startswith(root_path):
output.Warn('Unable to decide what the root path is for directory: '
'%s' % dirpath)
return
for name in namelist:
PerFile(dirpath, name)
#end def PerDirectory
output.Log('Walking DIRECTORY "%s"' % self._path, 1)
PerFile(self._path, None)
os.path.walk(self._path, PerDirectory, None)
#end def ProduceURLs
#end class InputDirectory
class InputAccessLog:
"""
Each Input class knows how to yield a set of URLs from a data source.
This one handles access logs. It's non-trivial in that we want to
auto-detect log files in the Common Logfile Format (as used by Apache,
for instance) and the Extended Log File Format (as used by IIS, for
instance).
"""
def __init__(self, attributes):
self._path = None # The file path
self._encoding = None # Encoding of that file
self._is_elf = False # Extended Log File Format?
self._is_clf = False # Common Logfile Format?
self._elf_status = -1 # ELF field: '200'
self._elf_method = -1 # ELF field: 'HEAD'
self._elf_uri = -1 # ELF field: '/foo?bar=1'
self._elf_urifrag1 = -1 # ELF field: '/foo'
self._elf_urifrag2 = -1 # ELF field: 'bar=1'
if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
return
self._path = attributes.get('path')
self._encoding = attributes.get('encoding', ENC_UTF8)
if self._path:
self._path = encoder.MaybeNarrowPath(self._path)
if os.path.isfile(self._path):
output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
else:
output.Error('Can not locate file: %s' % self._path)
self._path = None
else:
output.Error('Accesslog entries must have a "path" attribute.')
#end def __init__
def RecognizeELFLine(self, line):
""" Recognize the Fields directive that heads an ELF file """
if not line.startswith('#Fields:'):
return False
fields = line.split(' ')
del fields[0]
for i in range(0, len(fields)):
field = fields[i].strip()
if field == 'sc-status':
self._elf_status = i
elif field == 'cs-method':
self._elf_method = i
elif field == 'cs-uri':
self._elf_uri = i
elif field == 'cs-uri-stem':
self._elf_urifrag1 = i
elif field == 'cs-uri-query':
self._elf_urifrag2 = i
output.Log('Recognized an Extended Log File Format file.', 2)
return True
#end def RecognizeELFLine
def GetELFLine(self, line):
""" Fetch the requested URL from an ELF line """
fields = line.split(' ')
count = len(fields)
# Verify status was Ok
if self._elf_status >= 0:
if self._elf_status >= count:
return None
if not fields[self._elf_status].strip() == '200':
return None
# Verify method was HEAD or GET
if self._elf_method >= 0:
if self._elf_method >= count:
return None
if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
return None
# Pull the full URL if we can
if self._elf_uri >= 0:
if self._elf_uri >= count:
return None
url = fields[self._elf_uri].strip()
if url != '-':
return url
# Put together a fragmentary URL
if self._elf_urifrag1 >= 0:
if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
return None
urlfrag1 = fields[self._elf_urifrag1].strip()
urlfrag2 = None
if self._elf_urifrag2 >= 0:
urlfrag2 = fields[self._elf_urifrag2]
if urlfrag1 and (urlfrag1 != '-'):
if urlfrag2 and (urlfrag2 != '-'):
urlfrag1 = urlfrag1 + '?' + urlfrag2
return urlfrag1
return None
#end def GetELFLine
def RecognizeCLFLine(self, line):
""" Try to tokenize a logfile line according to CLF pattern and see if
it works. """
match = ACCESSLOG_CLF_PATTERN.match(line)
recognize = match and (match.group(1) in ('HEAD', 'GET'))
if recognize:
output.Log('Recognized a Common Logfile Format file.', 2)
return recognize
#end def RecognizeCLFLine
def GetCLFLine(self, line):
""" Fetch the requested URL from a CLF line """
match = ACCESSLOG_CLF_PATTERN.match(line)
if match:
request = match.group(1)
if request in ('HEAD', 'GET'):
return match.group(2)
return None
#end def GetCLFLine
def ProduceURLs(self, consumer):
""" Produces URLs from our data source, hands them in to the consumer. """
# Open the file
(frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
if not file:
return
# Iterate lines
for line in file.readlines():
if self._encoding:
line = encoder.WidenText(line, self._encoding)
line = line.strip()
# If we don't know the format yet, try them both
if (not self._is_clf) and (not self._is_elf):
self._is_elf = self.RecognizeELFLine(line)
self._is_clf = self.RecognizeCLFLine(line)
# Digest the line
match = None
if self._is_elf:
match = self.GetELFLine(line)
elif self._is_clf:
match = self.GetCLFLine(line)
if not match:
continue
# Pass it on
url = URL()
url.TrySetAttribute('loc', match)
consumer(url, True)
file.close()
if frame:
frame.close()
#end def ProduceURLs
#end class InputAccessLog
class FilePathGenerator:
"""
This class generates filenames in a series, upon request.
  You can request any iteration number at any time; you don't
  have to go in order.
Example of iterations for '/path/foo.xml.gz':
0 --> /path/foo.xml.gz
1 --> /path/foo1.xml.gz
2 --> /path/foo2.xml.gz
_index.xml --> /path/foo_index.xml
"""
def __init__(self):
self.is_gzip = False # Is this a GZIP file?
self._path = None # '/path/'
self._prefix = None # 'foo'
self._suffix = None # '.xml.gz'
#end def __init__
def Preload(self, path):
""" Splits up a path into forms ready for recombination. """
path = encoder.MaybeNarrowPath(path)
# Get down to a base name
path = os.path.normpath(path)
base = os.path.basename(path).lower()
if not base:
output.Error('Couldn\'t parse the file path: %s' % path)
return False
lenbase = len(base)
# Recognize extension
lensuffix = 0
compare_suffix = ['.xml', '.xml.gz', '.gz']
for suffix in compare_suffix:
if base.endswith(suffix):
lensuffix = len(suffix)
break
if not lensuffix:
output.Error('The path "%s" doesn\'t end in a supported file '
'extension.' % path)
return False
self.is_gzip = suffix.endswith('.gz')
# Split the original path
lenpath = len(path)
self._path = path[:lenpath-lenbase]
self._prefix = path[lenpath-lenbase:lenpath-lensuffix]
self._suffix = path[lenpath-lensuffix:]
return True
#end def Preload
def GeneratePath(self, instance):
""" Generates the iterations, as described above. """
prefix = self._path + self._prefix
if type(instance) == types.IntType:
if instance:
return '%s%d%s' % (prefix, instance, self._suffix)
return prefix + self._suffix
return prefix + instance
#end def GeneratePath
def GenerateURL(self, instance, root_url):
""" Generates iterations, but as a URL instead of a path. """
prefix = root_url + self._prefix
retval = None
if type(instance) == types.IntType:
if instance:
retval = '%s%d%s' % (prefix, instance, self._suffix)
else:
retval = prefix + self._suffix
else:
retval = prefix + instance
return URL.Canonicalize(retval)
#end def GenerateURL
def GenerateWildURL(self, root_url):
""" Generates a wildcard that should match all our iterations """
prefix = URL.Canonicalize(root_url + self._prefix)
temp = URL.Canonicalize(prefix + self._suffix)
suffix = temp[len(prefix):]
return prefix + '*' + suffix
  #end def GenerateWildURL
#end class FilePathGenerator
class PerURLStatistics:
""" Keep track of some simple per-URL statistics, like file extension. """
def __init__(self):
self._extensions = {} # Count of extension instances
#end def __init__
def Consume(self, url):
""" Log some stats for the URL. At the moment, that means extension. """
if url and url.loc:
(scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
if not path:
return
# Recognize directories
if path.endswith('/'):
if self._extensions.has_key('/'):
self._extensions['/'] = self._extensions['/'] + 1
else:
self._extensions['/'] = 1
return
# Strip to a filename
i = path.rfind('/')
if i >= 0:
assert i < len(path)
path = path[i:]
# Find extension
i = path.rfind('.')
if i > 0:
assert i < len(path)
ext = path[i:].lower()
if self._extensions.has_key(ext):
self._extensions[ext] = self._extensions[ext] + 1
else:
self._extensions[ext] = 1
else:
if self._extensions.has_key('(no extension)'):
self._extensions['(no extension)'] = self._extensions[
'(no extension)'] + 1
else:
self._extensions['(no extension)'] = 1
#end def Consume
def Log(self):
""" Dump out stats to the output. """
if len(self._extensions):
output.Log('Count of file extensions on URLs:', 1)
set = self._extensions.keys()
set.sort()
for ext in set:
output.Log(' %7d %s' % (self._extensions[ext], ext), 1)
  #end def Log
#end class PerURLStatistics
class Sitemap(xml.sax.handler.ContentHandler):
"""
This is the big workhorse class that processes your inputs and spits
  out sitemap files. It is built as a SAX handler for setup purposes.
That is, it processes an XML stream to bring itself up.
"""
def __init__(self, suppress_notify):
xml.sax.handler.ContentHandler.__init__(self)
self._filters = [] # Filter objects
self._inputs = [] # Input objects
self._urls = {} # Maps URLs to count of dups
self._set = [] # Current set of URLs
self._filegen = None # Path generator for output files
self._wildurl1 = None # Sitemap URLs to filter out
self._wildurl2 = None # Sitemap URLs to filter out
self._sitemaps = 0 # Number of output files
# We init _dup_max to 2 so the default priority is 0.5 instead of 1.0
self._dup_max = 2 # Max number of duplicate URLs
self._stat = PerURLStatistics() # Some simple stats
self._in_site = False # SAX: are we in a Site node?
    self._in_site_ever = False # SAX: were we ever in a Site?
self._default_enc = None # Best encoding to try on URLs
self._base_url = None # Prefix to all valid URLs
self._store_into = None # Output filepath
self._sitemap_type = None # Sitemap type (web, mobile or news)
self._suppress = suppress_notify # Suppress notify of servers
#end def __init__
def ValidateBasicConfig(self):
""" Verifies (and cleans up) the basic user-configurable options. """
all_good = True
if self._default_enc:
encoder.SetUserEncoding(self._default_enc)
# Canonicalize the base_url
if all_good and not self._base_url:
output.Error('A site needs a "base_url" attribute.')
all_good = False
if all_good and not URL.IsAbsolute(self._base_url):
output.Error('The "base_url" must be absolute, not relative: %s' %
self._base_url)
all_good = False
if all_good:
self._base_url = URL.Canonicalize(self._base_url)
if not self._base_url.endswith('/'):
self._base_url = self._base_url + '/'
output.Log('BaseURL is set to: %s' % self._base_url, 2)
# Load store_into into a generator
if all_good:
if self._store_into:
self._filegen = FilePathGenerator()
if not self._filegen.Preload(self._store_into):
all_good = False
else:
output.Error('A site needs a "store_into" attribute.')
all_good = False
# Ask the generator for patterns on what its output will look like
if all_good:
self._wildurl1 = self._filegen.GenerateWildURL(self._base_url)
self._wildurl2 = self._filegen.GenerateURL(SITEINDEX_SUFFIX,
self._base_url)
# Unify various forms of False
if all_good:
if self._suppress:
if (type(self._suppress) == types.StringType) or (type(self._suppress)
== types.UnicodeType):
if (self._suppress == '0') or (self._suppress.lower() == 'false'):
self._suppress = False
# Clean up the sitemap_type
if all_good:
match = False
# If sitemap_type is not specified, default to web sitemap
if not self._sitemap_type:
self._sitemap_type = 'web'
else:
self._sitemap_type = self._sitemap_type.lower()
for pattern in SITEMAP_TYPES:
if self._sitemap_type == pattern:
match = True
break
if not match:
output.Error('The "sitemap_type" value must be "web", "mobile" '
'or "news": %s' % self._sitemap_type)
all_good = False
output.Log('The Sitemap type is %s Sitemap.' % \
self._sitemap_type.upper(), 0)
# Done
if not all_good:
output.Log('See "example_config.xml" for more information.', 0)
return all_good
#end def ValidateBasicConfig
def Generate(self):
""" Run over all the Inputs and ask them to Produce """
# Run the inputs
for input in self._inputs:
input.ProduceURLs(self.ConsumeURL)
# Do last flushes
if len(self._set):
self.FlushSet()
if not self._sitemaps:
output.Warn('No URLs were recorded, writing an empty sitemap.')
self.FlushSet()
# Write an index as needed
if self._sitemaps > 1:
self.WriteIndex()
# Notify
self.NotifySearch()
# Dump stats
self._stat.Log()
#end def Generate
def ConsumeURL(self, url, allow_fragment):
"""
All per-URL processing comes together here, regardless of Input.
Here we run filters, remove duplicates, spill to disk as needed, etc.
"""
if not url:
return
# Validate
if not url.Validate(self._base_url, allow_fragment):
return
# Run filters
accept = None
for filter in self._filters:
accept = filter.Apply(url)
if accept != None:
break
if not (accept or (accept == None)):
url.Log(prefix='FILTERED', level=2)
return
    # Ignore our own output URLs
if fnmatch.fnmatchcase(url.loc, self._wildurl1) or fnmatch.fnmatchcase(
url.loc, self._wildurl2):
url.Log(prefix='IGNORED (output file)', level=2)
return
# Note the sighting
hash = url.MakeHash()
if self._urls.has_key(hash):
dup = self._urls[hash]
if dup > 0:
dup = dup + 1
self._urls[hash] = dup
if self._dup_max < dup:
self._dup_max = dup
url.Log(prefix='DUPLICATE')
return
# Acceptance -- add to set
self._urls[hash] = 1
self._set.append(url)
self._stat.Consume(url)
url.Log()
# Flush the set if needed
if len(self._set) >= MAXURLS_PER_SITEMAP:
self.FlushSet()
#end def ConsumeURL
def FlushSet(self):
"""
Flush the current set of URLs to the output. This is a little
slow because we like to sort them all and normalize the priorities
before dumping.
"""
# Determine what Sitemap header to use (News or General)
if self._sitemap_type == 'news':
sitemap_header = NEWS_SITEMAP_HEADER
else:
sitemap_header = GENERAL_SITEMAP_HEADER
# Sort and normalize
output.Log('Sorting and normalizing collected URLs.', 1)
self._set.sort()
for url in self._set:
hash = url.MakeHash()
dup = self._urls[hash]
if dup > 0:
self._urls[hash] = -1
if not url.priority:
url.priority = '%.4f' % (float(dup) / float(self._dup_max))
# Get the filename we're going to write to
filename = self._filegen.GeneratePath(self._sitemaps)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output filename.')
self._sitemaps = self._sitemaps + 1
output.Log('Writing Sitemap file "%s" with %d URLs' %
(filename, len(self._set)), 1)
# Write to it
frame = None
file = None
try:
if self._filegen.is_gzip:
        basename = os.path.basename(filename)
frame = open(filename, 'wb')
file = gzip.GzipFile(fileobj=frame, filename=basename, mode='wt')
else:
file = open(filename, 'wt')
file.write(sitemap_header)
for url in self._set:
url.WriteXML(file)
file.write(SITEMAP_FOOTER)
file.close()
if frame:
frame.close()
frame = None
file = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
# Flush
self._set = []
#end def FlushSet
def WriteIndex(self):
""" Write the master index of all Sitemap files """
# Make a filename
filename = self._filegen.GeneratePath(SITEINDEX_SUFFIX)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output index filename.')
output.Log('Writing index file "%s" with %d Sitemaps' %
(filename, self._sitemaps), 1)
    # Determine what Sitemap index header to use (News or General)
    if self._sitemap_type == 'news':
      sitemap_index_header = NEWS_SITEINDEX_HEADER
    else:
      sitemap_index_header = GENERAL_SITEINDEX_HEADER
# Make a lastmod time
lastmod = TimestampISO8601(time.time())
# Write to it
try:
fd = open(filename, 'wt')
fd.write(sitemap_index_header)
for mapnumber in range(0,self._sitemaps):
# Write the entry
mapurl = self._filegen.GenerateURL(mapnumber, self._base_url)
mapattributes = { 'loc' : mapurl, 'lastmod' : lastmod }
fd.write(SITEINDEX_ENTRY % mapattributes)
fd.write(SITEINDEX_FOOTER)
fd.close()
fd = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
#end def WriteIndex
def NotifySearch(self):
""" Send notification of the new Sitemap(s) to the search engines. """
if self._suppress:
output.Log('Search engine notification is suppressed.', 1)
return
output.Log('Notifying search engines.', 1)
# Override the urllib's opener class with one that doesn't ignore 404s
class ExceptionURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
output.Log('HTTP error %d: %s' % (errcode, errmsg), 2)
raise IOError
#end def http_error_default
#end class ExceptionURLOpener
old_opener = urllib._urlopener
urllib._urlopener = ExceptionURLopener()
# Build the URL we want to send in
if self._sitemaps > 1:
url = self._filegen.GenerateURL(SITEINDEX_SUFFIX, self._base_url)
else:
url = self._filegen.GenerateURL(0, self._base_url)
# Test if we can hit it ourselves
try:
u = urllib.urlopen(url)
u.close()
except IOError:
      output.Error('When attempting to access our generated Sitemap at the '
                   'following URL:\n %s\n we failed to read it. Please '
                   'verify the store_into path you specified in\n'
                   ' your configuration file is web-accessible. Consult '
                   'the FAQ for more\n information.' % url)
      output.Warn('Proceeding to notify with an unverifiable URL.')
# Cycle through notifications
# To understand this, see the comment near the NOTIFICATION_SITES comment
for ping in NOTIFICATION_SITES:
query_map = ping[3]
query_attr = ping[5]
query_map[query_attr] = url
query = urllib.urlencode(query_map)
notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
# Send the notification
output.Log('Notifying: %s' % ping[1], 0)
output.Log('Notification URL: %s' % notify, 2)
try:
u = urllib.urlopen(notify)
u.read()
u.close()
except IOError:
output.Warn('Cannot contact: %s' % ping[1])
if old_opener:
urllib._urlopener = old_opener
#end def NotifySearch
def startElement(self, tag, attributes):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
if self._in_site:
output.Error('Can not nest Site entries in the configuration.')
else:
self._in_site = True
if not ValidateAttributes('SITE', attributes,
('verbose', 'default_encoding', 'base_url', 'store_into',
'suppress_search_engine_notify', 'sitemap_type')):
return
verbose = attributes.get('verbose', 0)
if verbose:
output.SetVerbose(verbose)
self._default_enc = attributes.get('default_encoding')
self._base_url = attributes.get('base_url')
self._store_into = attributes.get('store_into')
self._sitemap_type= attributes.get('sitemap_type')
if not self._suppress:
self._suppress = attributes.get('suppress_search_engine_notify',
False)
self.ValidateBasicConfig()
elif tag == 'filter':
self._filters.append(Filter(attributes))
elif tag == 'url':
self._inputs.append(InputURL(attributes))
elif tag == 'urllist':
for attributeset in ExpandPathAttribute(attributes, 'path'):
if self._sitemap_type == 'news':
self._inputs.append(InputNewsURLList(attributeset))
else:
self._inputs.append(InputURLList(attributeset))
elif tag == 'directory':
self._inputs.append(InputDirectory(attributes, self._base_url))
elif tag == 'accesslog':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputAccessLog(attributeset))
else:
output.Error('Unrecognized tag in the configuration: %s' % tag)
#end def startElement
def endElement(self, tag):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
assert self._in_site
self._in_site = False
self._in_site_ever = True
#end def endElement
def endDocument(self):
""" End of SAX, verify we can proceed. """
if not self._in_site_ever:
output.Error('The configuration must specify a "site" element.')
else:
if not self._inputs:
output.Warn('There were no inputs to generate a sitemap from.')
#end def endDocument
#end class Sitemap
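# Minimal configuration sketch for the SAX handler above (illustrative;
# example_config.xml in the original distribution is the authoritative
# reference):
#   <site base_url="http://www.example.com/" store_into="/tmp/sitemap.xml.gz">
#     <url href="http://www.example.com/"/>
#     <urllist path="urls.txt"/>
#   </site>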
def ValidateAttributes(tag, attributes, goodattributes):
""" Makes sure 'attributes' does not contain any attribute not
listed in 'goodattributes' """
all_good = True
for attr in attributes.keys():
if not attr in goodattributes:
output.Error('Unknown %s attribute: %s' % (tag, attr))
all_good = False
return all_good
#end def ValidateAttributes
def ExpandPathAttribute(src, attrib):
""" Given a dictionary of attributes, return a list of dictionaries
with all the same attributes except for the one named attrib.
That one, we treat as a file path and expand into all its possible
variations. """
# Do the path expansion. On any error, just return the source dictionary.
path = src.get(attrib)
if not path:
return [src]
  path = encoder.MaybeNarrowPath(path)
pathlist = glob.glob(path)
if not pathlist:
return [src]
# If this isn't actually a dictionary, make it one
if type(src) != types.DictionaryType:
tmp = {}
for key in src.keys():
tmp[key] = src[key]
src = tmp
# Create N new dictionaries
retval = []
for path in pathlist:
dst = src.copy()
dst[attrib] = path
retval.append(dst)
return retval
#end def ExpandPathAttribute
def OpenFileForRead(path, logtext):
""" Opens a text file, be it GZip or plain """
frame = None
file = None
if not path:
return (frame, file)
try:
if path.endswith('.gz'):
frame = open(path, 'rb')
file = gzip.GzipFile(fileobj=frame, mode='rt')
else:
file = open(path, 'rt')
if logtext:
output.Log('Opened %s file: %s' % (logtext, path), 1)
else:
output.Log('Opened file: %s' % path, 1)
except IOError:
output.Error('Can not open file: %s' % path)
return (frame, file)
#end def OpenFileForRead
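# Usage sketch (hypothetical path): gzipped and plain files are read the same way.
#   frame, f = OpenFileForRead('access.log.gz', 'access log')
#   if f:
#       data = f.read()
#       f.close()
#   if frame:
#       frame.close()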
def TimestampISO8601(t):
"""Seconds since epoch (1970-01-01) --> ISO 8601 time string."""
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))
#end def TimestampISO8601
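# Example: TimestampISO8601(0) returns '1970-01-01T00:00:00Z' (the Unix epoch).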
def CreateSitemapFromFile(configpath, suppress_notify):
""" Sets up a new Sitemap object from the specified configuration file. """
# Remember error count on the way in
num_errors = output.num_errors
# Rev up SAX to parse the config
sitemap = Sitemap(suppress_notify)
try:
output.Log('Reading configuration file: %s' % configpath, 0)
xml.sax.parse(configpath, sitemap)
except IOError:
output.Error('Cannot read configuration file: %s' % configpath)
except xml.sax._exceptions.SAXParseException, e:
output.Error('XML error in the config file (line %d, column %d): %s' %
(e._linenum, e._colnum, e.getMessage()))
except xml.sax._exceptions.SAXReaderNotAvailable:
output.Error('Some installs of Python 2.2 did not include complete support'
' for XML.\n Please try upgrading your version of Python'
' and re-running the script.')
# If we added any errors, return no sitemap
if num_errors == output.num_errors:
return sitemap
return None
#end def CreateSitemapFromFile
def ProcessCommandFlags(args):
"""
Parse command line flags per specified usage, pick off key, value pairs
All flags of type "--key=value" will be processed as __flags[key] = value,
"--option" will be processed as __flags[option] = option
"""
flags = {}
rkeyval = r'--(?P<key>\S*)[=](?P<value>\S*)' # --key=val
roption = r'--(?P<option>\S*)' # --key
r = '(' + rkeyval + ')|(' + roption + ')'
rc = re.compile(r)
for a in args:
try:
rcg = rc.search(a).groupdict()
# groupdict() contains every named group (None when unmatched), so test
# the values for None instead of merely checking that the keys exist.
if rcg['key'] is not None:
flags[rcg['key']] = rcg['value']
elif rcg['option'] is not None:
flags[rcg['option']] = rcg['option']
except AttributeError:
return None
return flags
#end def ProcessCommandFlags
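# A short sketch of the parsing above (hypothetical flag values):
#   ProcessCommandFlags(['--config=example.xml', '--testing'])
#   --> {'config': 'example.xml', 'testing': 'testing'}
#   ProcessCommandFlags(['not-a-flag']) --> None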
#
# __main__
#
if __name__ == '__main__':
flags = ProcessCommandFlags(sys.argv[1:])
if not flags or not flags.has_key('config') or flags.has_key('help'):
output.Log(__usage__, 0)
else:
suppress_notify = flags.has_key('testing')
sitemap = CreateSitemapFromFile(flags['config'], suppress_notify)
if not sitemap:
output.Log('Configuration file errors -- exiting.', 0)
else:
sitemap.Generate()
output.Log('Number of errors: %d' % output.num_errors, 1)
output.Log('Number of warnings: %d' % output.num_warns, 1)
|
hexlism/xx_net
|
refs/heads/master
|
python27/1.0/lib/encodings/utf_16_be.py
|
860
|
""" Python 'utf-16-be' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_be_encode
def decode(input, errors='strict'):
return codecs.utf_16_be_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_be_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_be_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_be_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-be',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
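### Usage sketch (standard codecs machinery, not specific to this module):
# u'abc'.encode('utf-16-be') yields '\x00a\x00b\x00c' (big-endian, no BOM);
# codecs.decode('\x00a\x00b\x00c', 'utf-16-be') round-trips back to u'abc'.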
|
ar45/django
|
refs/heads/master
|
tests/context_processors/views.py
|
346
|
from django.shortcuts import render
from .models import DebugObject
def request_processor(request):
return render(request, 'context_processors/request_attrs.html')
def debug_processor(request):
context = {'debug_objects': DebugObject.objects}
return render(request, 'context_processors/debug.html', context)
|
dmarteau/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsstringstatisticalsummary.py
|
30
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsStringStatisticalSummary.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '07/05/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsStringStatisticalSummary
)
from qgis.testing import unittest
class PyQgsStringStatisticalSummary(unittest.TestCase):
def testStats(self):
# we test twice, once with values added as a list and once using values
# added one-at-a-time
s = QgsStringStatisticalSummary()
self.assertEqual(s.statistics(), QgsStringStatisticalSummary.All)
strings = ['cc', 'aaaa', 'bbbbbbbb', 'aaaa', 'eeee', '', 'eeee', 'aaaa', '', 'dddd']
s.calculate(strings)
s2 = QgsStringStatisticalSummary()
for string in strings:
s2.addString(string)
s2.finalize()
self.assertEqual(s.count(), 10)
self.assertEqual(s2.count(), 10)
self.assertEqual(s.countDistinct(), 6)
self.assertEqual(s2.countDistinct(), 6)
self.assertEqual(set(s.distinctValues()), set(['cc', 'aaaa', 'bbbbbbbb', 'eeee', 'dddd', '']))
self.assertEqual(s2.distinctValues(), s.distinctValues())
self.assertEqual(s.countMissing(), 2)
self.assertEqual(s2.countMissing(), 2)
self.assertEqual(s.min(), 'aaaa')
self.assertEqual(s2.min(), 'aaaa')
self.assertEqual(s.max(), 'eeee')
self.assertEqual(s2.max(), 'eeee')
self.assertEqual(s.minLength(), 0)
self.assertEqual(s2.minLength(), 0)
self.assertEqual(s.maxLength(), 8)
self.assertEqual(s2.maxLength(), 8)
self.assertEqual(s.meanLength(), 3.4)
self.assertEqual(s2.meanLength(), 3.4)
self.assertEqual(s.minority(), 'bbbbbbbb')
self.assertEqual(s2.minority(), 'bbbbbbbb')
self.assertEqual(s.majority(), 'aaaa')
self.assertEqual(s2.majority(), 'aaaa')
# extra check for minLength without empty strings
s.calculate(['1111111', '111', '11111'])
self.assertEqual(s.minLength(), 3)
def testIndividualStats(self):
# tests calculation of statistics one at a time, to make sure statistic calculations are not
# dependent on each other
tests = [{'stat': QgsStringStatisticalSummary.Count, 'expected': 10},
{'stat': QgsStringStatisticalSummary.CountDistinct, 'expected': 6},
{'stat': QgsStringStatisticalSummary.CountMissing, 'expected': 2},
{'stat': QgsStringStatisticalSummary.Min, 'expected': 'aaaa'},
{'stat': QgsStringStatisticalSummary.Max, 'expected': 'eeee'},
{'stat': QgsStringStatisticalSummary.MinimumLength, 'expected': 0},
{'stat': QgsStringStatisticalSummary.MaximumLength, 'expected': 8},
{'stat': QgsStringStatisticalSummary.MeanLength, 'expected': 3.4},
{'stat': QgsStringStatisticalSummary.Minority, 'expected': 'bbbbbbbb'},
{'stat': QgsStringStatisticalSummary.Majority, 'expected': 'aaaa'},
]
s = QgsStringStatisticalSummary()
s3 = QgsStringStatisticalSummary()
for t in tests:
# test constructor
s2 = QgsStringStatisticalSummary(t['stat'])
self.assertEqual(s2.statistics(), t['stat'])
s.setStatistics(t['stat'])
s3.setStatistics(t['stat'])
self.assertEqual(s.statistics(), t['stat'])
strings = ['cc', 'aaaa', 'bbbbbbbb', 'aaaa', 'eeee', '', 'eeee', 'aaaa', '', 'dddd']
s.calculate(strings)
s3.reset()
for string in strings:
s3.addString(string)
s3.finalize()
self.assertEqual(s.statistic(t['stat']), t['expected'])
self.assertEqual(s3.statistic(t['stat']), t['expected'])
# display name
self.assertTrue(len(QgsStringStatisticalSummary.displayName(t['stat'])) > 0)
def testVariantStats(self):
s = QgsStringStatisticalSummary()
self.assertEqual(s.statistics(), QgsStringStatisticalSummary.All)
s.calculateFromVariants(['cc', 5, 'bbbb', 'aaaa', 'eeee', 6, 9, '9', ''])
self.assertEqual(s.count(), 6)
self.assertEqual(set(s.distinctValues()), set(['cc', 'aaaa', 'bbbb', 'eeee', '', '9']))
self.assertEqual(s.countMissing(), 1)
self.assertEqual(s.min(), '9')
self.assertEqual(s.max(), 'eeee')
if __name__ == '__main__':
unittest.main()
|
Imvoo/MiniZinc-server
|
refs/heads/master
|
server.py
|
1
|
#imports
import pymzn
import os
import re
import tempfile
import shutil
import time
from threading import Thread
from eventlet.green import subprocess
from flask import Flask, json, Response, request, render_template
from flask_socketio import SocketIO, emit
import eventlet
eventlet.monkey_patch(all=True)
#setup
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
#sockets
socketio = SocketIO(app)
#REST
folder = 'models' #where the .mzn files are stored
models = []
for file in os.listdir(folder):
if file.endswith('.mzn'):
models.append(file)
def FindArgs(model):
output = [dict(),dict()] #[0] are inputs, [1] are outputs
file = open(folder + "/" + model + ".mzn")
for line in file:
line = line.split('%', 1)[0]
if re.compile("^.*:.\w+;").match(line):
if line.find("var") == -1: #an input
tokens = re.compile('\w+').findall(line)
if (tokens[0] == 'array'):
output[0][tokens[-1]] = 'array(' + tokens[-2] + ')'
else:
output[0][tokens[-1]] = tokens[-2]
else: #an output
tokens = re.compile('\w+').findall(line)
if (tokens[0] == 'array'):
output[1][tokens[-1]] = 'array(' + tokens[-2] + ')'
else:
output[1][tokens[-1]] = tokens[-2]
return output
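# A sketch of the regex parsing above (hypothetical model lines):
#   "int: n;" is recorded as an input: output[0]['n'] == 'int'
#   "var int: x;" is recorded as an output: output[1]['x'] == 'int'
#   "array[1..4] of int: a;" becomes output[0]['a'] == 'array(int)'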
def FindArgsProper(model):
directory = os.path.dirname(os.path.realpath(__file__))
jsonArgs = ''
with subprocess.Popen(["mzn2fzn", "--model-interface-only", folder + '/' + model + ".mzn"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p: # collect the model's JSON interface from stdout
for line in p.stdout:
jsonArgs += line
return json.loads(jsonArgs)
@app.route('/get_template/<string:model>')
def GetTemplate(model):
template = {
"error": "no template found"
}
for file in os.listdir('app_templates'):
if file == model + '.json':
tempFile = open('./app_templates/' + model + '.json', 'r')
template = json.load(tempFile)
return json.jsonify(template)
@app.route('/save_template', methods=['GET', 'POST'])
def SaveTemplate():
if not os.path.exists('./app_templates/'):
os.makedirs('./app_templates/', exist_ok=True)
modelName = request.json['name']
del request.json['name']
file = open('./app_templates/' + modelName + '.json', 'w')
json.dump(request.json, file, indent=4)
file.close()
return "Success"
@app.route('/models')
def Allmodels():
return json.jsonify(models)
@app.route('/models/<string:model>')
@app.route('/models/<string:model>.mzn')
@app.route('/models/<string:model>.json')
def Arguments(model):
if (model+".mzn" in models):
tmpArgs = FindArgsProper(model)
return json.jsonify(tmpArgs)
else:
return json.jsonify(error="no model found")
#REST
# Input models mustn't contain 'output' items, or the dzn parsing below breaks.
@app.route('/solve/<string:model>')
def Model(model):
mzn_args = ''
for p in request.args.keys():
mzn_args += str(p) + "=" + str(request.args.get(p)) + ";"
if (model+".mzn" in models):
def output_line():
directory = os.path.dirname(os.path.realpath(__file__))
realPath = directory + "/" + folder + "/" + model+".mzn"
# TODO: move this into its own separate process and use the real path.
with subprocess.Popen(["minizinc", folder + "/" + model + ".mzn", "-a", "-D", mzn_args],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p: #-a outputs all solutions
allSolutions = []
currentSolution = dict()
markup = ['----------']
finish = ['==========']
for line in p.stdout:
if line.rstrip() in markup: #each new solution is a new JSON object
if currentSolution: # If currentSolution is not empty
allSolutions.append(currentSolution.copy())
currentSolution.clear()
elif line.rstrip() in finish:
yield str(allSolutions).replace("\'", "\"")
else:
solution = pymzn.parse_dzn(line) #use pymzn to turn output into nice JSON objects
currentSolution.update(solution)
return Response(output_line(), mimetype='text/json')
else:
return json.jsonify(error="no model found")
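# A sketch of the stream parsed by output_line above (typical `minizinc -a` output):
#   x = 1;
#   ----------
#   x = 2;
#   ----------
#   ==========
# Each '----------' closes one solution dict; '==========' signals that the
# search space is exhausted, at which point all solutions are emitted as JSON.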
# TODO: Unsure if this is safe security wise, have to look into it.
# aka. CORS request.
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
return response
#sockets
@app.route('/stream/<string:model>')
def stream(model):
arguments = {
'model': model,
}
for arg in request.args.keys():
arguments[arg] = request.args.get(arg)
return render_template('index.html', **arguments)
user_dict = dict()
@socketio.on('request_solution')
def request_solution(data):
mzn_args = ''
for key in data:
if key != 'model':
if 'dim' in data[key]:
if data[key]['dim'] == 2:
mzn_args += key + "=["
for row in data[key]['value']:
mzn_args += "| "
mzn_args += ', '.join(map(str,row))
mzn_args += ' '
mzn_args += " |];"
else:
mzn_args += key + "=" + str(data[key]['value']) + ";"
else:
mzn_args += key + "=" + str(data[key]['value']) + ";"
with tempfile.TemporaryDirectory() as tmpDirName:
# Copy model file to temp folder.
shutil.copy2(folder + '/' + data['model']+".mzn", tmpDirName + '/')
# Create data file in temp folder to feed into MiniZinc.
tmpFile = tempfile.NamedTemporaryFile(suffix='.dzn', delete=False, dir=tmpDirName)
tmpFile.seek(0)
tmpFile.write(str.encode(mzn_args.replace('\'', '\"')))
tmpFile.truncate()
tmpFile.close()
with subprocess.Popen(["minizinc", tmpDirName + '/' + data['model']+".mzn", "-a", "-d", tmpFile.name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as p: #-a outputs all solutions
user_dict[request.sid] = p
currentSolution = dict()
markup = ['----------','==========']
for line in p.stdout:
if line.rstrip() in markup: #each new solution is a new JSON object
if currentSolution: # If currentSolution is not empty
# This isn't actually needed, but oh well :D
thread = Thread(target=sendPacket, kwargs=currentSolution.copy())
thread.start()
# THIS DELAY RIGHT HERE...
# LITERALLY HOURS SPENT TRYING TO WORK OUT WHY PACKETS AREN'T SENDING...
# AND THIS FIXES IT???
# Oh well, just don't remove this line and we'll be fine.
# I tried adding an extra 0, but it sends too fast with it, so this is
# a good amount.
# I think the reason this works is it gives a chance for flask-socketio to
# actually send out the packets instead of trying to compute a solution.
# This also stops the front-end lagging as not all the packets come in
# at the EXACT same time :).
time.sleep(0.01)
currentSolution.clear()
else:
solution = pymzn.parse_dzn(line) #use pymzn to turn output into nice JSON objects
currentSolution.update(solution)
def sendPacket(**currentSolution):
socketio.emit('solution', currentSolution)
@socketio.on('kill_solution')
def kill_solution():
p = user_dict[request.sid]
p.kill()
#run
if __name__ == '__main__':
socketio.run(app)
|
BorgERP/borg-erp-6of3
|
refs/heads/master
|
addons/hr_contract/hr_contract.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields, osv
class hr_employee(osv.osv):
_name = "hr.employee"
_description = "Employee"
_inherit = "hr.employee"
def _get_latest_contract(self, cr, uid, ids, field_name, args, context=None):
res = {}
obj_contract = self.pool.get('hr.contract')
for emp in self.browse(cr, uid, ids, context=context):
contract_ids = obj_contract.search(cr, uid, [('employee_id','=',emp.id),], order='date_start', context=context)
if contract_ids:
res[emp.id] = contract_ids[-1:][0]
else:
res[emp.id] = False
return res
_columns = {
'manager': fields.boolean('Is a Manager'),
'medic_exam': fields.date('Medical Examination Date'),
'place_of_birth': fields.char('Place of Birth', size=30),
'children': fields.integer('Number of Children'),
'vehicle': fields.char('Company Vehicle', size=64),
'vehicle_distance': fields.integer('Home-Work Distance', help="In kilometers"),
'contract_ids': fields.one2many('hr.contract', 'employee_id', 'Contracts'),
'contract_id':fields.function(_get_latest_contract, string='Contract', type='many2one', relation="hr.contract", help='Latest contract of the employee'),
}
hr_employee()
class hr_contract_type(osv.osv):
_name = 'hr.contract.type'
_description = 'Contract Type'
_columns = {
'name': fields.char('Contract Type', size=32, required=True),
}
hr_contract_type()
class hr_contract(osv.osv):
_name = 'hr.contract'
_description = 'Contract'
_columns = {
'name': fields.char('Contract Reference', size=64, required=True),
'employee_id': fields.many2one('hr.employee', "Employee", required=True),
'department_id': fields.related('employee_id','department_id', type='many2one', relation='hr.department', string="Department", readonly=True),
'type_id': fields.many2one('hr.contract.type', "Contract Type", required=True),
'job_id': fields.many2one('hr.job', 'Job Title'),
'date_start': fields.date('Start Date', required=True),
'date_end': fields.date('End Date'),
'trial_date_start': fields.date('Trial Start Date'),
'trial_date_end': fields.date('Trial End Date'),
'working_hours': fields.many2one('resource.calendar','Working Schedule'),
'wage': fields.float('Wage', digits=(16,2), required=True, help="Basic Salary of the employee"),
'advantages': fields.text('Advantages'),
'notes': fields.text('Notes'),
'permit_no': fields.char('Work Permit No', size=256, required=False, readonly=False),
'visa_no': fields.char('Visa No', size=64, required=False, readonly=False),
'visa_expire': fields.date('Visa Expire Date'),
}
def _get_type(self, cr, uid, context=None):
type_ids = self.pool.get('hr.contract.type').search(cr, uid, [('name', '=', 'Employee')])
return type_ids and type_ids[0] or False
_defaults = {
'date_start': lambda *a: time.strftime("%Y-%m-%d"),
'type_id': _get_type
}
def _check_dates(self, cr, uid, ids, context=None):
for contract in self.read(cr, uid, ids, ['date_start', 'date_end'], context=context):
if contract['date_start'] and contract['date_end'] and contract['date_start'] > contract['date_end']:
return False
return True
_constraints = [
(_check_dates, 'Error! Contract start date must be earlier than contract end date.', ['date_start', 'date_end'])
]
hr_contract()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jcpowermac/ansible-modules-extras
|
refs/heads/devel
|
database/__init__.py
|
12133432
| |
digimarc/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/__init__.py
|
12133432
| |
MeGotsThis/BotGotsThis
|
refs/heads/master
|
tests/cache/__init__.py
|
12133432
| |
iNecas/katello
|
refs/heads/master
|
cli/test/katello/tests/utils/__init__.py
|
12133432
| |
912/M-new
|
refs/heads/master
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/tests/__init__.py
|
12133432
| |
kkreis/espressopp
|
refs/heads/master
|
src/interaction/AngularUniqueCosineSquared.py
|
5
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*************************************************
espressopp.interaction.AngularUniqueCosineSquared
*************************************************
Calculates the angular unique cosine squared interaction.
.. math::
U = K (\cos(\theta) - \cos(\theta_{0}))^2
.. function:: espressopp.interaction.AngularUniqueCosineSquared(K)
:param K: (default: 1.0)
:type K: real
.. function:: espressopp.interaction.FixedTripleAngleListAngularUniqueCosineSquared(system, ftcl, potential)
:param system:
:param ftcl:
:param potential:
:type system:
:type ftcl:
:type potential:
.. function:: espressopp.interaction.FixedTripleAngleListAngularUniqueCosineSquared.getFixedTripleList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedTripleAngleListAngularUniqueCosineSquared.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi
from espressopp.esutil import *
from espressopp.interaction.AngularUniquePotential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_AngularUniqueCosineSquared, \
interaction_FixedTripleAngleListAngularUniqueCosineSquared
class AngularUniqueCosineSquaredLocal(AngularUniquePotentialLocal, interaction_AngularUniqueCosineSquared):
def __init__(self, K=1.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_AngularUniqueCosineSquared, K)
class FixedTripleAngleListAngularUniqueCosineSquaredLocal(InteractionLocal, interaction_FixedTripleAngleListAngularUniqueCosineSquared):
def __init__(self, system, ftcl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedTripleAngleListAngularUniqueCosineSquared, system, ftcl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def getFixedTripleList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedTripleList(self)
if pmi.isController:
class AngularUniqueCosineSquared(AngularUniquePotential):
'The AngularUniqueCosineSquared potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.AngularUniqueCosineSquaredLocal',
pmiproperty = ['K']
)
class FixedTripleAngleListAngularUniqueCosineSquared(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedTripleAngleListAngularUniqueCosineSquaredLocal',
pmicall = ['setPotential','getFixedTripleList']
)
|
nju520/portia
|
refs/heads/master
|
slybot/slybot/tests/test_baseurl.py
|
24
|
"""
Tests for insert_base_url and get_base_url
"""
from unittest import TestCase
from slybot.baseurl import insert_base_url, get_base_url
from scrapely.htmlpage import HtmlPage
class TestApplyAnnotations(TestCase):
def test_insert_base_relative(self):
"""Replace relative base href"""
html_in = '<html><head><base href="products/"><body></body></html>'
html_target = '<html><head><base href="http://localhost:8000/products/" />\
<body></body></html>'
html_out = insert_base_url(html_in, "http://localhost:8000/")
self.assertEqual(html_out, html_target)
def test_insert_base_noreplace(self):
"""base tag dont need to be replaced"""
html_in = html_target = '<html><head><base href="http://localhost:8000/products/"><body></body></html>'
html_out = insert_base_url(html_in, "http://localhost:8000/users/blog.html")
self.assertEqual(html_out, html_target)
def test_insert_base_addbase(self):
"""add base tag when not present"""
html_in = '<html><head><meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\
<body></body></html>'
html_target = '<html><head><base href="http://localhost:8000/" />\
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\
<body></body></html>'
html_out = insert_base_url(html_in, "http://localhost:8000/")
self.assertEqual(html_out, html_target)
def test_insert_base_commented(self):
"""Test weird case when base tag is commented in origin"""
html_in = '<html><head><!-- <base href="http://example.com/"> --></head>\
<body>Body</body></html>'
html_target = '<html><head><base href="http://example.com/" />\
<!-- <base href="http://example.com/"> --></head><body>Body</body></html>'
html_out = insert_base_url(html_in, "http://example.com/")
self.assertEqual(html_out, html_target)
def test_insert_base_nohead(self):
"""Test base insert when no head element is present"""
html_in = '<html><body>Body</body></html>'
html_target = '<html>\n\
<head><base href="http://localhost:8000/" /></head>\n\
<body>Body</body></html>'
html_out = insert_base_url(html_in, "http://localhost:8000/")
self.assertEqual(html_out, html_target)
def test_get_base_url(self):
"""Basic get_base_url test"""
html = u'<html><head><base href="http://example.com/products/" />\
<body></body></html>'
page = HtmlPage("http://example.com/products/p19.html", body=html)
self.assertEqual(get_base_url(page), "http://example.com/products/")
def test_get_base_url_nobase(self):
"""Base tag does not exists"""
html = u'<html><head><body></body></html>'
page = HtmlPage("http://example.com/products/p19.html", body=html)
self.assertEqual(get_base_url(page), "http://example.com/products/p19.html")
def test_get_base_url_empty_basehref(self):
"""Base tag exists but href is empty"""
html = u'<html><head><base href="" />\
<body></body></html>'
url = "http://example.com/products/p19.html"
page = HtmlPage(url, body=html)
self.assertEqual(get_base_url(page), url)
|
Nashenas88/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/echo_close_data_wsh.py
|
258
|
#!/usr/bin/python
from mod_pywebsocket import msgutil
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
# This example handler accepts any request. See origin_check_wsh.py for how
# to reject access from untrusted scripts based on origin value.
pass # Always accept.
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
if isinstance(line, unicode):
if line == _GOODBYE_MESSAGE:
return
request.ws_stream.send_message(line, binary=False)
|
rixrix/servo
|
refs/heads/master
|
tests/wpt/harness/wptrunner/config.py
|
196
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import ConfigParser
import os
import sys
from collections import OrderedDict
here = os.path.split(__file__)[0]
class ConfigDict(dict):
def __init__(self, base_path, *args, **kwargs):
self.base_path = base_path
dict.__init__(self, *args, **kwargs)
def get_path(self, key, default=None):
if key not in self:
return default
path = self[key]
path = os.path.expanduser(path)
return os.path.abspath(os.path.join(self.base_path, path))
def read(config_path):
config_path = os.path.abspath(config_path)
config_root = os.path.split(config_path)[0]
parser = ConfigParser.SafeConfigParser()
success = parser.read(config_path)
assert config_path in success, success
subns = {"pwd": os.path.abspath(os.path.curdir)}
rv = OrderedDict()
for section in parser.sections():
rv[section] = ConfigDict(config_root)
for key in parser.options(section):
rv[section][key] = parser.get(section, key, False, subns)
return rv
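# An illustrative ini handled by read() (hypothetical wptrunner.ini contents):
#   [paths]
#   prefs = %(pwd)s/profiles
# read() returns an OrderedDict of section name -> ConfigDict, with %(pwd)s
# interpolated to the current working directory via the `subns` mapping.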
def path(argv=None):
if argv is None:
argv = []
path = None
for i, arg in enumerate(argv):
if arg == "--config":
if i + 1 < len(argv):
path = argv[i + 1]
elif arg.startswith("--config="):
path = arg.split("=", 1)[1]
if path is not None:
break
if path is None:
if os.path.exists("wptrunner.ini"):
path = os.path.abspath("wptrunner.ini")
else:
path = os.path.join(here, "..", "wptrunner.default.ini")
return os.path.abspath(path)
def load():
return read(path(sys.argv))
|
Workday/OpenFrame
|
refs/heads/master
|
native_client_sdk/src/build_tools/sdk_tools/third_party/__init__.py
|
175
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Third-party sdk tools packages."""
|
savoirfairelinux/sous-chef
|
refs/heads/dev
|
src/note/migrations/0010_auto_20170313_1442.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-13 18:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('note', '0009_auto_20170116_1543'),
]
operations = [
migrations.AlterField(
model_name='note',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Author'),
),
migrations.AlterField(
model_name='note',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='notes', to='note.NoteCategory', verbose_name='Category'),
),
migrations.AlterField(
model_name='note',
name='priority',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, related_name='notes', to='note.NotePriority', verbose_name='Priority'),
),
]
|
flisky/django-guardian
|
refs/heads/devel
|
guardian/testapp/tests/shortcuts_test.py
|
12
|
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.db.models.query import QuerySet
from django.test import TestCase
from guardian.shortcuts import get_perms_for_model
from guardian.core import ObjectPermissionChecker
from guardian.compat import get_user_model
from guardian.compat import get_user_permission_full_codename
from guardian.shortcuts import assign
from guardian.shortcuts import assign_perm
from guardian.shortcuts import remove_perm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_objects_for_user
from guardian.shortcuts import get_objects_for_group
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import NotUserNorGroup
from guardian.exceptions import WrongAppError
from guardian.testapp.tests.core_test import ObjectPermissionTestCase
from guardian.models import Group, Permission
import warnings
User = get_user_model()
user_app_label = User._meta.app_label
user_module_name = User._meta.module_name
class ShortcutsTests(ObjectPermissionTestCase):
def test_get_perms_for_model(self):
self.assertEqual(get_perms_for_model(self.user).count(), 3)
self.assertTrue(list(get_perms_for_model(self.user)) ==
list(get_perms_for_model(User)))
self.assertEqual(get_perms_for_model(Permission).count(), 3)
model_str = 'contenttypes.ContentType'
self.assertEqual(
sorted(get_perms_for_model(model_str).values_list()),
sorted(get_perms_for_model(ContentType).values_list()))
obj = ContentType()
self.assertEqual(
sorted(get_perms_for_model(model_str).values_list()),
sorted(get_perms_for_model(obj).values_list()))
class AssignPermTest(ObjectPermissionTestCase):
"""
Tests permission assigning for user/group and object.
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, assign_perm,
perm="change_object",
user_or_group="Not a Model",
obj=self.ctype)
def test_global_wrong_perm(self):
self.assertRaises(ValueError, assign_perm,
perm="change_site", # for global permissions must provide app_label
user_or_group=self.user)
def test_user_assign_perm(self):
assign_perm("change_contenttype", self.user, self.ctype)
assign_perm("change_contenttype", self.group, self.ctype)
self.assertTrue(self.user.has_perm("change_contenttype", self.ctype))
def test_group_assign_perm(self):
assign_perm("change_contenttype", self.group, self.ctype)
assign_perm("delete_contenttype", self.group, self.ctype)
check = ObjectPermissionChecker(self.group)
self.assertTrue(check.has_perm("change_contenttype", self.ctype))
self.assertTrue(check.has_perm("delete_contenttype", self.ctype))
def test_user_assign_perm_global(self):
perm = assign_perm("contenttypes.change_contenttype", self.user)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
self.assertTrue(isinstance(perm, Permission))
def test_group_assign_perm_global(self):
perm = assign_perm("contenttypes.change_contenttype", self.group)
self.assertTrue(self.user.has_perm("contenttypes.change_contenttype"))
self.assertTrue(isinstance(perm, Permission))
def test_deprecation_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
assign("contenttypes.change_contenttype", self.group)
self.assertEqual(len(warns), 1)
self.assertTrue(isinstance(warns[0].message, DeprecationWarning))
class RemovePermTest(ObjectPermissionTestCase):
"""
Tests object permissions removal.
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, remove_perm,
perm="change_object",
user_or_group="Not a Model",
obj=self.ctype)
def test_global_wrong_perm(self):
self.assertRaises(ValueError, remove_perm,
perm="change_site", # for global permissions must provide app_label
user_or_group=self.user)
def test_user_remove_perm(self):
# assign perm first
assign_perm("change_contenttype", self.user, self.ctype)
remove_perm("change_contenttype", self.user, self.ctype)
self.assertFalse(self.user.has_perm("change_contenttype", self.ctype))
def test_group_remove_perm(self):
# assign perm first
assign_perm("change_contenttype", self.group, self.ctype)
remove_perm("change_contenttype", self.group, self.ctype)
check = ObjectPermissionChecker(self.group)
self.assertFalse(check.has_perm("change_contenttype", self.ctype))
def test_user_remove_perm_global(self):
# assign perm first
perm = "contenttypes.change_contenttype"
assign_perm(perm, self.user)
remove_perm(perm, self.user)
self.assertFalse(self.user.has_perm(perm))
def test_group_remove_perm_global(self):
# assign perm first
perm = "contenttypes.change_contenttype"
assign_perm(perm, self.group)
remove_perm(perm, self.group)
app_label, codename = perm.split('.')
perm_obj = Permission.objects.get(codename=codename,
content_type__app_label=app_label)
self.assertFalse(perm_obj in self.group.permissions.all())
class GetPermsTest(ObjectPermissionTestCase):
"""
Tests get_perms function (already done at core tests but left here as a
placeholder).
"""
def test_not_model(self):
self.assertRaises(NotUserNorGroup, get_perms,
user_or_group=None,
obj=self.ctype)
def test_user(self):
perms_to_assign = ("change_contenttype",)
for perm in perms_to_assign:
assign_perm("change_contenttype", self.user, self.ctype)
perms = get_perms(self.user, self.ctype)
for perm in perms_to_assign:
self.assertTrue(perm in perms)
class GetUsersWithPermsTest(TestCase):
"""
Tests get_users_with_perms function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_empty(self):
result = get_users_with_perms(self.obj1)
self.assertTrue(isinstance(result, QuerySet))
self.assertEqual(list(result), [])
result = get_users_with_perms(self.obj1, attach_perms=True)
self.assertTrue(isinstance(result, dict))
self.assertFalse(bool(result))
def test_simple(self):
assign_perm("change_contenttype", self.user1, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user3, self.obj2)
result = get_users_with_perms(self.obj1)
result_vals = result.values_list('username', flat=True)
self.assertEqual(
set(result_vals),
set([user.username for user in (self.user1, self.user2)]),
)
def test_users_groups_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
result = get_users_with_perms(self.obj1).values_list('id',
flat=True)
self.assertEqual(
set(result),
set([u.id for u in (self.user1, self.user2)])
)
def test_users_groups_after_removal(self):
self.test_users_groups_perms()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1).values_list('id',
flat=True)
self.assertEqual(
set(result),
set([self.user2.id]),
)
def test_attach_perms(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
self.user3.groups.add(self.group3)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group3, self.obj2)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.user3, self.obj2)
# Check contenttype1
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {
self.user1: ["change_contenttype"],
self.user2: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
# Check contenttype2
result = get_users_with_perms(self.obj2, attach_perms=True)
expected = {
self.user3: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
def test_attach_groups_only_has_perms(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True)
expected = {self.user1: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_mixed(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj1)
assign_perm("delete_contenttype", self.user2, self.obj2)
assign_perm("change_contenttype", self.user3, self.obj2)
assign_perm("change_%s" % user_module_name, self.user3, self.user1)
result = get_users_with_perms(self.obj1)
self.assertEqual(
set(result),
set([self.user1, self.user2]),
)
def test_with_superusers(self):
admin = User.objects.create(username='admin', is_superuser=True)
assign_perm("change_contenttype", self.user1, self.obj1)
result = get_users_with_perms(self.obj1, with_superusers=True)
self.assertEqual(
set(result),
set([self.user1, admin]),
)
def test_without_group_users(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False)
expected = set([self.user2])
self.assertEqual(set(result), expected)
def test_without_group_users_but_perms_attached(self):
self.user1.groups.add(self.group1)
self.user2.groups.add(self.group2)
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.user2, self.obj1)
assign_perm("change_contenttype", self.group2, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False,
attach_perms=True)
expected = {self.user2: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_without_group_users_no_result(self):
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, attach_perms=True,
with_group_users=False)
expected = {}
self.assertEqual(result, expected)
def test_without_group_users_no_result_but_with_superusers(self):
admin = User.objects.create(username='admin', is_superuser=True)
self.user1.groups.add(self.group1)
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_users_with_perms(self.obj1, with_group_users=False,
with_superusers=True)
expected = [admin]
self.assertEqual(set(result), set(expected))
class GetGroupsWithPerms(TestCase):
"""
Tests get_groups_with_perms function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_empty(self):
result = get_groups_with_perms(self.obj1)
self.assertTrue(isinstance(result, QuerySet))
self.assertFalse(bool(result))
result = get_groups_with_perms(self.obj1, attach_perms=True)
self.assertTrue(isinstance(result, dict))
self.assertFalse(bool(result))
def test_simple(self):
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(len(result), 1)
self.assertEqual(result[0], self.group1)
def test_simple_after_removal(self):
self.test_simple()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(len(result), 0)
def test_simple_attach_perms(self):
assign_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
expected = {self.group1: ["change_contenttype"]}
self.assertEqual(result, expected)
def test_simple_attach_perms_after_removal(self):
self.test_simple_attach_perms()
remove_perm("change_contenttype", self.group1, self.obj1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
self.assertEqual(len(result), 0)
def test_mixed(self):
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group1, self.obj2)
assign_perm("change_%s" % user_module_name, self.group1, self.user3)
assign_perm("change_contenttype", self.group2, self.obj2)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("change_%s" % user_module_name, self.group3, self.user1)
result = get_groups_with_perms(self.obj1)
self.assertEqual(set(result), set([self.group1, self.group2]))
def test_mixed_attach_perms(self):
assign_perm("change_contenttype", self.group1, self.obj1)
assign_perm("change_contenttype", self.group1, self.obj2)
assign_perm("change_group", self.group1, self.group3)
assign_perm("change_contenttype", self.group2, self.obj2)
assign_perm("change_contenttype", self.group2, self.obj1)
assign_perm("delete_contenttype", self.group2, self.obj1)
assign_perm("change_group", self.group3, self.group1)
result = get_groups_with_perms(self.obj1, attach_perms=True)
expected = {
self.group1: ["change_contenttype"],
self.group2: ["change_contenttype", "delete_contenttype"],
}
self.assertEqual(result.keys(), expected.keys())
for key, perms in result.items():
self.assertEqual(set(perms), set(expected[key]))
class GetObjectsForUser(TestCase):
def setUp(self):
self.user = User.objects.create(username='joe')
self.group = Group.objects.create(name='group')
self.ctype = ContentType.objects.create(name='foo', model='bar',
app_label='fake-for-guardian-tests')
def test_superuser(self):
self.user.is_superuser = True
ctypes = ContentType.objects.all()
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes)
self.assertEqual(set(ctypes), set(objects))
def test_with_superuser_true(self):
self.user.is_superuser = True
ctypes = ContentType.objects.all()
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes, with_superuser=True)
self.assertEqual(set(ctypes), set(objects))
def test_with_superuser_false(self):
self.user.is_superuser = True
ctypes = ContentType.objects.all()
obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
assign_perm('change_contenttype', self.user, obj1)
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes, with_superuser=False)
self.assertEqual(set([obj1]), set(objects))
def test_anonymous(self):
self.user = AnonymousUser()
ctypes = ContentType.objects.all()
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes)
obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
assign_perm('change_contenttype', self.user, obj1)
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ctypes)
self.assertEqual(set([obj1]), set(objects))
def test_mixed_perms(self):
codenames = [
get_user_permission_full_codename('change'),
'auth.change_permission',
]
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, codenames)
def test_perms_with_mixed_apps(self):
codenames = [
get_user_permission_full_codename('change'),
'contenttypes.change_contenttype',
]
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, codenames)
def test_mixed_perms_and_klass(self):
self.assertRaises(MixedContentTypeError, get_objects_for_user,
self.user, ['auth.change_group'], User)
def test_no_app_label_nor_klass(self):
self.assertRaises(WrongAppError, get_objects_for_user, self.user,
['change_group'])
def test_empty_perms_sequence(self):
self.assertEqual(
set(get_objects_for_user(self.user, [], Group.objects.all())),
set()
)
def test_perms_single(self):
perm = 'auth.change_group'
assign_perm(perm, self.user, self.group)
self.assertEqual(
set(get_objects_for_user(self.user, perm)),
set(get_objects_for_user(self.user, [perm])))
def test_klass_as_model(self):
assign_perm('contenttypes.change_contenttype', self.user, self.ctype)
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'], ContentType)
self.assertEqual([obj.name for obj in objects], [self.ctype.name])
def test_klass_as_manager(self):
assign_perm('auth.change_group', self.user, self.group)
objects = get_objects_for_user(self.user, ['auth.change_group'],
Group.objects)
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_klass_as_queryset(self):
assign_perm('auth.change_group', self.user, self.group)
objects = get_objects_for_user(self.user, ['auth.change_group'],
Group.objects.all())
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_ensure_returns_queryset(self):
objects = get_objects_for_user(self.user, ['auth.change_group'])
self.assertTrue(isinstance(objects, QuerySet))
def test_simple(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm('change_group', self.user, group)
objects = get_objects_for_user(self.user, ['auth.change_group'])
self.assertEqual(len(objects), len(groups))
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects),
set(groups))
def test_multiple_perms_to_check(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm('auth.change_group', self.user, group)
assign_perm('auth.delete_group', self.user, groups[1])
objects = get_objects_for_user(self.user, ['auth.change_group',
'auth.delete_group'])
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set([groups[1].name]))
def test_multiple_perms_to_check_no_groups(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm('auth.change_group', self.user, group)
assign_perm('auth.delete_group', self.user, groups[1])
objects = get_objects_for_user(self.user, ['auth.change_group',
'auth.delete_group'], use_groups=False)
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set([groups[1].name]))
def test_any_of_multiple_perms_to_check(self):
group_names = ['group1', 'group2', 'group3']
groups = [Group.objects.create(name=name) for name in group_names]
assign_perm('auth.change_group', self.user, groups[0])
assign_perm('auth.delete_group', self.user, groups[2])
objects = get_objects_for_user(self.user, ['auth.change_group',
'auth.delete_group'], any_perm=True)
self.assertEqual(len(objects), 2)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list('name', flat=True)),
set([groups[0].name, groups[2].name]))
def test_groups_perms(self):
group1 = Group.objects.create(name='group1')
group2 = Group.objects.create(name='group2')
group3 = Group.objects.create(name='group3')
groups = [group1, group2, group3]
for group in groups:
self.user.groups.add(group)
# Objects to operate on
ctypes = list(ContentType.objects.all().order_by('id'))
assign_perm('change_contenttype', self.user, ctypes[0])
assign_perm('change_contenttype', self.user, ctypes[1])
assign_perm('delete_contenttype', self.user, ctypes[1])
assign_perm('delete_contenttype', self.user, ctypes[2])
assign_perm('change_contenttype', groups[0], ctypes[3])
assign_perm('change_contenttype', groups[1], ctypes[3])
assign_perm('change_contenttype', groups[2], ctypes[4])
assign_perm('delete_contenttype', groups[0], ctypes[0])
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set(ctypes[i].id for i in [0, 1, 3, 4]))
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype',
'contenttypes.delete_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set(ctypes[i].id for i in [0, 1]))
objects = get_objects_for_user(self.user,
['contenttypes.change_contenttype'])
self.assertEqual(
set(objects.values_list('id', flat=True)),
set(ctypes[i].id for i in [0, 1, 3, 4]))
class GetObjectsForGroup(TestCase):
"""
Tests get_objects_for_group function.
"""
def setUp(self):
self.obj1 = ContentType.objects.create(name='ct1', model='foo',
app_label='guardian-tests')
self.obj2 = ContentType.objects.create(name='ct2', model='bar',
app_label='guardian-tests')
self.obj3 = ContentType.objects.create(name='ct3', model='baz',
app_label='guardian-tests')
self.user1 = User.objects.create(username='user1')
self.user2 = User.objects.create(username='user2')
self.user3 = User.objects.create(username='user3')
self.group1 = Group.objects.create(name='group1')
self.group2 = Group.objects.create(name='group2')
self.group3 = Group.objects.create(name='group3')
def test_mixed_perms(self):
codenames = [
get_user_permission_full_codename('change'),
'auth.change_permission',
]
self.assertRaises(MixedContentTypeError, get_objects_for_group,
self.group1, codenames)
def test_perms_with_mixed_apps(self):
codenames = [
get_user_permission_full_codename('change'),
'contenttypes.contenttypes.change_contenttype',
]
self.assertRaises(MixedContentTypeError, get_objects_for_group,
self.group1, codenames)
def test_mixed_perms_and_klass(self):
self.assertRaises(MixedContentTypeError, get_objects_for_group,
self.group1, ['auth.change_group'], User)
def test_no_app_label_nor_klass(self):
self.assertRaises(WrongAppError, get_objects_for_group, self.group1,
['change_contenttype'])
def test_empty_perms_sequence(self):
self.assertEqual(
set(get_objects_for_group(self.group1, [], ContentType)),
set()
)
def test_perms_single(self):
perm = 'contenttypes.change_contenttype'
assign_perm(perm, self.group1, self.obj1)
self.assertEqual(
set(get_objects_for_group(self.group1, perm)),
set(get_objects_for_group(self.group1, [perm]))
)
def test_klass_as_model(self):
assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1,
['contenttypes.change_contenttype'], ContentType)
self.assertEqual([obj.name for obj in objects], [self.obj1.name])
def test_klass_as_manager(self):
assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1, ['change_contenttype'],
ContentType.objects)
self.assertEqual(list(objects), [self.obj1])
def test_klass_as_queryset(self):
assign_perm('contenttypes.change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1, ['change_contenttype'],
ContentType.objects.all())
self.assertEqual(list(objects), [self.obj1])
def test_ensure_returns_queryset(self):
objects = get_objects_for_group(self.group1, ['contenttypes.change_contenttype'])
self.assertTrue(isinstance(objects, QuerySet))
def test_simple(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('change_contenttype', self.group1, self.obj2)
objects = get_objects_for_group(self.group1, 'contenttypes.change_contenttype')
self.assertEqual(len(objects), 2)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects),
set([self.obj1, self.obj2]))
def test_simple_after_removal(self):
self.test_simple()
remove_perm('change_contenttype', self.group1, self.obj1)
objects = get_objects_for_group(self.group1, 'contenttypes.change_contenttype')
self.assertEqual(len(objects), 1)
self.assertEqual(objects[0], self.obj2)
def test_multiple_perms_to_check(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('delete_contenttype', self.group1, self.obj1)
assign_perm('change_contenttype', self.group1, self.obj2)
objects = get_objects_for_group(self.group1, [
'contenttypes.change_contenttype',
'contenttypes.delete_contenttype'])
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(objects[0], self.obj1)
def test_any_of_multiple_perms_to_check(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('delete_contenttype', self.group1, self.obj1)
assign_perm('add_contenttype', self.group1, self.obj2)
assign_perm('delete_contenttype', self.group1, self.obj3)
objects = get_objects_for_group(self.group1,
['contenttypes.change_contenttype',
'contenttypes.delete_contenttype'], any_perm=True)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual([obj for obj in objects.order_by('name')],
[self.obj1, self.obj3])
def test_results_for_different_groups_are_correct(self):
assign_perm('change_contenttype', self.group1, self.obj1)
assign_perm('delete_contenttype', self.group2, self.obj2)
self.assertEqual(set(get_objects_for_group(self.group1, 'contenttypes.change_contenttype')),
set([self.obj1]))
self.assertEqual(set(get_objects_for_group(self.group2, 'contenttypes.change_contenttype')),
set())
self.assertEqual(set(get_objects_for_group(self.group2, 'contenttypes.delete_contenttype')),
set([self.obj2]))
|
madscatt/sasmol
|
refs/heads/master
|
src/python/test_sasmol/test_sascalc/test_unit_sascalc_Prop_calcrmsd.py
|
1
|
'''
SASMOL: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from sasmol.test_sasmol.util import env, util
from unittest import main
from mocker import Mocker, MockerTestCase, ANY, ARGS, KWARGS
import sasmol.sasmol as sasmol
import numpy
import warnings; warnings.filterwarnings('ignore')
import os
floattype=os.environ['SASSIE_FLOATTYPE']
class Test_sascalc_Prop_calcrmsd(MockerTestCase):
def setUp(self):
self.o1=sasmol.SasMol(0)
self.o2=sasmol.SasMol(0)
def calc_exp(self):
c1 = numpy.array((self.o1.coor()[0]),floattype)
c2 = numpy.array((self.o2.coor()[0]),floattype)
return numpy.sqrt(numpy.sum((c1-c2)**2)/len(c1))
def test_null(self):
self.o1.setCoor(numpy.zeros((1,0,3),floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.zeros((1,0,3),floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
self.assertTrue(numpy.isnan(result_rmsd))
def test_one_overlap_atom(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[1.0, 2.0, 3.0]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = 0.0
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def test_one_nonoverlap_atom(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[4.0, 5.0, 6.0]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = 3.0*numpy.sqrt(3.0)
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def test_two_atoms(self):
self.o1.setCoor(numpy.array([[[7.0, 8.0, 9.0],[1.0, 3.0, 5.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[12.0, 53.0, 67.0],[76.0, 87.0, 96.0]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = self.calc_exp()
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def test_six_atoms(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0],[4.0, 5.0, 6.0],[7.0, 8.0, 9.0],[1.0, 3.0, 5.0],[2.0, 4.0, 6.0],[0.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[2.0, 12.0, 35.0],[12.0, 53.0, 67.0],[76.0, 87.0, 96.0],[12.0, 33.0, 52.0],[2.3, 4.3, 6.8],[0.0, 22.5,33.6]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = self.calc_exp()
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def test_six_atoms_inf1(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0],[4.0, 5.0, 6.0],[7.0, 8.0, 9.0],[1.0, 3.0, 5.0],[2.0, util.HUGE, 6.0],[0.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[2.0, 12.0, 35.0],[12.0, util.HUGE, 67.0],[76.0, 87.0, 96.0],[12.0, 33.0, 52.0],[2.3, 4.3, 6.8],[0.0, 22.5,33.6]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = util.INF
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def test_6_atoms_inf2(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0],[util.INF, 5.0, 6.0],[7.0, 8.0, 9.0],[1.0, 3.0, 5.0],[2.0, 4.0, 6.0],[0.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[2.0, 12.0, 35.0],[12.0, 53.0, 67.0],[76.0, 87.0, util.INF],[12.0, 33.0, 52.0],[2.3, 4.3, 6.8],[0.0, 22.5,33.6]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = util.INF
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def test_6_atoms_nan(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0],[util.NAN, 5.0, 6.0],[7.0, 8.0, 9.0],[1.0, 3.0, 5.0],[2.0, 4.0, 6.0],[0.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[2.0, 12.0, 35.0],[12.0, 53.0, 67.0],[76.0, 87.0, util.NAN],[12.0, 33.0, 52.0],[2.3, 4.3, 6.8],[0.0, 22.5,33.6]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
self.assertTrue(numpy.isnan(result_rmsd))
def test_6_atoms_tiny(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0],[util.TINY, 5.0, 6.0],[7.0, 8.0, 9.0],[1.0, 3.0, 5.0],[2.0, 4.0, 6.0],[0.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[2.0, 12.0, 35.0],[12.0, 53.0, 67.0],[76.0, 87.0, util.TINY],[12.0, 33.0, 52.0],[2.3, 4.3, 6.8],[0.0, 22.5,33.6]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = self.calc_exp()
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def test_6_atoms_zero(self):
self.o1.setCoor(numpy.array([[[1.0, 2.0, 3.0],[util.ZERO, 5.0, 6.0],[7.0, 8.0, 9.0],[1.0, 3.0, 5.0],[2.0, 4.0, 6.0],[0.0, 2.0, 3.0]]],floattype))
self.o1.setNatoms(len(self.o1._coor[0]))
self.o2.setCoor(numpy.array([[[2.0, 12.0, 35.0],[12.0, 53.0, 67.0],[76.0, 87.0, util.ZERO],[12.0, 33.0, 52.0],[2.3, 4.3, 6.8],[0.0, 22.5,33.6]]],floattype))
self.o2.setNatoms(len(self.o2._coor[0]))
result_rmsd = self.o1.calcrmsd(self.o2)
expected_rmsd = self.calc_exp()
self.assertAlmostEqual(expected_rmsd, result_rmsd)
def tearDown(self):
pass
if __name__ == '__main__':
main()
|
louyihua/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/mongo/__init__.py
|
268
|
"""
Provide names as exported by older mongo.py module
"""
from xmodule.modulestore.mongo.base import MongoModuleStore, MongoKeyValueStore
# Backwards compatibility for prod systems that reference
# xmodule.modulestore.mongo.DraftMongoModuleStore
from xmodule.modulestore.mongo.draft import DraftModuleStore as DraftMongoModuleStore
|
msegado/edx-platform
|
refs/heads/master
|
common/djangoapps/django_comment_common/signals.py
|
101
|
# pylint: disable=invalid-name
"""Signals related to the comments service."""
from django.dispatch import Signal
thread_created = Signal(providing_args=['user', 'post'])
thread_edited = Signal(providing_args=['user', 'post'])
thread_voted = Signal(providing_args=['user', 'post'])
thread_deleted = Signal(providing_args=['user', 'post'])
comment_created = Signal(providing_args=['user', 'post'])
comment_edited = Signal(providing_args=['user', 'post'])
comment_voted = Signal(providing_args=['user', 'post'])
comment_deleted = Signal(providing_args=['user', 'post'])
comment_endorsed = Signal(providing_args=['user', 'post'])
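# A minimal receiver sketch (hypothetical function name, not part of this
# module) showing how these signals are typically consumed:
#
#     from django.dispatch import receiver
#
#     @receiver(thread_created)
#     def on_thread_created(sender, user, post, **kwargs):
#         ...  # 'user' and 'post' arrive as the providing_args above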
|
sharifelguindi/qatrackplus
|
refs/heads/master
|
qatrack/data_tables/views.py
|
1
|
import json
import urllib
from django.db.models import Q
from django.http import HttpResponse
from django.views.generic import ListView
#============================================================================
class BaseDataTablesDataSource(ListView):
"""This view serves a page with a pre-rendered data table"""
model = None
queryset = None
initial_orderings = []
max_display_length = 500
page_title = "Generic Data Tables Template View"
#---------------------------------------------------------------------------
def render_to_response(self, context):
if self.request.is_ajax():
return HttpResponse(json.dumps(context), content_type='application/json')
else:
return super(BaseDataTablesDataSource, self).render_to_response(context)
#---------------------------------------------------------------------------
def get_context_data(self, *args, **kwargs):
context = super(BaseDataTablesDataSource, self).get_context_data(*args, **kwargs)
table_data = self.get_table_context_data(context)
if self.request.is_ajax():
return table_data
else:
context.update(table_data)
return self.get_template_context_data(context)
#----------------------------------------------------------------------
def get_table_context_data(self, base_context):
"""return a dictionary of all required values for rendering a Data Table"""
all_objects = base_context["object_list"]
self.set_search_filter_context()
self.set_columns()
self.set_orderings()
self.set_filters()
self.filtered_objects = all_objects.filter(*self.filters).order_by(*self.orderings)
self.set_current_page_objects()
self.tabulate_data()
context = {
"data": self.table_data,
"iTotalRecords": all_objects.count(),
"iTotalDisplayRecords": self.filtered_objects.count(),
"sEcho": self.search_filter_context.get("sEcho"),
}
return context
#----------------------------------------------------------------------
def set_search_filter_context(self):
"""create a search and filter context, overridng any cookie values
with request values. This is required when "Sticky" DataTables filters
are used """
self.search_filter_context = {}
try:
for k, v in self.request.COOKIES.items():
if k.startswith("SpryMedia_DataTables"):
break
else:
raise KeyError
cookie_filters = json.loads(urllib.unquote(v))
for idx, search in enumerate(cookie_filters["aoSearchCols"]):
for k, v in search.items():
self.search_filter_context["%s_%d" % (k, idx)] = v
self.search_filter_context["iSortingCols"] = 0
for idx, (col, dir_, _) in enumerate(cookie_filters["aaSorting"]):
self.search_filter_context["iSortCol_%d" % (idx)] = col
self.search_filter_context["sSortDir_%d" % (idx)] = dir_
self.search_filter_context["iSortingCols"] += 1
self.search_filter_context["iDisplayLength"] = cookie_filters["iLength"]
self.search_filter_context["iDisplayStart"] = cookie_filters["iStart"]
self.search_filter_context["iDisplayEnd"] = cookie_filters["iEnd"]
except KeyError:
pass
self.search_filter_context.update(self.request.GET.dict())
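# Based on the keys read above, the sticky-filter cookie decodes to a
# JSON object shaped roughly like this (field values are illustrative):
#
#     {"aoSearchCols": [{"sSearch": "foo"}, {"sSearch": ""}],
#      "aaSorting": [[0, "asc", 0]],
#      "iLength": 25, "iStart": 0, "iEnd": 25}
#
# which this method flattens into DataTables-style request parameters
# (sSearch_0, iSortCol_0, sSortDir_0, iDisplayLength, ...), then lets any
# explicit GET parameters override.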
#---------------------------------------------------------------------------
def set_columns(self):
"""Return an interable (of length N-Columns) of three-tuples consisting of:
1) A callable which accepts a model instance and returns the display value
for the column e.g. lambda instance: instance.name
2) A string representing a Django filter for this column (e.g. name__icontains)
or None to disable filtering
3) A string representing a model field to order instances (e.g. name) This can also be
an iterable of strings to handle generic foreign key cases (e.g.
(mycontenttype1__name, mycontenttype2__someotherfield)
This function must be overridden in child class"""
self.columns = ()
raise NotImplementedError
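# An illustrative (hypothetical) override for a model with a ``name``
# field, matching the three-tuple contract described above:
#
#     def set_columns(self):
#         self.columns = (
#             (lambda instance: instance.name, "name__icontains", "name"),
#             (lambda instance: instance.pk, None, None),
#         )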
#----------------------------------------------------------------------
def set_orderings(self):
"""Figure out which columns user wants to order on"""
n_orderings = int(self.search_filter_context.get("iSortingCols", 0))
if n_orderings == 0:
self.orderings = self.initial_orderings
return
order_cols = []
for x in range(n_orderings):
col = int(self.search_filter_context.get("iSortCol_%d" % x))
direction = "" if self.search_filter_context.get("sSortDir_%d" % x, "asc") == "asc" else "-"
order_cols.append((col, direction))
self.orderings = []
for col, direction in order_cols:
display, search, ordering = self.columns[col]
if ordering:
if isinstance(ordering, basestring):
self.orderings.append("%s%s" % (direction, ordering))
else:
for o in ordering:
self.orderings.append("%s%s" % (direction, o))
#----------------------------------------------------------------------
def set_filters(self):
"""Create filters made up of Q objects"""
self.filters = []
for col, (display, search, ordering) in enumerate(self.columns):
search_term = self.search_filter_context.get("sSearch_%d" % col)
if search and search_term:
if search_term == "null":
search_term = None
if not isinstance(search, basestring):
# handle the case where we are filtering on a generic foreign key field
f = Q()
for s, ct in search:
f |= Q(**{s: search_term, "content_type": ct})
else:
f = Q(**{search: search_term})
self.filters.append(f)
#----------------------------------------------------------------------
def set_current_page_objects(self):
per_page = int(self.search_filter_context.get("iDisplayLength", self.max_display_length))
per_page = min(per_page, self.max_display_length)
offset = int(self.search_filter_context.get("iDisplayStart", 0))
self.cur_page_objects = self.filtered_objects[offset:offset + per_page]
#----------------------------------------------------------------------
def tabulate_data(self):
self.table_data = []
for obj in self.cur_page_objects:
row = []
for col, (display, search, ordering) in enumerate(self.columns):
if callable(display):
display = display(obj)
row.append(display)
self.table_data.append(row)
#----------------------------------------------------------------------
def get_page_title(self):
return self.page_title
#----------------------------------------------------------------------
def get_template_context_data(self, context):
context["page_title"] = self.get_page_title()
return context
|
salguarnieri/intellij-community
|
refs/heads/master
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_methodattrs.py
|
326
|
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
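# For example, the fixer rewrites ``m.im_func`` to ``m.__func__`` and
# ``m.im_class`` to ``m.__self__.__class__``; the transform below keeps
# the attribute node's prefix, so surrounding whitespace is preserved.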
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
|
weigj/django-multidb
|
refs/heads/master
|
tests/regressiontests/admin_scripts/management/commands/__init__.py
|
12133432
| |
jtyr/ansible-modules-extras
|
refs/heads/devel
|
storage/__init__.py
|
12133432
| |
gannetson/django
|
refs/heads/master
|
tests/delete_regress/__init__.py
|
12133432
| |
XiaosongWei/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-csp-w3c-tests/csp-py/csp_plugin-types_blocked-manual.py
|
30
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "plugin-types image/jpeg"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_plugin-types_blocked</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://w3c.github.io/webappsec/specs/content-security-policy/csp-specification.dev.html#plugin-types"/>
<meta name="flags" content=""/>
<meta name="assert" content="object-src 'self'"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no blue</strong>.</p>
<object data="support/blue-100x100.png"/>
</body>
</html> """
|
totallybradical/temp_servo2
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/py/testing/process/test_cmdexec.py
|
163
|
import py
from py.process import cmdexec
def exvalue():
return py.std.sys.exc_info()[1]
class Test_exec_cmd:
def test_simple(self):
out = cmdexec('echo hallo')
assert out.strip() == 'hallo'
assert py.builtin._istext(out)
def test_simple_newline(self):
import sys
out = cmdexec(r"""%s -c "print ('hello')" """ % sys.executable)
assert out == 'hello\n'
assert py.builtin._istext(out)
def test_simple_error(self):
py.test.raises(cmdexec.Error, cmdexec, 'exit 1')
def test_simple_error_exact_status(self):
try:
cmdexec('exit 1')
except cmdexec.Error:
e = exvalue()
assert e.status == 1
assert py.builtin._istext(e.out)
assert py.builtin._istext(e.err)
def test_err(self):
try:
cmdexec('echoqweqwe123 hallo')
raise AssertionError("command succeeded but shouldn't")
except cmdexec.Error:
e = exvalue()
assert hasattr(e, 'err')
assert hasattr(e, 'out')
assert e.err or e.out
|
Endika/django
|
refs/heads/master
|
django/contrib/sitemaps/apps.py
|
590
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SiteMapsConfig(AppConfig):
name = 'django.contrib.sitemaps'
verbose_name = _("Site Maps")
|
Osmose/kitsune
|
refs/heads/master
|
kitsune/kbadge/badges.py
|
22
|
from django.conf import settings
from django.dispatch import receiver
from badger.signals import badge_was_awarded
from kitsune.kbadge.tasks import send_award_notification
@receiver(badge_was_awarded)
def notify_award_recipient(sender, award, **kwargs):
"""Notifies award recipient that he/she has an award!"""
# -dev and -stage have STAGE = True which means they won't send
# notification emails of newly awarded badges which would spam the
# bejesus out of everyone.
if not settings.STAGE:
# Kick off the task to send the email
send_award_notification.delay(award)
|
mariusbaumann/pyload
|
refs/heads/stable
|
module/plugins/hooks/LinkdecrypterCom.py
|
1
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.MultiHook import MultiHook
class LinkdecrypterCom(MultiHook):
__name__ = "LinkdecrypterCom"
__type__ = "hook"
__version__ = "1.00"
__config__ = [("mode" , "all;listed;unlisted", "Use for crypters (if supported)" , "all"),
("pluginlist" , "str" , "Crypter list (comma separated)" , "" ),
("interval" , "int" , "Reload interval in hours (0 to disable)" , 12 )]
__description__ = """Linkdecrypter.com hook plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
def getCrypters(self):
try:
html = self.getURL("http://linkdecrypter.com/")
return re.search(r'>Supported\(\d+\)</b>: <i>(.+?) \+ RSDF', html).group(1).split(', ')
except Exception:
return list()
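# Illustrative (hypothetical) fragment of the page the regex above is
# written against:
#
#     >Supported(42)</b>: <i>foo-crypter.com, bar.net + RSDF
#
# from which getCrypters would return ['foo-crypter.com', 'bar.net'].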
|
uni-peter-zheng/tp-libvirt
|
refs/heads/master
|
libguestfs/tests/guestfish_augeas.py
|
7
|
"""
This file is used to run autotest cases related to augeas
"""
import re
import logging
from autotest.client.shared import error
from virttest import utils_test
def prepare_image(params):
"""
1) Create a image
2) Create file system on the image
"""
params["image_path"] = utils_test.libguestfs.preprocess_image(params)
if not params.get("image_path"):
raise error.TestFail("Image could not be created for some reason")
gf = utils_test.libguestfs.GuestfishTools(params)
status, output = gf.create_fs()
if status is False:
gf.close_session()
raise error.TestFail(output)
gf.close_session()
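# Every test_aug_* case below follows the same skeleton: add the prepared
# image, launch the appliance, mount the root filesystem, create an
# augeas handle with aug-init, exercise one augeas API, verify its
# output, and call close_session() on every exit path so a failing step
# never leaks the guestfish process.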
def test_aug_clear(vm, params):
"""
Clear augeas path
1) Create a new augeas handle
2) Set the home directory of root user to /root
3) Clear the home directory of root user
4) Check if the path have been cleared
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root/home", "/root")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root/home to /root. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root/home to /root successfully")
aug_clear_result = gf.aug_clear("/files/etc/passwd/root/home")
if aug_clear_result.exit_status:
gf.close_session()
raise error.TestFail("Can not clean /files/etc/passwd/root/home. GSERROR_MSG: %s" % aug_clear_result)
logging.info("Clear augeas /files/etc/passwd/root/home successfully")
aug_get_result = gf.aug_get("/files/etc/passwd/root/home")
if not aug_get_result.exit_status:
gf.close_session()
raise error.TestFail("The home directory of root user should be cleared after aug-clear")
logging.info("Clean the home directory of root user successfully")
gf.close_session()
def test_aug_close(vm, params):
"""
Close the current augeas handle and free up any resources used by it.
After calling this, you have to call "aug_init" again before you can use
any other augeas functions.
1) Create a new augeas handle
2) Close the current augeas handle
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_close_result = gf.aug_close()
if aug_close_result.exit_status:
gf.close_session()
raise error.TestFail("Can not close augeas handle. GSERROR_MSG: %s" % aug_close_result)
logging.info("Close augeas handle successfully")
gf.close_session()
def test_aug_defnode(vm, params):
"""
Defines a variable "name" whose value is the result of evaluating
"expr".
1) Create a new augeas handle
2) Define an augeas node
3) Check the value of the node
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_defnode_result = gf.aug_defnode("node", "/files/etc/passwd/test/uid", "9999")
if aug_defnode_result.exit_status:
gf.close_session()
raise error.TestFail("Can not define node. GSERROR_MSG: %s" % aug_defnode_result)
logging.info("Define node successfully")
aug_defnode_result = gf.aug_defnode("node", "/files/etc/passwd/test/gid", "9999")
if aug_defnode_result.exit_status:
gf.close_session()
raise error.TestFail("Can not define node /files/etc/passwd/test/gid. GSERROR_MSG: %s" % aug_defnode_result)
logging.info("Define node /files/etc/passwd/test/gid successfully")
aug_ls_result = gf.aug_ls("/files/etc/passwd/test")
if aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("Can not list augeas nodes under /files/etc/passwd/test. GSERROR_MSG: %s" % aug_ls_result)
logging.info("List augeas nodes under /files/etc/passwd/test successfully")
if aug_ls_result.stdout.strip('\n') != '/files/etc/passwd/test/gid\n/files/etc/passwd/test/uid':
gf.close_session()
raise error.TestFail("The node value is not correct: %s" % aug_ls_result.stdout)
logging.info("The node value is correct")
gf.close_session()
def test_aug_defvar(vm, params):
"""
Define an augeas variable
1) Create a new augeas handle
2) Define an augeas variable
3) Check the value of the variable
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_defvar_result = gf.aug_defvar("test", "'This is a test'")
if aug_defvar_result.exit_status:
gf.close_session()
raise error.TestFail("Can not define variable. GSERROR_MSG: %s" % aug_defvar_result)
logging.info("Define variable successfully")
aug_get_result = gf.aug_get("/augeas/variables/test")
if aug_get_result.exit_status:
gf.close_session()
raise error.TestFail("Can not look up the value of /augeas/variables/test. GSERROR_MSG:%s" % aug_get_result)
logging.info("Look up the value of /augeas/variables/test successfully")
if aug_get_result.stdout.strip('\n') != 'This is a test':
gf.close_session()
raise error.TestFail("The variable value is not correct %s != This is a test" % aug_get_result.stdout.strip('\n'))
logging.info("The variable value is correct")
gf.close_session()
def test_aug_set_get(vm, params):
"""
Look up the value of an augeas path
1) Create a new augeas handle
2) Set a new augeas node
3) Get the new augeas node
4) Check the value of the augeas path
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root/password", "9999")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root/password to 9999. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root/password to 9999 successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root/home", "/root")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root/home to /root. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root/home to /root successfully")
aug_get_result_password = gf.aug_get("/files/etc/passwd/root/password")
if aug_get_result_password.exit_status:
gf.close_session()
raise error.TestFail("Can not get the value of /files/etc/passwd/root/password. GSERROR_MSG: %s" % aug_get_result_password)
logging.info("Get the value of /files/etc/passwd/root/password successfully")
aug_get_result_home = gf.aug_get("/files/etc/passwd/root/home")
if aug_get_result_home.exit_status:
gf.close_session()
raise error.TestFail("Can not get the value of /files/etc/passwd/root/home. GSERROR_MSG: %s" % aug_get_result_home)
logging.info("Get the value of /files/etc/passwd/root/home successfully")
if aug_get_result_password.stdout.strip('\n') != "9999" or aug_get_result_home.stdout.strip('\n') != '/root':
gf.close_session()
raise error.TestFail("The value of /files/etc/passwd/root/password and /files/etc/passwd/root/home is not correct. root password %s != 9999, root home %s != /root" % (aug_get_result_password.stdout.strip('\n'), aug_get_result_home.stdout.strip('\n')))
logging.info("The value of /files/etc/passwd/root/password and /files/etc/passwd/root/home is correct")
gf.close_session()
def test_aug_init(vm, params):
"""
Create a new augeas handle
1) Create a new augeas handle and set the flag to 0
2) Create a new augeas handle and set the flag to 1
3) Create a new augeas handle and set the flag to 8
4) Create a new augeas handle and set the flag to 16
5) Create a new augeas handle and set the flag to 32
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle with flag = 0. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle with flag = 0 successfully")
aug_init_result = gf.aug_init("/", "1")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle with flag = 1. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle with flag = 1 successfully")
aug_init_result = gf.aug_init("/", "8")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle with flag = 8. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle with flag = 8 successfully")
aug_init_result = gf.aug_init("/", "16")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle with flag = 16. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle with flag = 16 successfully")
aug_init_result = gf.aug_init("/", "32")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle with flag = 32. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle with flag = 32 successfully")
gf.close_session()
def test_aug_insert(vm, params):
"""
Insert a sibling augeas node
1) Upload the passwd lens and /etc/passwd into the image
2) Create a new augeas handle
3) Insert sibling nodes before and after /files/etc/passwd/root/name
4) Check that both new nodes exist
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
mkdir_p_result = gf.mkdir_p('/usr/share/augeas/lenses/dist')
if mkdir_p_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create directory /usr/share/augeas/lenses/dist. GSERROR_MSG: %s" % mkdir_p_result)
logging.info("Create directory /usr/share/augeas/lenses/dist successfully")
mkdir_result = gf.mkdir('/etc')
if mkdir_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create directory /etc. GSERROR_MSG: %s" % mkdir_result)
logging.info("Create directory /etc successfully")
upload_result = gf.upload('/usr/share/augeas/lenses/dist/passwd.aug', '/usr/share/augeas/lenses/dist/passwd.aug')
if upload_result.exit_status:
gf.close_session()
raise error.TestFail("Can not upload file /usr/share/augeas/lenses/dist/passwd.aug. GSERROR_MSG: %s" % upload_result)
logging.info("upload file /usr/share/augeas/lenses/dist/passwd.aug successfully")
upload_result = gf.upload('/etc/passwd', '/etc/passwd')
if upload_result.exit_status:
gf.close_session()
raise error.TestFail("Can not upload file /etc/passwd. GSERROR_MSG: %s" % upload_result)
logging.info("upload file /etc/passwd successfully")
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_insert_result = gf.aug_insert("/files/etc/passwd/root/name", "testbefore", "true")
if aug_insert_result.exit_status:
gf.close_session()
raise error.TestFail("Can not insert testbefore before /files/etc/passwd/root/name. GSERROR_MSG: %s" % aug_insert_result)
logging.info("Insert testbefore before /files/etc/passwd/root/name successfully")
aug_insert_result = gf.aug_insert("/files/etc/passwd/root/name", "testafter", "false")
if aug_insert_result.exit_status:
gf.close_session()
raise error.TestFail("Can not insert testafter after /files/etc/passwd/root/name. GSERROR_MSG: %s" % aug_insert_result)
logging.info("Insert testafter after /files/etc/passwd/root/name successfully")
command_result = gf.inner_cmd("aug-match /files/etc/passwd/root/* |egrep 'name|test'")
if command_result.exit_status:
gf.close_session()
raise error.TestFail("Failed to run the command. GSERROR_MSG: %s" % command_result)
if command_result.stdout.strip('\n') != '/files/etc/passwd/root/testbefore\n/files/etc/passwd/root/name\n/files/etc/passwd/root/testafter':
gf.close_session()
raise error.TestFail("The match results is not correct. GSERROR_MSG: %s" % command_result.stdout)
gf.close_session()
def test_aug_ls(vm, params):
"""
List augeas nodes under augpath
1) Create a new augeas handle
2) Create two new nodes
3) List the two new nodes
4) Check the results
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root", "0")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root to 0. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root to 0 successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/mysql", "1")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/mysql to 1. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/mysql to 1 successfully")
aug_ls_result = gf.aug_ls("/files/etc/passwd")
if aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("Can not list path /files/etc/passwd. GSERROR_MSG: %s" % aug_ls_result)
logging.info("List path /files/etc/passwd successfully")
if aug_ls_result.stdout.strip('\n') != '/files/etc/passwd/mysql\n/files/etc/passwd/root':
gf.close_session()
raise error.TestFail("aug-ls list the wrong results. GSERROR_MSG: %s" % aug_ls_result.stdout)
logging.info("aug-ls list the right results")
aug_ls_result = gf.aug_ls("/files/etc/passwd/")
if not aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("aug_ls: can use aug-ls with a path that ends with /")
aug_ls_result = gf.aug_ls("/files/etc/passwd/*")
if not aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("aug_ls: can use aug-ls with a path that ends with *")
aug_ls_result = gf.aug_ls("/files/etc/passwd/node[1]")
if not aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("aug_ls: can use aug-ls with a path that ends with ]")
gf.close_session()
def test_aug_match(vm, params):
"""
Return augeas nodes matching an augeas path expression
1) Create a new augeas handle
2) Create three new nodes
3) Match two of the three nodes with /files/etc/*/root
4) Check the results
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root", "0")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root to 0. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root to 0 successfully")
aug_set_result = gf.aug_set("/files/etc/host/home", "1")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/host/home to 1. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/host/home to 1 successfully")
aug_set_result = gf.aug_set("/files/etc/config/root", "2")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/config/root to 2. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/config/root to 2 successfully")
aug_match_result = gf.aug_match("/files/etc/*/root")
if aug_match_result.exit_status:
gf.close_session()
raise error.TestFail("Can not return augeas nodes which match /files/etc/*/root. GSERROR_MSG: %s" % aug_match_result)
logging.info("Can return augeas nodes which match /files/etc/*/root successfully")
if aug_match_result.stdout.strip('\n') != '/files/etc/passwd/root\n/files/etc/config/root':
gf.close_session()
raise error.TestFail("The match results is not correct. GSERROR_MSG: %s" % aug_match_result.stdout)
gf.close_session()
def test_aug_mv(vm, params):
"""
Move augeas node
1) Create a new augeas handle
2) Create a new node
3) Move the node to other place
4) Check the results
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root", "0")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root to 0. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root to 0 successfully")
aug_mv_result = gf.aug_mv("/files/etc/passwd/root", "/files/etc/passwd/other_none_root")
if aug_mv_result.exit_status:
gf.close_session()
raise error.TestFail("Can not move /files/etc/passwd/root to /files/etc/passwd/other_none_root. GSERROR_MSG: %s" % aug_mv_result)
aug_ls_result = gf.aug_ls("/files/etc/passwd")
if aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("Can not list augeas nodes under /files/etc/passwd. GSERROR_MSG: %s" % aug_ls_result)
logging.info("List augeas nodes under /files/etc/passwd successfully")
if aug_ls_result.stdout.strip('\n') != '/files/etc/passwd/other_none_root':
gf.close_session()
raise error.TestFail("aug-mv: can not find the new node /files/etc/passwd/other_none_root")
gf.close_session()
def test_aug_rm(vm, params):
"""
Remove an augeas node
1) Create a new augeas handle
2) Create a new node
3) Remove the node
4) Check the results
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root", "0")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root to 0. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root to 0 successfully")
aug_rm_result = gf.aug_rm("/files/etc/passwd/root")
if aug_rm_result.exit_status:
gf.close_session()
raise error.TestFail("Can not remove /files/etc/passwd/root. GSERROR_MSG: %s" % aug_rm_result)
aug_ls_result = gf.aug_ls("/files/etc/passwd")
if aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("Can not list augeas nodes under /files/etc/passwd. GSERROR_MSG: %s" % aug_ls_result)
logging.info("List augeas nodes under /files/etc/passwd successfully")
if aug_ls_result.stdout.strip('\n') == '/files/etc/passwd/root':
gf.close_session()
raise error.TestFail("aug-rm: failed to remove node /files/etc/passwd/root")
gf.close_session()
def test_aug_label(vm, params):
"""
Return the label from an augeas path expression
1) Create a new augeas handle
2) Create a new node
3) Return the label from an augeas path expression
4) Check the results
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root", "0")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root to 0. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root to 0 successfully")
aug_label_result = gf.aug_label("/files/etc/passwd/root")
if aug_label_result.exit_status:
gf.close_session()
raise error.TestFail("Can not get the label of /files/etc/passwd/root. GSERROR_MSG: %s" % aug_label_result)
if aug_label_result.stdout.strip('\n') != 'root':
gf.close_session()
raise error.TestFail("aug-label return the wrong lable")
gf.close_session()
def test_aug_setm(vm, params):
"""
Set multiple augeas nodes
1) Create a new augeas handle
2) Create multiple augeas nodes
3) Set multiple augeas nodes with a single aug-setm call
4) Check the results
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root/uid", "0")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root/uid to 0. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root/uid to 0 successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/mysql/uid", "1")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/mysql/uid to 1. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/mysql/uid to 1 successfully")
aug_setm_result = gf.aug_setm("/files/etc/passwd/*", "uid", "2")
if aug_setm_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set multiple augeas nodes. GSERROR_MSG: %s" % aug_setm_result)
aug_get_result_root = gf.aug_get("/files/etc/passwd/root/uid")
if aug_get_result_root.exit_status:
gf.close_session()
raise error.TestFail("Can not get the value of /files/etc/passwd/root/uid. GSERROR_MSG: %s" % aug_get_result_root)
aug_get_result_mysql = gf.aug_get("/files/etc/passwd/mysql/uid")
if aug_get_result_mysql.exit_status:
gf.close_session()
raise error.TestFail("Can not get the value of /files/etc/passwd/mysql/uid. GSERROR_MSG: %s" % aug_get_result_mysql)
if aug_get_result_root.stdout.strip('\n') != '2' or aug_get_result_mysql.stdout.strip('\n') != '2':
gf.close_session()
raise error.TestFail("aug-setm set the wrong value. GSERROR_MSG: root = %s, mysql = %s" % (aug_get_result_root.stdout.strip('\n'), aug_get_result_mysql.stdout.strip('\n')))
gf.close_session()
def test_aug_load(vm, params):
"""
Load files into the tree
1) Create a new augeas handle
2) upload files
3) Load files into the tree
4) Check the load results
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_ls_result = gf.aug_ls("/files/etc")
if aug_ls_result.exit_status:
gf.close_session()
raise error.TestFail("Can not list augeas nodes under /files/etc. GSERROR_MSG: %s" % aug_ls_result)
logging.info("List augeas nodes under /files/etc successfully")
mkdir_p_result = gf.mkdir_p('/usr/share/augeas/lenses/dist')
if mkdir_p_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create directory /usr/share/augeas/lenses/dist. GSERROR_MSG: %s" % mkdir_p_result)
logging.info("Create directory /usr/share/augeas/lenses/dist successfully")
mkdir_result = gf.mkdir('/etc')
if mkdir_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create directory /etc. GSERROR_MSG: %s" % mkdir_result)
logging.info("Create directory /etc successfully")
upload_result = gf.upload('/usr/share/augeas/lenses/dist/passwd.aug', '/usr/share/augeas/lenses/dist/passwd.aug')
if upload_result.exit_status:
gf.close_session()
raise error.TestFail("Can not upload file /usr/share/augeas/lenses/dist/passwd.aug. GSERROR_MSG: %s" % upload_result)
logging.info("upload file /usr/share/augeas/lenses/dist/passwd.aug successfully")
upload_result = gf.upload('/etc/passwd', '/etc/passwd')
if upload_result.exit_status:
gf.close_session()
raise error.TestFail("Can not upload file /etc/passwd. GSERROR_MSG: %s" % upload_result)
logging.info("upload file /etc/passwd successfully")
aug_load_result = gf.aug_load()
if aug_load_result.exit_status:
gf.close_session()
raise error.TestFail("Can not load files into the tree. GSERROR_MSG: %s" % aug_load_result)
logging.info("Load files into tree successfully")
aug_ls_load_result = gf.aug_ls("/files/etc")
if aug_ls_load_result.exit_status:
gf.close_session()
raise error.TestFail("Can not list augeas nodes under /files/etc. GSERROR_MSG: %s" % aug_ls_load_result)
logging.info("List augeas nodes under /files/etc successfully")
if aug_ls_result.stdout.strip('\n') != '' or aug_ls_load_result.stdout.strip('\n') != '/files/etc/passwd':
gf.close_session()
raise error.TestFail("Failed to load the tree.")
gf.close_session()
def test_aug_save(vm, params):
"""
Write all pending augeas changes to disk
1) upload files
2) Create a new augeas handle
3) Change the home directory of root user to /tmp/root
4) Write the changes to disk
5) Exit guestfish
6) Add the image again
7) Create a new augeas handle
8) Get the home directory of root user
9) Check the home directory of root user
"""
readonly = "yes" == params.get("gf_add_readonly")
gf = utils_test.libguestfs.GuestfishTools(params)
image_path = params.get("image_path")
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_point = params["mount_point"]
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
mkdir_p_result = gf.mkdir_p('/usr/share/augeas/lenses/dist')
if mkdir_p_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create directory /usr/share/augeas/lenses/dist. GSERROR_MSG: %s" % mkdir_p_result)
logging.info("Create directory /usr/share/augeas/lenses/dist successfully")
mkdir_result = gf.mkdir('/etc')
if mkdir_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create directory /etc. GSERROR_MSG: %s" % mkdir_result)
logging.info("Create directory /etc successfully")
upload_result = gf.upload('/usr/share/augeas/lenses/dist/passwd.aug', '/usr/share/augeas/lenses/dist/passwd.aug')
if upload_result.exit_status:
gf.close_session()
raise error.TestFail("Can not upload file /usr/share/augeas/lenses/dist/passwd.aug. GSERROR_MSG: %s" % upload_result)
logging.info("upload file /usr/share/augeas/lenses/dist/passwd.aug successfully")
upload_result = gf.upload('/etc/passwd', '/etc/passwd')
if upload_result.exit_status:
gf.close_session()
raise error.TestFail("Can not upload file /etc/passwd. GSERROR_MSG: %s" % upload_result)
logging.info("upload file /etc/passwd successfully")
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_set_result = gf.aug_set("/files/etc/passwd/root/home", "/tmp/root")
if aug_set_result.exit_status:
gf.close_session()
raise error.TestFail("Can not set /files/etc/passwd/root/home to /tmp/root. GSERROR_MSG: %s" % aug_set_result)
logging.info("Set /files/etc/passwd/root/home to /tmp/root successfully")
aug_save_result = gf.aug_save()
if aug_save_result.exit_status:
gf.close_session()
raise error.TestFail("Can not save changes to disk. GSERROR_MSG: %s" % aug_save_result)
logging.info("Save changes to disk successfully")
gf.close_session()
gf = utils_test.libguestfs.GuestfishTools(params)
gf.add_drive_opts(image_path, readonly=readonly)
# Launch
run_result = gf.run()
if run_result.exit_status:
gf.close_session()
raise error.TestFail("Can not launch. GSERROR_MSG: %s" % run_result)
logging.info("Launch successfully")
# mount the device
mount_result = gf.mount_options("noatime", mount_point, "/")
if mount_result.exit_status:
gf.close_session()
raise error.TestFail("Can not mount %s to /. GSERROR_MSG: %s" % (mount_point, mount_result))
logging.info("mount %s to / successfully" % mount_point)
aug_init_result = gf.aug_init("/", "0")
if aug_init_result.exit_status:
gf.close_session()
raise error.TestFail("Can not create a augeas handle. GSERROR_MSG: %s" % aug_init_result)
logging.info("Create augeas handle successfully")
aug_get_result = gf.aug_get("/files/etc/passwd/root/home")
if aug_get_result.exit_status:
gf.close_session()
raise error.TestFail("Can not get the home directory of root user. GSERROR_MSG: %s" % aug_get_result)
logging.info("Get the home directory of root user successfully. root directory is %s" % aug_get_result.stdout.strip('\n'))
if aug_get_result.stdout.strip('\n') != '/tmp/root':
gf.close_session()
raise error.TestFail("The home directory of root user is not correct")
gf.close_session()
def run(test, params, env):
"""
Test of built-in augeas related commands in guestfish
1) Get parameters for test
2) Set options for commands
3) Run key commands:
a. add disk or domain with readonly or not
b. launch
c. mount root device
4) Run augeas APIs inside guestfish session
5) Check results
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
if vm.is_alive():
vm.destroy()
operation = params.get("guestfish_function")
testcase = globals()["test_%s" % operation]
partition_types = params.get("partition_types")
fs_types = params.get("fs_types")
image_formats = params.get("image_formats")
for image_format in re.findall(r"\w+", image_formats):
params["image_format"] = image_format
for partition_type in re.findall(r"\w+", partition_types):
params["partition_type"] = partition_type
for fs_type in re.findall(r"\w+", fs_types):
params["fs_type"] = fs_type
prepare_image(params)
testcase(vm, params)
|
dspeyer/pithos
|
refs/heads/master
|
pithos/plugins/screensaver_pause.py
|
5
|
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: nil; -*-
### BEGIN LICENSE
# Copyright (C) 2010-2012 Kevin Mehall <km@kevinmehall.net>
#This program is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License version 3, as published
#by the Free Software Foundation.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranties of
#MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
#PURPOSE. See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from pithos.plugin import PithosPlugin
import logging
dbus = None
class ScreenSaverPausePlugin(PithosPlugin):
preference = 'enable_screensaverpause'
description = 'Pause playback when screensaver starts'
session_bus = None
def bind_session_bus(self):
global dbus
try:
import dbus
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
except ImportError:
return False
try:
self.session_bus = dbus.SessionBus()
return True
except dbus.DBusException:
return False
def on_enable(self):
if not self.bind_session_bus():
logging.error("Could not bind session bus")
return
if not self.connect_events():
logging.error("Could not connect events")
self.locked = 0
self.wasplaying = False
def on_disable(self):
if self.session_bus:
self.disconnect_events()
self.session_bus = None
def connect_events(self):
try:
self.receivers = [
self.session_bus.add_signal_receiver(*args)
for args in ((self.playPause, 'ActiveChanged', 'org.gnome.ScreenSaver'),
(self.playPause, 'ActiveChanged', 'org.cinnamon.ScreenSaver'),
(self.playPause, 'ActiveChanged', 'org.freedesktop.ScreenSaver'),
(self.pause, 'Locked', 'com.canonical.Unity.Session'),
(self.play, 'Unlocked', 'com.canonical.Unity.Session'),
)
]
return True
except dbus.DBusException:
logging.info("Enable failed")
return False
def disconnect_events(self):
try:
for r in self.receivers:
r.remove()
return True
except dbus.DBusException:
return False
def play(self):
self.locked -= 1
if self.locked < 0:
self.locked = 0
if not self.locked and self.wasplaying:
self.window.user_play()
def pause(self):
if not self.locked:
self.wasplaying = self.window.playing
self.window.pause()
self.locked += 1
def playPause(self, screensaver_on):
if screensaver_on:
self.pause()
else:
self.play()
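# Locking sketch: each Locked signal increments self.locked and pauses playback
# once; each Unlocked decrements it, and playback resumes only when the counter
# returns to zero and music was playing before the first lock.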
|
djgagne/scikit-learn
|
refs/heads/master
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bunnyitvn/webptn
|
refs/heads/master
|
django/contrib/webdesign/lorem_ipsum.py
|
230
|
"""
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = ', '.join(sections)
# Convert to sentence case and add end punctuation.
return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return ' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return ' '.join(word_list)
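# A minimal usage sketch (words() with common=True is deterministic for small
# counts; paragraph output is random, so only its shape is shown):
#
#   >>> words(5)
#   'lorem ipsum dolor sit amet'
#   >>> len(paragraphs(3))
#   3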
|
glencoates/django-tastypie
|
refs/heads/master
|
tests/core/tests/utils.py
|
18
|
from django.http import HttpRequest
from django.test import TestCase
from tastypie.serializers import Serializer
from tastypie.utils.mime import determine_format, build_content_type
class MimeTestCase(TestCase):
def test_build_content_type(self):
self.assertEqual(build_content_type('application/json'), 'application/json; charset=utf-8')
self.assertEqual(build_content_type('application/xml'), 'application/xml; charset=utf-8')
self.assertEqual(build_content_type('application/json; charset=ascii'), 'application/json; charset=ascii')
def test_determine_format(self):
serializer = Serializer()
request = HttpRequest()
# Default.
self.assertEqual(determine_format(request, serializer), 'application/json')
# Test forcing the ``format`` parameter.
request.GET = {'format': 'json'}
self.assertEqual(determine_format(request, serializer), 'application/json')
request.GET = {'format': 'jsonp'}
self.assertEqual(determine_format(request, serializer), 'text/javascript')
request.GET = {'format': 'xml'}
self.assertEqual(determine_format(request, serializer), 'application/xml')
request.GET = {'format': 'yaml'}
self.assertEqual(determine_format(request, serializer), 'text/yaml')
request.GET = {'format': 'plist'}
self.assertEqual(determine_format(request, serializer), 'application/x-plist')
request.GET = {'format': 'foo'}
self.assertEqual(determine_format(request, serializer), 'application/json')
# Test the ``Accept`` header.
request.META = {'HTTP_ACCEPT': 'application/json'}
self.assertEqual(determine_format(request, serializer), 'application/json')
request.META = {'HTTP_ACCEPT': 'text/javascript'}
self.assertEqual(determine_format(request, serializer), 'text/javascript')
request.META = {'HTTP_ACCEPT': 'application/xml'}
self.assertEqual(determine_format(request, serializer), 'application/xml')
request.META = {'HTTP_ACCEPT': 'text/yaml'}
self.assertEqual(determine_format(request, serializer), 'text/yaml')
request.META = {'HTTP_ACCEPT': 'application/x-plist'}
self.assertEqual(determine_format(request, serializer), 'application/x-plist')
request.META = {'HTTP_ACCEPT': 'text/html'}
self.assertEqual(determine_format(request, serializer), 'text/html')
request.META = {'HTTP_ACCEPT': '*/*'}
self.assertEqual(determine_format(request, serializer), 'application/json')
request.META = {'HTTP_ACCEPT': 'application/json,application/xml;q=0.9,*/*;q=0.8'}
self.assertEqual(determine_format(request, serializer), 'application/json')
request.META = {'HTTP_ACCEPT': 'text/plain,application/xml,application/json;q=0.9,*/*;q=0.8'}
self.assertEqual(determine_format(request, serializer), 'application/xml')
request.META = {'HTTP_ACCEPT': 'application/json; charset=UTF-8'}
self.assertEqual(determine_format(request, serializer), 'application/json')
request.META = {'HTTP_ACCEPT': 'text/javascript,application/json'}
self.assertEqual(determine_format(request, serializer), 'application/json')
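# Accept-header sketch for the last two assertions above: determine_format()
# picks the best match among the serializer's formats, so an explicit
# "application/json" entry beats "application/xml;q=0.9", and a plain
# "text/javascript,application/json" list still resolves to JSON.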
|
playfulgod/kernel-M865
|
refs/heads/master
|
tools/perf/scripts/python/check-perf-trace.py
|
948
|
# perf trace event handlers, generated by perf trace -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
fastavro/fastavro
|
refs/heads/master
|
fastavro/io/json_encoder.py
|
1
|
import json
from .parser import Parser
from .symbols import (
Root,
Boolean,
Int,
RecordStart,
RecordEnd,
FieldStart,
FieldEnd,
Null,
String,
Union,
UnionEnd,
Long,
Float,
Double,
Bytes,
MapStart,
MapEnd,
MapKeyMarker,
Enum,
Fixed,
ArrayStart,
ArrayEnd,
ItemEnd,
)
class AvroJSONEncoder:
"""Encoder for the avro JSON format.
NOTE: All attributes and methods on this class should be considered
private.
Parameters
----------
fo: file-like
Input stream
"""
def __init__(self, fo):
self._fo = fo
self._stack = []
self._current = None
self._key = None
self._records = []
def write_value(self, value):
if isinstance(self._current, dict):
if self._key:
self._current[self._key] = value
else:
raise Exception("No key was set")
elif isinstance(self._current, list):
self._current.append(value)
else:
# If we aren't in a dict or a list then this must be a schema which
# just has a single basic type
self._records.append(value)
def _push(self):
self._stack.append((self._current, self._key))
def _pop(self):
prev_current, prev_key = self._stack.pop()
if isinstance(prev_current, dict):
prev_current[prev_key] = self._current
self._current = prev_current
elif isinstance(prev_current, list):
prev_current.append(self._current)
self._current = prev_current
else:
assert prev_current is None
assert prev_key is None
# Back at None, we should have a full record in self._current
self._records.append(self._current)
self._current = prev_current
self._key = prev_key
def write_buffer(self):
# Newline separated
json_data = "\n".join([json.dumps(record) for record in self._records])
self._fo.write(json_data)
def configure(self, schema, named_schemas):
self._parser = Parser(schema, named_schemas, self.do_action)
def flush(self):
self._parser.flush()
def do_action(self, action):
if isinstance(action, RecordStart):
self.write_object_start()
elif isinstance(action, RecordEnd) or isinstance(action, UnionEnd):
self.write_object_end()
elif isinstance(action, FieldStart):
self.write_object_key(action.field_name)
elif isinstance(action, FieldEnd):
# TODO: Do we need a FieldEnd symbol?
pass
elif isinstance(action, Root):
self.write_buffer()
else:
raise Exception(f"Internal Exception: {action}")
def write_null(self):
self._parser.advance(Null())
self.write_value(None)
def write_boolean(self, value):
self._parser.advance(Boolean())
self.write_value(value)
def write_utf8(self, value):
self._parser.advance(String())
if self._parser.stack[-1] == MapKeyMarker():
self._parser.advance(MapKeyMarker())
self.write_object_key(value)
else:
self.write_value(value)
def write_int(self, value):
self._parser.advance(Int())
self.write_value(value)
def write_long(self, value):
self._parser.advance(Long())
self.write_value(value)
def write_float(self, value):
self._parser.advance(Float())
self.write_value(value)
def write_double(self, value):
self._parser.advance(Double())
self.write_value(value)
def write_bytes(self, value):
self._parser.advance(Bytes())
self.write_value(value.decode("iso-8859-1"))
def write_enum(self, index):
self._parser.advance(Enum())
enum_labels = self._parser.pop_symbol()
# TODO: Check symbols?
self.write_value(enum_labels.labels[index])
def write_fixed(self, value):
self._parser.advance(Fixed())
self.write_value(value.decode("iso-8859-1"))
def write_array_start(self):
self._parser.advance(ArrayStart())
self._push()
self._current = []
def write_item_count(self, length):
pass
def end_item(self):
self._parser.advance(ItemEnd())
def write_array_end(self):
self._parser.advance(ArrayEnd())
self._pop()
def write_object_start(self):
self._push()
self._current = {}
def write_object_key(self, key):
self._key = key
def write_object_end(self):
self._pop()
def write_map_start(self):
self._parser.advance(MapStart())
self.write_object_start()
def write_map_end(self):
self._parser.advance(MapEnd())
self.write_object_end()
def write_index(self, index, schema):
self._parser.advance(Union())
alternative_symbol = self._parser.pop_symbol()
symbol = alternative_symbol.get_symbol(index)
if symbol != Null():
self.write_object_start()
self.write_object_key(alternative_symbol.get_label(index))
# TODO: Do we need this symbol?
self._parser.push_symbol(UnionEnd())
self._parser.push_symbol(symbol)
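# A minimal usage sketch (assuming fastavro.json_writer, which drives this
# encoder internally):
#
#   from fastavro import json_writer
#   schema = {"type": "record", "name": "R",
#             "fields": [{"name": "a", "type": "int"}]}
#   with open("out.json", "w") as fo:
#       json_writer(fo, schema, [{"a": 1}])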
|
DreamerKing/LightweightHtmlWidgets
|
refs/heads/master
|
LightweightHtmlWidgets/bin/Debug/Ipy.Lib/lib2to3/fixes/fix_methodattrs.py
|
326
|
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
MAP = {
"im_func" : "__func__",
"im_self" : "__self__",
"im_class" : "__self__.__class__"
}
class FixMethodattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
"""
def transform(self, node, results):
attr = results["attr"][0]
new = unicode(MAP[attr.value])
attr.replace(Name(new, prefix=attr.prefix))
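# A minimal before/after sketch of what this fixer rewrites:
#
#   m.im_func   ->  m.__func__
#   m.im_self   ->  m.__self__
#   m.im_class  ->  m.__self__.__class__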
|
memtoko/django
|
refs/heads/master
|
tests/m2m_regress/models.py
|
63
|
from django.contrib.auth import models as auth
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# No related name is needed here, since symmetrical relations are not
# explicitly reversible.
@python_2_unicode_compatible
class SelfRefer(models.Model):
name = models.CharField(max_length=10)
references = models.ManyToManyField('self')
related = models.ManyToManyField('self')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
# Regression for #11956 -- a many-to-many relation to the base class
@python_2_unicode_compatible
class TagCollection(Tag):
tags = models.ManyToManyField(Tag, related_name='tag_collections')
def __str__(self):
return self.name
# A related_name is required on one of the ManyToManyField entries here because
# they are both addressable as reverse relations from Tag.
@python_2_unicode_compatible
class Entry(models.Model):
name = models.CharField(max_length=10)
topics = models.ManyToManyField(Tag)
related = models.ManyToManyField(Tag, related_name="similar")
def __str__(self):
return self.name
# Two models both inheriting from a base model with a self-referential m2m field
class SelfReferChild(SelfRefer):
pass
class SelfReferChildSibling(SelfRefer):
pass
# Many-to-Many relation between models, where one of the PK's isn't an Autofield
class Line(models.Model):
name = models.CharField(max_length=100)
class Worksheet(models.Model):
id = models.CharField(primary_key=True, max_length=100)
lines = models.ManyToManyField(Line, blank=True)
# Regression for #11226 -- A model with the same name as another one to
# which it has an m2m relation. This shouldn't cause a name clash between
# the automatically created m2m intermediary table FK field names when
# running migrate
class User(models.Model):
name = models.CharField(max_length=30)
friends = models.ManyToManyField(auth.User)
class BadModelWithSplit(models.Model):
name = models.CharField(max_length=1)
def split(self):
raise RuntimeError('split should not be called')
class Meta:
abstract = True
class RegressionModelSplit(BadModelWithSplit):
"""
Model with a split method should not cause an error in add_lazy_relation
"""
others = models.ManyToManyField('self')
|
crodrigues96/mtasa-blue
|
refs/heads/master
|
vendor/google-breakpad/src/tools/gyp/test/win/gyptest-link-force-symbol-reference.py
|
237
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure ForceSymbolReference is translated properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('force-symbol-reference.gyp', chdir=CHDIR)
test.build('force-symbol-reference.gyp', test.ALL, chdir=CHDIR)
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_force_reference.exe', chdir=CHDIR))
if '?x@@YAHXZ:' not in output or '?y@@YAHXZ:' not in output:
test.fail_test()
test.pass_test()
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/completion/dunderPrepare.after.py
|
18
|
class A:
@classmethod
def __prepare__(metacls, name, bases):
class B:
@classmethod
def __prepare__(metacls, name, bases):
class C:
@classmethod
@decorator
def __prepare__(metacls, name, bases):
|
reinout/django
|
refs/heads/master
|
django/contrib/flatpages/migrations/__init__.py
|
12133432
| |
rajendrakrp/GeoMicroFormat
|
refs/heads/master
|
django/conf/locale/sr/__init__.py
|
12133432
| |
Steffo99/royal-bot
|
refs/heads/master
|
bot.py
|
1
|
# -*- coding: utf-8 -*-
import json
import time
import filemanager
import telegram
import steam
import random
import osu
import lol
import discord
import subprocess
import sm.steammatch as steammatch
import db
import cairosvg
# List of the usernames of RYG members
royalgames = json.loads(filemanager.readfile("db.json"))
# String holding the list of free LoL champions
lolfreestring = str()
# List of "stagismo" blame targets
stagismo = ["dello stagista", "degli sposi", "di Santinelli", "di Sensei", "di Steffo", "di Spaggia",
"della sedia", "di Satana", "del Sangue (degli occhi di Adry)", "del Sale",
"del Serpente", "della Samsung", "di /smecds", "della succursale", "di succ",
"di Sans", "di [SiivaGunner](https://www.youtube.com/channel/UC9ecwl3FTG66jIKA9JRDtmg)",
"di saaaaaas", "del semaforo", "della Seriale", "di Sistemi", "della Supercell",
"di Santaclaus", "dei Sims", "dei Santi", "di SES2017", "di Salvini", "della scolopendra"]
random.seed()
# The bot's functions live here, so the code is a bit more readable
def wow():
print("@" + username + ": WOW!")
telegram.sendmessage("Wow. Impressionante.", sentin, source)
def ahnonlosoio():
print("@" + username + ": /ahnonlosoio")
# Reply with "Ah, I don't know either!"
telegram.sendmessage("Ah, non lo so nemmeno io!", sentin, source)
def ciaostefanino():
print("@" + username + ": /ciaostefanino")
# Reply by greeting Stefanino.
telegram.sendmessage("Ciao Stefanino!", sentin, source)
if username != "Steffo":
telegram.sendmessage("Steffo dice:\n\"Me la pagherai!\"", fromuser, source)
else:
telegram.sendmessage("Sei improvvisamente impazzito o cosa?", fromuser, source)
def ciaoruozi():
print("@" + username + ": /ciaoruozi")
# Say hi to Ruozi.
if username.lower() == "ruozir":
# Send a random message from the list
chosen_msg = random.sample(["Ciao me!",
"Ciao ciao ciao!",
"1 ciaoruozi = 1000€ per me",
"Ruozi si dice: #CiaoRuozi",
"Ciao eh me!",
"Ehi me, ma ciao!",
"Ma ciao me stesso!",
"Me me me! Ciao!"], 1)[0]
telegram.sendmessage(chosen_msg, sentin, source)
else:
# Send a random message from the list
chosen_msg = random.sample(["Ciao Ruozi!",
"Ciao ciao ciao!",
"1 ciaoruozi = 1 prayer",
"La RYG dice: #CiaoRuozi",
"Ciao eh Ruozi!",
"Ehi Ruozi, ma ciao!",
"Ma ciao Ruozi!",
"Ruozi ruozi ruozi! Ciao!"], 1)[0]
telegram.sendmessage(chosen_msg, sentin, source)
def ombromanto():
print("@" + username + ": /ombromanto")
# Who is Ombromanto, anyway?
telegram.sendmessage("Ombromanto è @Dailir!", sentin, source)
def steamplayers():
print("@" + username + ": /steamplayers")
# Tell Telegram the message was received and show "Royal Bot is typing".
telegram.sendchataction(sentin)
# If an AppID was specified...
if len(cmd) >= 2:
n = steam.getnumberofcurrentplayers(cmd[1])
# If a response is received...
if n is None:
telegram.sendmessage(chr(9888) + " L'app specificata non esiste!", sentin, source)
else:
name = steam.getschemaforgame(cmd[1])['game']['gameName']
telegram.sendmessage("In questo momento, *{n}* persone stanno giocando a "
"[{name}](https://steamdb.info/app/{id}/graphs/)."
.format(n=str(n), name=name, id=cmd[1]), sentin, source)
else:
telegram.sendmessage(chr(9888) + ' Non hai specificato un AppID!\n'
'La sintassi corretta è /playing <AppID>.', sentin, source)
def ehoh():
print("@" + username + ": /ehoh")
# Reply with "Eh, oh. These things happen."
telegram.sendmessage("Eh, oh. Sono cose che capitano.", sentin, source)
def sbam():
print("@" + username + ": /sbam")
# Send the audio containing the "sbam" of every Royal Games member.
telegram.senddocument('BQADAgADBwMAAh8GgAGSsR4rwmk_LwI', sentin)
def osucmd():
print("@" + username + ": /osu")
# Show the most recent osu! score
# Tell Telegram the message was received.
telegram.sendchataction(sentin)
# Find the specified username
# If a username was specified
if len(cmd) >= 2:
# Find the game mode
# 0 = osu!
# 1 = osu!taiko
# 2 = osu!catch
# 3 = osu!mania
# If a mode was specified
if len(cmd) >= 3:
# Use the specified mode
mode = int(cmd[2])
else:
# Default to the osu! standard mode
mode = 0
# Try sending a request to the osu! servers for the most recently played song
try:
r = osu.getuserrecent(cmd[1], mode)
# If the function raises an error, report it on Telegram and prevent a crash.
except NameError:
telegram.sendmessage(chr(9888) + " Errore nella richiesta ai server di Osu!", sentin,
source)
# If everything went well, carry on!
else:
# If any mods are enabled...
if "enabled_mods" in r:
mods = osu.listmods(r['enabled_mods'])
else:
mods = ""
# Spell out what the F and X ranks mean
if r['rank'] == 'F':
r['rank'] = 'Failed'
elif r['rank'] == 'X':
r['rank'] = 'Unranked'
if mode == 0:
# Show the information for the osu! standard mode
telegram.sendmessage("*osu!*\n"
"[Beatmap {0}](https://osu.ppy.sh/b/{0})\n"
"*{1}*\n"
"{2}\n"
"*Punti*: {3}\n"
"*Combo* x{4}\n"
"*300*: {5}\n"
"*100*: {6}\n"
"*50*: {7}\n"
"*Awesome*: {8}\n"
"*Good*: {9}\n"
"*Miss*: {10}"
.format(r['beatmap_id'],
r['rank'],
mods,
r['score'],
r['maxcombo'],
r['count300'],
r['count100'],
r['count50'],
r['countgeki'],
r['countkatu'],
r['countmiss']), sentin, source)
elif mode == 1:
# Show the information for osu!taiko
telegram.sendmessage("*osu!taiko*\n"
"[Beatmap {0}](https://osu.ppy.sh/b/{0})\n"
"*{1}*\n"
"{2}\n"
"*Punti*: {3}\n"
"*Combo* x{4}\n"
"*Great*: {5}\n"
"*Good*: {6}\n"
"_Large_ *Great*: {7}\n"
"_Large_ *Good*: {8}\n"
"*Miss*: {9}"
.format(r['beatmap_id'],
r['rank'],
mods,
r['score'],
r['maxcombo'],
r['count300'],
r['count100'],
r['countgeki'],
r['countkatu'],
r['countmiss']), sentin, source)
elif mode == 2:
# TODO: What happened here?
# Show the information for osu!catch
telegram.sendmessage("*osu!catch*\n"
"[Beatmap " + r['beatmap_id'] + "](" + 'https://osu.ppy.sh/b/' + r[
'beatmap_id'] +
")\n*" + r['rank'] + "*\n" + mods +
"\n*Punti*: " + r['score'] + "\n"
"*Combo* x" + r['maxcombo'] + "\n"
"*Fruit*: " +
r['count300'] + "\n"
"*Droplet* _tick_: " + r['count100'] + "\n"
"*Droplet* _trail_: " +
r['count50'] + "\n"
"*Miss*: " + r['countmiss'], sentin, source)
elif mode == 3:
# TODO: What happened here?
# Show the information for osu!mania
telegram.sendmessage("*osu!mania*\n" +
"[Beatmap " + r['beatmap_id'] + "](" + 'https://osu.ppy.sh/b/' + r[
'beatmap_id'] + ")\n*" + r['rank'] + "*\n" + mods +
"\n*Punti*: " + r['score'] + "\n"
"*Combo* x" + r['maxcombo'] + "\n"
"_Rainbow_ *300*: " +
r['countgeki'] + "\n"
"*300*: " + r['count300'] + "\n"
"*100*: " + r[
'count100'] + "\n"
"*200*: " + r['countkatu'] + "\n"
"*50*: " + r[
'count50'] + "\n"
"*Miss*: " + r['countmiss'], sentin, source)
else:
# TODO: Clean up this mess.
if "osu" in royalgames[username.lower()]:
r = osu.getuserrecent(royalgames[username.lower()]['osu'], 0)
if "enabled_mods" in r:
mods = osu.listmods(r['enabled_mods'])
else:
mods = ""
telegram.sendmessage("*osu!*\n"
"[Beatmap {0}](https://osu.ppy.sh/b/{0})\n"
"*{1}*\n"
"{2}\n"
"*Punti*: {3}\n"
"*Combo* x{4}\n"
"*300*: {5}\n"
"*100*: {6}\n"
"*50*: {7}\n"
"*Awesome*: {8}\n"
"*Good*: {9}\n"
"*Miss*: {10}"
.format(r['beatmap_id'],
r['rank'],
mods,
r['score'],
r['maxcombo'],
r['count300'],
r['count100'],
r['count50'],
r['countgeki'],
r['countkatu'],
r['countmiss']), sentin, source)
def roll():
print("@" + username + ": /roll")
# If a number was specified
if len(cmd) >= 2:
if cmd[1] == "tm":
telegram.sendmessage("TM è così grassa che se la lanci rotola!", sentin, source)
return
# Check that it can be converted to an integer.
try:
m = int(cmd[1])
except ValueError:
telegram.sendmessage(chr(9888) + " Il numero specificato non è un intero.", sentin, source)
return
else:
# Set the maximum to 100.
m = 100
# Try to generate a random number.
if m == 34261891881215712181524122318242223183627453833:
telegram.sendmessage("Numero casuale da 1 a _34261891881215712181524122318242223183627453833_:\n"
"*Frank è scarso*", sentin, source)
else:
try:
n = random.randrange(m) + 1
except ValueError:
telegram.sendmessage(chr(9888) + " Il numero specificato non è maggiore o uguale a 0.",
sentin, source)
# If everything went well, show the generated number
else:
telegram.sendmessage("Numero casuale da 1 a " + str(m) + ":\n*" + str(n) + "*", sentin,
source)
def cv():
print("@" + username + ": /cv")
# Get the data from the Royal Games server
r = discord.getwidgetdata("176353500710699008")
# List of everyone online on Discord
tosend = "*Online su Discord, nel server {servername}:*\n".format(servername=r['name'])
# The worst code ever starts here
# oh god, what did I write
# help
for member in r['members']:
m = dict()
if 'bot' not in member or not member['bot']:
# If someone is connected to a voice chat
if 'channel_id' in member:
# Check their voice state (deafened, muted, normal) and pick the appropriate emoji
if member['deaf'] or member['self_deaf']:
m['vemoji'] = chr(128263)
elif member['mute'] or member['self_mute']:
m['vemoji'] = chr(128264)
else:
m['vemoji'] = chr(128266)
m['channelname'] = discord.getchannelname(r, member['channel_id'])
# Otherwise
else:
m['vemoji'] = ""
# Check their status (online, in game, afk) and pick the appropriate emoji
if member['status'] == "online":
m['emoji'] = chr(128309)
elif member['status'] == "idle":
m['emoji'] = chr(9899)
elif member['status'] == "dnd":
m['emoji'] = chr(128308)
else:
# Unknown status. Fallback in case new statuses get added.
m['emoji'] = chr(2573)
# Append the game name to the right of the member name
if 'game' in member:
m['gamename'] = member['game']['name']
# Show the nickname if present, otherwise show the username
if 'nick' in member:
m['name'] = member['nick']
else:
m['name'] = member['username']
tosend += "{emoji} {name}".format(emoji=m['emoji'], name=m['name'])
if 'channelname' in m:
tosend += " | {vemoji} *{channelname}*".format(vemoji=m['vemoji'], channelname=m['channelname'])
if 'gamename' in m:
tosend += " | _{gamename}_".format(gamename=m['gamename'])
tosend += "\n"
# Check whether the user is Royal Music
elif member['id'] == "176358898851250176":
if 'game' in member:
tosend += "{emoji} *{channelname}* | {songname}\n" \
.format(emoji="\U0001F3B5", channelname=discord.getchannelname(r, member['channel_id']),
songname=member['game']['name'])
telegram.sendmessage(tosend, sentin, source)
def online():
# List of everyone online on Steam
print("@" + username + ": /online ")
telegram.sendmessage("_Funzione temporaneamente disattivata._", sentin, source)
# Tell Telegram the message was received.
# telegram.sendchataction(sentin)
# if len(cmd) >= 2:
# if cmd[1].lower() == "help":
# telegram.sendmessage(chr(128309) + " Online\n" +
# chr(128308) + " In gioco | Occupato\n" +
# chr(9899) + " Assente | Inattivo\n" +
# chr(128310) + " Disponibile per scambiare\n" +
# chr(128311) + " Disponibile per giocare", sentin, source)
# else:
# # String used to fetch information about all users in a single Steam request
# userids = str()
# for membro in royalgames:
# if "steam" in royalgames[membro]:
# userids += str(royalgames[membro]["steam"]) + ','
# tosend = "*Su Steam ora:*\n"
# r = steam.getplayersummaries(userids)
# for player in r:
# # In game
# if 'gameextrainfo' in player:
# tosend += chr(128308) + " _" + player['gameextrainfo'] + "_ |"
# elif 'gameid' in player:
# tosend += chr(128308) + " _" + player['gameid'] + "_ |"
# # Online
# elif player['personastate'] == 1:
# tosend += chr(128309)
# # Busy
# elif player['personastate'] == 2:
# tosend += chr(128308)
# # Away or Inactive
# elif player['personastate'] == 3 or player['personastate'] == 4:
# tosend += chr(9899)
# # Available to trade
# elif player['personastate'] == 5:
# tosend += chr(128310)
# # Available to play
# elif player['personastate'] == 6:
# tosend += chr(128311)
# if player['personastate'] != 0:
# tosend += " " + player['personaname'] + "\n"
# else:
# telegram.sendmessage(tosend, sentin, source)
def shrek():
# Send the "So much to do, so much to see" audio
print("@" + username + ": /shrekt ")
telegram.senddocument("BQADBAADsQADiBjiAqYN-EBXASyhAg", sentin)
def diario():
# Add a line to the Royal Games diary
print("@" + username + ": /diario ")
if len(cmd) > 1:
entry = text.split(" ", 1)[1]
if entry.isprintable():
entry = entry.replace("\n", " ")
fdiario = filemanager.readfile("diario.txt")
fdiario += str(int(time.time())) + "|" + entry + "\n"
filemanager.writefile("diario.txt", fdiario)
telegram.sendmessage("Aggiunto al diario RYG.", sentin, source)
else:
telegram.sendmessage(chr(9888) + " Errore nella scrittura del messaggio.\n"
"Il messaggio non è compatibile con il diario.\n"
"Probabilmente contiene emoji o caratteri speciali non visualizzabili.",
sentin, source)
else:
telegram.sendmessage(chr(9888) + " Non hai scritto niente sul diario!\n"
"Sintassi corretta: /diario _quello che vuoi scrivere_",
sentin, source)
def leggi():
# Read from the Royal Games diary
print("@" + username + ": /leggi")
if len(cmd) == 1:
telegram.sendmessage("[Apri il diario RYG](http://royal.steffo.me/diario.htm)!\n_(Puoi visualizzare un elemento casuale scrivendo /leggi random o leggere un elemento specifico con /leggi [numero])_", sentin, source)
elif cmd[1] == "random":
fdiario = open("diario.txt")
ldiario = fdiario.readlines()
n = random.randrange(len(ldiario))
entry = ldiario[n].strip("\n").split("|")
telegram.sendmessage(entry[1], sentin, source)
fdiario.close()
else:
try:
index = int(cmd[1])
except ValueError:
telegram.sendmessage(chr(9888) + "Il valore specificato non é un numero.", sentin, source)
return
fdiario = open("diario.txt")
ldiario = fdiario.readlines()
fdiario.close()
try:
entry = ldiario[index].strip("\n").split("|")
except IndexError:
telegram.sendmessage(chr(9888) + "Non esiste una citazione con quel numero.", sentin,source)
return
telegram.sendmessage(entry[1], sentin, source)
def balurage():
print("@" + username + ": /balurage")
# Reply with a comment about E3.
tosend = str()
try:
ragelevel = int(filemanager.readfile("ragelevel.txt"))
except ValueError:
ragelevel = 0
ragelevel += 1
filemanager.writefile("ragelevel.txt", str(ragelevel))
for rage in range(0, ragelevel):
tosend += "MADDEN "
telegram.sendmessage(tosend, sentin, source)
def lolfree():
# Show this week's free LoL champions
print("@" + username + ": /lolfree")
# Tell Telegram the message was received.
telegram.sendchataction(sentin)
ora = time.gmtime()
if len(cmd) > 1:
refresh_requested = cmd[1].startswith("refresh")
else:
refresh_requested = False
# Check whether the data has already been downloaded.
global lolfreestring
if lolfreestring == "" or refresh_requested:
# Build a fresh set of data.
print("Aggiornamento champ gratuiti di League of Legends...")
lolfreestring = "Champion gratuiti del `" + str(ora.tm_mday) + "/" + str(ora.tm_mon) + "/" + \
str(ora.tm_year) + " " + str(ora.tm_hour) + ":" + str(ora.tm_min) + "`\n"
r = lol.getfreerotation()
for champion in r:
staticdata = lol.getchampionstaticdata(champion['id'])
lolfreestring += "*" + staticdata['name'] + "* " + staticdata['title'] + '\n'
print("Completato.")
telegram.sendmessage(lolfreestring, sentin, source)
def getrygimage():
# Get the link to your Royal Games profile image.
print("@" + username + ": /getrygimage")
cmd = text.split(" ", 1)
if len(cmd) > 1:
if len(cmd[1]) > 2:
telegram.sendmessage("Puoi mettere solo due lettere.")
else:
# FIXME: absolute paths
directory = "/var/www/html/rygimages/{}.png".format(cmd[1])
infile = open("basiclogo.svg", "rb")
# FIXME: possible exploit here
indata = infile.read().replace(b"REPLACEME", cmd[1].encode("utf-8"))
infile.close()
try:
outfile = open(directory, "x")
outfile.write("")
outfile.close()
except FileExistsError:
pass
outfile = open(directory, "wb")
outdata = cairosvg.svg2png(bytestring=indata)
print(outdata)
outfile.write(outdata)
outfile.close()
telegram.sendmessage("[Scarica](http://royal.steffo.me/rygimages/{}.png)"
" la tua immagine del profilo Royal Games!\n_(Tanto non funziona.)_".format(cmd[1]),
sentin, source)
def ciaospaggia():
# Good morning, little star!
print("@" + username + ": /ciaospaggia")
telegram.sendmessage("Ma buongiorno, [Stellina](https://telegram.me/doom_darth_vader)!", sentin, source)
def smecds():
# In my opinion, it's the fault of...
print("@" + username + ": /smecds")
accusato = random.sample(stagismo, 1)[0]
telegram.sendmessage("Secondo me è colpa {accusato}...".format(accusato=accusato), sentin, source)
def version():
# Show the Royal Bot version
# UNIX-only...?
print("@" + username + ": /version")
gitmsg = subprocess.check_output(["git", "describe"])
telegram.sendmessage(gitmsg, sentin, source)
def match():
# Show all the games shared between x people.
print("@" + username + ": /match")
telegram.sendmessage("_Funzione temporaneamente disattivata._", sentin, source)
# Tell Telegram the message was received.
# telegram.sendchataction(sentin)
# tobematched = list()
# if len(cmd) > 2:
# del cmd[0]
# for name in cmd:
# userdata = db.findbyname(name)
# if userdata is not None and 'steam' in userdata:
# if userdata['steam'] not in tobematched:
# tobematched.append(userdata['steam'])
# if len(tobematched) > 1:
# m = list(steammatch.and_games(tobematched))
# if len(m) > 0:
# # Build the message
# tosend = "*Giochi in comune tra questi utenti:*\n"
# for game in m:
# tosend += "- {game}\n".format(game=game)
# # Send the message
# telegram.sendmessage(tosend, sentin, source)
# else:
# telegram.sendmessage("*Giochi in comune tra questi utenti:*\n_nessuno_", sentin, source)
# else:
# telegram.sendmessage(chr(9888) + "Non sono stati specificati abbastanza utenti per eseguire l'azione.",
# sentin, source)
def share():
# Show all the games shared between x people.
print("@" + username + ": /share")
telegram.sendmessage("_Funzione temporaneamente disattivata._", sentin, source)
# Tell Telegram the message was received.
# telegram.sendchataction(sentin)
# tobematched = list()
# if len(cmd) > 2:
# del cmd[0]
# for name in cmd:
# userdata = db.findbyname(name)
# if userdata is not None and 'steam' in userdata:
# if userdata['steam'] not in tobematched:
# tobematched.append(userdata['steam'])
# if len(tobematched) == 2:
# tosend = str()
# # Games the first user has but the second doesn't
# d = list(steammatch.diff_games(tobematched[0], tobematched[1]))
# if len(d) > 0:
# # Build the message
# tosend += "*Giochi che ha @{primo} ma non @{secondo}:*\n".format(primo=cmd[0], secondo=cmd[1])
# for game in d:
# tosend += "- {game}\n".format(game=game)
# else:
# tosend += "_@{secondo} ha tutti i giochi che ha @{primo}_.\n"
# telegram.sendmessage(tosend, sentin, source)
# else:
# telegram.sendmessage(chr(9888) + "Non è stato specificato un numero adeguato di utenti per eseguire l'azione.",
# sentin, source)
# Aliases for every command. Typing that string in chat triggers the corresponding function.
aliases = {
"ahnonlosoio": ahnonlosoio,
"ahboh": ahnonlosoio,
"ciaostefanino": ciaostefanino,
"balurage": balurage,
"madden": balurage,
"ciaoruozi": ciaoruozi,
"ciaospaggia": ciaospaggia,
"buongiornostellina": ciaospaggia,
"stellina": ciaospaggia,
"ehoh": ehoh,
"sbam": sbam,
"rekt": sbam,
"osu": osucmd,
"roll": roll,
"cv": cv,
"discord": cv,
"shrek": shrek,
"diario": diario,
"d": diario,
"leggi": leggi,
"match": match,
"lolfree": lolfree,
"legoflegend": lolfree,
"getrygimage": getrygimage,
"version": version,
"smecds": smecds,
"online": online,
"steam": online,
"wow": wow,
"share": share
}
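# Dispatch sketch: a message like "/diario hello world" becomes
# cmd = ["/diario", "hello", "world"], cmd[0] is stripped to "diario", and
# aliases["diario"]() runs, reading the raw text from the shared globals.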
# Main bot loop
print("Bot avviato!")
while True:
try:
# Fetch the next update.
msg = telegram.getupdates()
# If the message is not a service notification...
if 'edit' in msg:
if msg['edit']:
if 'text' in msg['edit_data']:
# Save it into a string
text = msg['edit_data']['text']
# Get the ID of the chat it was sent in
sentin = msg['edit_data']['chat']['id']
# ID of the received message
source = msg['edit_data']['message_id']
if 'username' in msg['edit_data']['from']:
# Save the username if it exists
username = msg['edit_data']['from']['username']
else:
# Otherwise, save the user ID
username = str(msg['edit_data']['from']['id'])
# If you are a Royal Games member
if db.findbyname(username) in royalgames:
# Recognize the command.
if text.startswith('wow'):
wow()
if 'text' in msg:
# Save it into a string
text = msg['text']
# Get the ID of the chat it was sent in
sentin = msg['chat']['id']
# Person who sent the message
fromuser = msg['from']['id']
# ID of the received message
source = msg['message_id']
# Name to show in the console, to figure out who on earth is sending strange messages
if 'username' in msg['from']:
# Save the username if it exists
username = msg['from']['username']
else:
# Otherwise, save the user ID
username = str(msg['from']['id'])
# If you are a Royal Games member
if db.findbyname(username):
# Recognize the command and split it into command and arguments.
cmd = text.lower().split(" ")
# Strip the trailing @RoyalBot from the command
cmd[0] = cmd[0].replace("@royalbot", "").replace("/", "")
# Try to run the command. If it is not in the alias list, ignore it.
try:
aliases[cmd[0]]()
except KeyError:
print("@" + username + ": comando inesistente")
except Exception as e:
# If an error is raised while running a command, show it in the chat where it happened.
telegram.sendmessage(chr(9762) + " *Errore durante l'esecuzione del comando:\n*"
"{}\n\n"
"Secondo me, è colpa {}.".format(repr(e), random.sample(stagismo, 1)[0]), sentin, source)
print("\033[1mERRORE:\n{}\033[0m".format(repr(e)))
# In debug mode, stop execution
if __debug__:
raise
else:
print("@" + username + " bloccato.")
except Exception as e:
# If the initial phase raises an error, send it to the Royal Games group.
telegram.sendmessage(chr(9762) + " *Errore critico:\n*"
"{}\n\n"
"Secondo me, è colpa {}.".format(repr(e), random.sample(stagismo, 1)[0]), -2141322)
print("\033[1mERRORE CRITICO:\n"
"{0}\033[0m".format(repr(e)))
# In debug mode, stop execution
if __debug__:
raise
|
eBay/restcommander
|
refs/heads/master
|
play-1.2.4/python/Lib/site-packages/Rpyc/Connection.py
|
4
|
import sys
from Boxing import Box, dump_exception, load_exception
from ModuleNetProxy import RootImporter
from Lib import raise_exception, AttrFrontend
FRAME_REQUEST = 1
FRAME_RESULT = 2
FRAME_EXCEPTION = 3
class Connection(object):
"""
the rpyc connection layer (protocol and APIs). generally speaking, the only
things you'll need to access directly from this object are:
* modules - represents the remote python interpreter's modules namespace
* execute - executes the given code on the other side of the connection
* namespace - the namespace in which the code you `execute` resides
the rest of the attributes should be of no interest to you, except maybe
for `remote_conn`, which represents the other side of the connection. it is
unlikely, however, that you'll need to use it (it is used internally).
when you are done using a connection, and wish to release the resources it
holds, you should call close(). you don't have to, but if you don't, the gc
can't release the memory because of cyclic references.
"""
__slots__ = ["_closed", "_local_namespace", "channel", "box", "async_replies",
"sync_replies", "module_cache", "remote_conn", "modules", "namespace"]
def __init__(self, channel):
self._closed = False
self._local_namespace = {}
self.channel = channel
self.box = Box(self)
self.async_replies = {}
self.sync_replies = {}
self.module_cache = {}
self.remote_conn = self.sync_request("handle_getconn")
# user APIs:
self.modules = RootImporter(self)
self.namespace = AttrFrontend(self.remote_conn._local_namespace)
self.execute("")
def __repr__(self):
if self._closed:
return "<%s.%s(closed)>" % (self.__class__.__module__, self.__class__.__name__)
else:
return "<%s.%s(%r)>" % (self.__class__.__module__, self.__class__.__name__, self.channel)
#
# file api layer
#
def close(self):
"""closes down the connection and releases all cyclic dependecies"""
if not self._closed:
self.box.close()
self.channel.close()
self._closed = True
self._local_namespace = None
self.channel = None
self.box = None
self.async_replies = None
self.sync_replies = None
self.module_cache = None
self.modules = None
self.remote_conn = None
self.namespace = None
def fileno(self):
"""connections are select()able"""
return self.channel.fileno()
#
# protocol
#
def send(self, type, seq, obj):
if self._closed:
raise EOFError("the connection is closed")
return self.channel.send(type, seq, self.box.pack(obj))
def send_request(self, handlername, *args):
return self.send(FRAME_REQUEST, None, (handlername, args))
def send_exception(self, seq, exc_info):
self.send(FRAME_EXCEPTION, seq, dump_exception(*exc_info))
def send_result(self, seq, obj):
self.send(FRAME_RESULT, seq, obj)
#
# dispatching
#
def dispatch_result(self, seq, obj):
if seq in self.async_replies:
self.async_replies.pop(seq)(obj, False)
else:
self.sync_replies[seq] = obj
def dispatch_exception(self, seq, obj):
excobj = load_exception(obj)
if seq in self.async_replies:
self.async_replies.pop(seq)(excobj, True)
else:
raise_exception(*excobj)
def dispatch_request(self, seq, handlername, args):
try:
res = getattr(self, handlername)(*args)
except SystemExit:
raise
except:
self.send_exception(seq, sys.exc_info())
else:
self.send_result(seq, res)
def poll(self):
"""if available, serves a single request, otherwise returns (non-blocking serve)"""
if self.channel.is_available():
self.serve()
return True
else:
return False
def serve(self):
"""serves a single request (may block)"""
type, seq, data = self.channel.recv()
if type == FRAME_RESULT:
self.dispatch_result(seq, self.box.unpack(data))
elif type == FRAME_REQUEST:
self.dispatch_request(seq, *self.box.unpack(data))
elif type == FRAME_EXCEPTION:
self.dispatch_exception(seq, self.box.unpack(data))
else:
raise ValueError("invalid frame type (%d)" % (type,))
#
# requests
#
def sync_request(self, handlername, *args):
"""performs a synchronous (blocking) request"""
seq = self.send_request(handlername, *args)
while seq not in self.sync_replies:
self.serve()
return self.sync_replies.pop(seq)
def async_request(self, callback, handlername, *args):
"""performs an asynchronous (non-blocking) request"""
seq = self.send_request(handlername, *args)
self.async_replies[seq] = callback
#
# root requests (not through NetProxies)
#
def rimport(self, modulename):
"""imports a module by name (as a string)"""
if modulename not in self.module_cache:
module = self.sync_request("handle_import", modulename)
self.module_cache[modulename] = module
return self.module_cache[modulename]
def execute(self, expr, mode = "exec"):
"""executes the given code at the remote side of the connection"""
return self.sync_request("handle_execute", expr, mode)
#
# handlers
#
def handle_decref(self, oid):
self.box.decref(oid)
def handle_delattr(self, oid, name):
delattr(self.box[oid], name)
def handle_getattr(self, oid, name):
return getattr(self.box[oid], name)
def handle_setattr(self, oid, name, value):
setattr(self.box[oid], name, value)
def handle_delitem(self, oid, index):
del self.box[oid][index]
def handle_getitem(self, oid, index):
return self.box[oid][index]
def handle_setitem(self, oid, index, value):
self.box[oid][index] = value
def handle_call(self, oid, args, kwargs):
return self.box[oid](*args, **kwargs)
def handle_repr(self, oid):
return repr(self.box[oid])
def handle_str(self, oid):
return str(self.box[oid])
def handle_bool(self, oid):
return bool(self.box[oid])
def handle_import(self, modulename):
return __import__(modulename, None, None, modulename.split(".")[-1])
def handle_getconn(self):
return self
def handle_execute(self, expr, mode):
codeobj = compile(expr, "<from %s>" % (self,), mode)
return eval(codeobj, self._local_namespace)
|
EmadMokhtar/Django
|
refs/heads/master
|
tests/shortcuts/urls.py
|
60
|
from django.urls import path
from . import views
urlpatterns = [
path('render/', views.render_view),
path('render/multiple_templates/', views.render_view_with_multiple_templates),
path('render/content_type/', views.render_view_with_content_type),
path('render/status/', views.render_view_with_status),
path('render/using/', views.render_view_with_using),
]
|
zangruizhe/YouCompleteMe_install_for_centos_6.6
|
refs/heads/master
|
third_party/pythonfutures/concurrent/futures/process.py
|
196
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The following diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Request Q"
"""
from __future__ import with_statement
import atexit
import multiprocessing
import threading
import weakref
import sys
from concurrent.futures import _base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A multiprocessing.Queue of _ResultItems that will be
written to by the worker.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(None)
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException:
e = sys.exc_info()[1]
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
processes: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
nb_shutdown_processes = [0]
def shutdown_one_process():
"""Tell a worker to terminate, which will in turn wake us again"""
call_queue.put(None)
nb_shutdown_processes[0] += 1
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
result_item = result_queue.get(block=True)
if result_item is not None:
work_item = pending_work_items[result_item.work_id]
del pending_work_items[result_item.work_id]
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if _shutdown or executor is None or executor._shutdown_thread:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
while nb_shutdown_processes[0] < len(processes):
shutdown_one_process()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS
# X.
for p in processes:
p.join()
call_queue.close()
return
del executor
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
import os
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# indeterminate limit; assume the limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
self._result_queue = multiprocessing.Queue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
self._processes = set()
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes.add(p)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
self._adjust_process_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
|
trnewman/VT-USRP-daughterboard-drivers
|
refs/heads/master
|
gr-trellis/src/examples/test_sccc_soft.py
|
8
|
#!/usr/bin/env python
from gnuradio import gr
from gnuradio import audio
from gnuradio import trellis
from gnuradio import eng_notation
import math
import sys
import random
import fsm_utils
def run_test (fo,fi,interleaver,Kb,bitspersymbol,K,dimensionality,constellation,N0,seed):
tb = gr.top_block ()
# TX
src = gr.lfsr_32k_source_s()
src_head = gr.head (gr.sizeof_short,Kb/16) # packet size in shorts
s2fsmi = gr.packed_to_unpacked_ss(bitspersymbol,gr.GR_MSB_FIRST) # unpack shorts to symbols compatible with the outer FSM input cardinality
enc_out = trellis.encoder_ss(fo,0) # initial state = 0
inter = trellis.permutation(interleaver.K(),interleaver.INTER(),1,gr.sizeof_short)
enc_in = trellis.encoder_ss(fi,0) # initial state = 0
mod = gr.chunks_to_symbols_sf(constellation,dimensionality)
# CHANNEL
add = gr.add_ff()
noise = gr.noise_source_f(gr.GR_GAUSSIAN,math.sqrt(N0/2),seed)
# RX
metrics_in = trellis.metrics_f(fi.O(),dimensionality,constellation,trellis.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for inner Viterbi
gnd = gr.vector_source_f([0],True);
siso_in = trellis.siso_f(fi,K,0,-1,True,False,trellis.TRELLIS_MIN_SUM) # Put -1 if the Initial/Final states are not set.
deinter = trellis.permutation(interleaver.K(),interleaver.DEINTER(),fi.I(),gr.sizeof_float)
va_out = trellis.viterbi_s(fo,K,0,-1) # Put -1 if the Initial/Final states are not set.
fsmi2s = gr.unpacked_to_packed_ss(bitspersymbol,gr.GR_MSB_FIRST) # pack FSM input symbols to shorts
dst = gr.check_lfsr_32k_s()
tb.connect (src,src_head,s2fsmi,enc_out,inter,enc_in,mod)
tb.connect (mod,(add,0))
tb.connect (noise,(add,1))
tb.connect (add,metrics_in)
tb.connect (gnd,(siso_in,0))
tb.connect (metrics_in,(siso_in,1))
tb.connect (siso_in,deinter,va_out,fsmi2s,dst)
tb.run()
ntotal = dst.ntotal ()
nright = dst.nright ()
runlength = dst.runlength ()
return (ntotal,ntotal-nright)
def main(args):
nargs = len (args)
if nargs == 4:
fname_out=args[0]
fname_in=args[1]
esn0_db=float(args[2]) # Es/No in dB
rep=int(args[3]) # number of times the experiment is run to collect enough errors
else:
sys.stderr.write ('usage: test_sccc_soft.py fsm_name_out fsm_fname_in Es/No_db repetitions\n')
sys.exit (1)
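# A hypothetical invocation (the two FSM spec files are placeholders for
# files such as those shipped in gr-trellis' fsm_files directory):
#   ./test_sccc_soft.py awgn1o2_4.fsm awgn2o3_8.fsm 8.0 100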
# system parameters
Kb=1024*16 # packet size in bits (make it multiple of 16 so it can be packed in a short)
fo=trellis.fsm(fname_out) # get the outer FSM specification from a file
fi=trellis.fsm(fname_in) # get the inner FSM specification from a file
bitspersymbol = int(round(math.log(fo.I())/math.log(2))) # bits per FSM input symbol
if fo.O() != fi.I():
sys.stderr.write ('Incompatible cardinality between outer and inner FSM.\n')
sys.exit (1)
K=Kb/bitspersymbol # packet size in trellis steps
interleaver=trellis.interleaver(K,666) # construct a random interleaver
modulation = fsm_utils.psk8 # see fsm_utils.py for available predefined modulations
dimensionality = modulation[0]
constellation = modulation[1]
if len(constellation)/dimensionality != fi.O():
sys.stderr.write ('Incompatible FSM output cardinality and modulation size.\n')
sys.exit (1)
# calculate average symbol energy
Es = 0
for i in range(len(constellation)):
Es = Es + constellation[i]**2
Es = Es / (len(constellation)/dimensionality)
N0=Es/pow(10.0,esn0_db/10.0); # calculate noise variance
tot_s=0 # total number of transmitted shorts
terr_s=0 # total number of shorts in error
terr_p=0 # total number of packets in error
for i in range(rep):
(s,e)=run_test(fo,fi,interleaver,Kb,bitspersymbol,K,dimensionality,constellation,N0,-long(666+i)) # run experiment with different seed to get different noise realizations
tot_s=tot_s+s
terr_s=terr_s+e
terr_p=terr_p+(terr_s!=0)
if ((i+1)%100==0) : # display progress
print i+1,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
# estimate of the (short or bit) error rate
print rep,terr_p, '%.2e' % ((1.0*terr_p)/(i+1)),tot_s,terr_s, '%.2e' % ((1.0*terr_s)/tot_s)
if __name__ == '__main__':
main (sys.argv[1:])
|
HKuz/Test_Code
|
refs/heads/master
|
LinearAlgebra/vectorTest.py
|
1
|
#!/usr/local/bin/Python3
from vector import Vector
def main():
test_operators = False
test_mag_and_norm = False
test_dot_and_angles = False
test_parallel_orthogonal = False
test_vector_projection = False
test_cross_product = True
test_errors = False # Includes examples to deliberately throw errors
# Test addition, subtraction, and scalar multiplication of vectors
if test_operators:
print("\n=== Addition, Subtraction, and Scalar Multiplication Quiz ===\n")
vec1 = Vector([8.218, -9.341])
vec2 = Vector([-1.129, 2.111])
print(vec1)
print(vec2)
print("Sum: {}\n".format(vec1+vec2))
vec3 = Vector([7.119, 8.215])
vec4 = Vector([-8.223, 0.878])
print(vec3)
print(vec4)
print("Difference: {}\n".format(vec3-vec4))
vec5 = Vector([1.671, -1.012, -0.318])
c = 7.41
print(vec5)
print("Scaled by {}: {}\n".format(c, vec5.scale(c)))
# Test magnitude and normalization of vectors
if test_mag_and_norm:
print("\n=== Magnitude and Normalization Quiz ===\n")
vec6 = Vector([-0.221, 7.437])
print(vec6)
print("Magnitude: {}\n".format(vec6.magnitude()))
vec7 = Vector([8.813, -1.331, -6.247])
print(vec7)
print("Magnitude: {}\n".format(vec7.magnitude()))
vec8 = Vector([5.581, -2.136])
print(vec8)
print("Normalization: {}\n".format(vec8.normalization()))
vec9 = Vector([1.996, 3.108, -4.554])
print(vec9)
print("Normalization: {}\n".format(vec9.normalization()))
# Error test Zero Vector
if test_errors:
vec_zero = Vector([0, 0, 0])
print(vec_zero)
print("Normalization: {}\n".format(vec_zero.normalization()))
# Test dot products and angles between vectors
if test_dot_and_angles:
print("\n=== Dot Products and Angles between Vectors Quiz ===\n")
vec10 = Vector([7.887, 4.138])
vec11 = Vector([-8.802, 6.776])
print(vec10)
print(vec11)
print("Dot product: {}\n".format(vec10.dot_product(vec11)))
vec12 = Vector([-5.955, -4.904, -1.874])
vec13 = Vector([-4.496, -8.755, 7.103])
print(vec12)
print(vec13)
print("Dot product: {}\n".format(vec12.dot_product(vec13)))
vec14 = Vector([3.183, -7.627])
vec15 = Vector([-2.668, 5.319])
print(vec14)
print(vec15)
print("Angle (in radians): {}\n".format(vec14.angle(vec15)))
vec16 = Vector([7.35, 0.221, 5.188])
vec17 = Vector([2.751, 8.259, 3.985])
print(vec16)
print(vec17)
print("Angle (in degrees): {}\n".format(vec16.angle(vec17, True)))
# Not part of the quiz
vec_a = Vector([2, 0])
vec_b = Vector([0, 4])
print(vec_a)
print(vec_b)
print("Dot product: {}".format(vec_a.dot_product(vec_b)))
print("Angle (in radian): {}".format(vec_a.angle(vec_b)))
print("Angle (in degrees): {}\n".format(vec_a.angle(vec_b, True)))
if test_errors:
vec_zero = Vector([0, 0, 0])
print(vec16)
print(vec_zero)
print("Angle (in radians): {}\n".format(vec16.angle(vec_zero)))
# Test Parallel and Orthogonal
if test_parallel_orthogonal:
print("\n=== Parallel and Orthogonal Quiz ===\n")
vec18 = Vector([-7.579, -7.880])
vec19 = Vector([22.737, 23.64])
print(vec18)
print(vec19)
print("Parallel: {}".format(vec18.is_parallel(vec19)))
print("Orthogonal: {}\n".format(vec18.is_orthogonal(vec19)))
vec20 = Vector([-2.029, 9.970, 4.172])
vec21 = Vector([-9.231, -6.639, -7.245])
print(vec20)
print(vec21)
print("Parallel: {}".format(vec20.is_parallel(vec21)))
print("Orthogonal: {}\n".format(vec20.is_orthogonal(vec21)))
vec22 = Vector([-2.328, -7.284, -1.214])
vec23 = Vector([-1.821, 1.072, -2.940])
print(vec22)
print(vec23)
print("Parallel: {}".format(vec22.is_parallel(vec23)))
print("Orthogonal: {}\n".format(vec22.is_orthogonal(vec23)))
vec24 = Vector([2.118, 4.827])
vec25 = Vector([0, 0])
print(vec24)
print(vec25)
print("Parallel: {}".format(vec24.is_parallel(vec25)))
print("Orthogonal: {}\n".format(vec24.is_orthogonal(vec25)))
# Test Projection of Vectors and Component of Vector Orthogonal to b
if test_vector_projection:
print("\n=== Projection of Vectors Quiz ===\n")
vec26 = Vector([3.039, 1.879])
b1 = Vector([0.825, 2.036])
print("V: {}".format(vec26))
print("Basis: {}".format(b1))
print("Projection of V onto Basis: {}\n".format(b1.component_parallel_to(vec26)))
vec27 = Vector([-9.88, -3.264, -8.159])
b2 = Vector([-2.155, -9.353, -9.473])
print("V: {}".format(vec27))
print("Basis: {}".format(b2))
print("Component V orthogonal to Basis: {}\n".format(b2.component_orthogonal_to(vec27)))
vec28 = Vector([3.009, -6.172, 3.692, -2.510])
b3 = Vector([6.404, -9.144, 2.759, 8.718])
print(vec28)
print(b3)
proj = b3.component_parallel_to(vec28)
comp_orth = b3.component_orthogonal_to(vec28)
print("Vparallel: {}".format(proj))
print("Vorthogonal: {}".format(comp_orth))
print("Vparallel + Vorthogonal: {}\n".format(proj + comp_orth))
# Test Cross Product of Two 3D Vectors
if test_cross_product:
print("\n=== Cross Product of Two 3D Vectors Quiz ===\n")
vec29 = Vector([8.462, 7.893, -8.187])
vec30 = Vector([6.984, -5.975, 4.778])
print(vec29)
print(vec30)
print("Cross product: {}\n".format(vec29.cross_product(vec30)))
vec31 = Vector([-8.987, -9.838, 5.031])
vec32 = Vector([-4.268, -1.861, -8.866])
print(vec31)
print(vec32)
print("Area of parallelogram: {}".format(vec31.area_of_parallelogram_with(vec32)))
print("Area of triangle: {}\n".format(vec31.area_of_triangle_with(vec32)))
vec33 = Vector([1.500, 9.547, 3.691])
vec34 = Vector([-6.007, 0.124, 5.772])
print(vec33)
print(vec34)
print("Area of parallelogram: {}".format(vec33.area_of_parallelogram_with(vec34)))
print("Area of triangle: {}\n".format(vec33.area_of_triangle_with(vec34)))
if test_errors:
vec35 = Vector([1.7, 2.3])
vec36 = Vector([9.5, 8.4])
print(vec35)
print(vec36)
print("Cross product: {}".format(vec35.cross_product(vec36)))
print("Area of parallelogram: {}".format(vec35.area_of_parallelogram_with(vec36)))
print("Area of triangle: {}\n".format(vec35.area_of_triangle_with(vec36)))
vec37 = Vector([1.7])
vec38 = Vector([4.6])
print(vec37)
print(vec38)
print("Cross product: {}".format(vec37.cross_product(vec38)))
print("Area of parallelogram: {}".format(vec37.area_of_parallelogram_with(vec38)))
print("Area of triangle: {}\n".format(vec37.area_of_triangle_with(vec38)))
if __name__ == '__main__':
main()
|
sysbot/CouchPotatoServer
|
refs/heads/master
|
libs/enzyme/flv.py
|
180
|
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
from exceptions import ParseError
import core
import logging
import struct
__all__ = ['Parser']
# get logging object
log = logging.getLogger(__name__)
FLV_TAG_TYPE_AUDIO = 0x08
FLV_TAG_TYPE_VIDEO = 0x09
FLV_TAG_TYPE_META = 0x12
# audio flags
FLV_AUDIO_CHANNEL_MASK = 0x01
FLV_AUDIO_SAMPLERATE_MASK = 0x0c
FLV_AUDIO_CODECID_MASK = 0xf0
FLV_AUDIO_SAMPLERATE_OFFSET = 2
FLV_AUDIO_CODECID_OFFSET = 4
FLV_AUDIO_CODECID = (0x0001, 0x0002, 0x0055, 0x0001)
# video flags
FLV_VIDEO_CODECID_MASK = 0x0f
FLV_VIDEO_CODECID = ('FLV1', 'MSS1', 'VP60') # wild guess
FLV_DATA_TYPE_NUMBER = 0x00
FLV_DATA_TYPE_BOOL = 0x01
FLV_DATA_TYPE_STRING = 0x02
FLV_DATA_TYPE_OBJECT = 0x03
FLC_DATA_TYPE_CLIP = 0x04
FLV_DATA_TYPE_REFERENCE = 0x07
FLV_DATA_TYPE_ECMARRAY = 0x08
FLV_DATA_TYPE_ENDOBJECT = 0x09
FLV_DATA_TYPE_ARRAY = 0x0a
FLV_DATA_TYPE_DATE = 0x0b
FLV_DATA_TYPE_LONGSTRING = 0x0c
FLVINFO = {
'creator': 'copyright',
}
class FlashVideo(core.AVContainer):
"""
Experimental parser for Flash videos. Reporting the video resolution
requires certain flags to be set in the file; in most cases they are
missing, so that information is usually not provided.
"""
table_mapping = { 'FLVINFO' : FLVINFO }
def __init__(self, file):
core.AVContainer.__init__(self)
self.mime = 'video/flv'
self.type = 'Flash Video'
data = file.read(13)
if len(data) < 13 or struct.unpack('>3sBBII', data)[0] != 'FLV':
raise ParseError()
for _ in range(10):
if self.audio and self.video:
break
data = file.read(11)
if len(data) < 11:
break
chunk = struct.unpack('>BH4BI', data)
size = (chunk[1] << 8) + chunk[2]
if chunk[0] == FLV_TAG_TYPE_AUDIO:
flags = ord(file.read(1))
if not self.audio:
a = core.AudioStream()
a.channels = (flags & FLV_AUDIO_CHANNEL_MASK) + 1
srate = (flags & FLV_AUDIO_SAMPLERATE_MASK)
a.samplerate = (44100 << (srate >> FLV_AUDIO_SAMPLERATE_OFFSET) >> 3)
codec = (flags & FLV_AUDIO_CODECID_MASK) >> FLV_AUDIO_CODECID_OFFSET
if codec < len(FLV_AUDIO_CODECID):
a.codec = FLV_AUDIO_CODECID[codec]
self.audio.append(a)
file.seek(size - 1, 1)
elif chunk[0] == FLV_TAG_TYPE_VIDEO:
flags = ord(file.read(1))
if not self.video:
v = core.VideoStream()
codec = (flags & FLV_VIDEO_CODECID_MASK) - 2
if codec < len(FLV_VIDEO_CODECID):
v.codec = FLV_VIDEO_CODECID[codec]
# width and height are in the meta packet, but I have
# no file with such a packet inside. So maybe we have
# to decode some parts of the video.
self.video.append(v)
file.seek(size - 1, 1)
elif chunk[0] == FLV_TAG_TYPE_META:
log.info(u'metadata %r', str(chunk))
metadata = file.read(size)
try:
while metadata:
length, value = self._parse_value(metadata)
if isinstance(value, dict):
log.info(u'metadata: %r', value)
if value.get('creator'):
self.copyright = value.get('creator')
if value.get('width'):
self.width = value.get('width')
if value.get('height'):
self.height = value.get('height')
if value.get('duration'):
self.length = value.get('duration')
self._appendtable('FLVINFO', value)
if not length:
# parse error
break
metadata = metadata[length:]
except (IndexError, struct.error, TypeError):
pass
else:
log.info(u'unknown %r', str(chunk))
file.seek(size, 1)
file.seek(4, 1)
def _parse_value(self, data):
"""
Parse the next metadata value.
"""
if ord(data[0]) == FLV_DATA_TYPE_NUMBER:
value = struct.unpack('>d', data[1:9])[0]
return 9, value
if ord(data[0]) == FLV_DATA_TYPE_BOOL:
return 2, bool(data[1])
if ord(data[0]) == FLV_DATA_TYPE_STRING:
length = (ord(data[1]) << 8) + ord(data[2])
return length + 3, data[3:length + 3]
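# e.g. the bytes '\x02\x00\x03foo' encode the 3-character string 'foo',
# so this branch returns (6, 'foo').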
if ord(data[0]) == FLV_DATA_TYPE_ECMARRAY:
init_length = len(data)
num = struct.unpack('>I', data[1:5])[0]
data = data[5:]
result = {}
for _ in range(num):
length = (ord(data[0]) << 8) + ord(data[1])
key = data[2:length + 2]
data = data[length + 2:]
length, value = self._parse_value(data)
if not length:
return 0, result
result[key] = value
data = data[length:]
return init_length - len(data), result
log.info(u'unknown code: %x. Stop metadata parser', ord(data[0]))
return 0, None
Parser = FlashVideo
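# A minimal usage sketch (the file name is hypothetical; the parser expects
# an already-opened binary file object):
#
#     f = open('clip.flv', 'rb')
#     info = Parser(f)
#     print info.mime, info.type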
|
dednal/chromium.src
|
refs/heads/nw12
|
chrome/common/extensions/docs/examples/apps/hello-python/httplib2/iri2uri.py
|
885
|
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
escape_range = [
(0xA0, 0xD7FF ),
(0xE000, 0xF8FF ),
(0xF900, 0xFDCF ),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD ),
(0x20000, 0x2FFFD ),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD ),
(0x50000, 0x5FFFD ),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD ),
(0x80000, 0x8FFFD ),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD ),
(0xB0000, 0xBFFFD ),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD ),
(0xE1000, 0xEFFFD),
(0xF0000, 0xFFFFD ),
(0x100000, 0x10FFFD)
]
def encode(c):
retval = c
i = ord(c)
for low, high in escape_range:
if i < low:
break
if i >= low and i <= high:
retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
break
return retval
def iri2uri(uri):
"""Convert an IRI to a URI. Note that IRIs must be
passed in a unicode strings. That is, do not utf-8 encode
the IRI before passing it into the function."""
if isinstance(uri ,unicode):
(scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
authority = authority.encode('idna')
# For each character in 'ucschar' or 'iprivate'
# 1. encode as utf-8
# 2. then %-encode each octet of that utf-8
uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
uri = "".join([encode(c) for c in uri])
return uri
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test_uris(self):
"""Test that URIs are invariant under the transformation."""
invariant = [
u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
u"http://www.ietf.org/rfc/rfc2396.txt",
u"ldap://[2001:db8::7]/c=GB?objectClass?one",
u"mailto:John.Doe@example.com",
u"news:comp.infosystems.www.servers.unix",
u"tel:+1-816-555-1212",
u"telnet://192.0.2.16:80/",
u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
for uri in invariant:
self.assertEqual(uri, iri2uri(uri))
def test_iri(self):
""" Test that the right type of escaping is done for each part of the URI."""
self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
unittest.main()
|
OstapHEP/ostap
|
refs/heads/master
|
ostap/parallel/parallel_project.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/parallel/parallel_project.py
# Paralllel "project" from loooong chain/trees objects
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2014-09-23
# =============================================================================
"""Paralllel ``project'' from loooong chain/trees objects
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__ = "2011-06-07"
__all__ = (
'cproject' , ## parallel project from looong TChain
'tproject' , ## parallel project from looong TTree
)
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.parallel.project' )
else : logger = getLogger ( __name__ )
# =============================================================================
import ROOT
from ostap.parallel.parallel import Task, WorkManager
import ostap.core.pyrouts
import ostap.trees.trees
# =============================================================================
## The simple task object for more efficient projection of loooong chains/trees
# into histograms
# @see GaudiMP.Parallel
# @see GaudiMP.Parallel.Task
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2014-09-23
class ProjectTask(Task) :
"""The simple task object for the efficient parallel
projection of looooooong TChains/TTrees into histograms
"""
## constructor: histogram
def __init__ ( self , histo , what , cuts = '' ) :
"""Constructor: the histogram
>>> histo = ...
>>> task = ProjectTask ( histo )
"""
self.histo = histo
self.what = what
self.cuts = str ( cuts )
self.histo.Reset()
## local initialization (executed once in parent process)
def initialize_local ( self ) :
"""Local initialization (executed once in parent process)
"""
import ROOT,ostap.core.pyrouts
self.__output = 0, self.histo.clone()
## remote initialization (executed for each sub-process)
def initialize_remote ( self , jobid = -1 ) :
"""Remote initialization (executed for each sub-processs
"""
import ROOT,ostap.core.pyrouts
self.__output = 0, self.histo.clone()
## finalization (executed at the end at parent process)
def finalize ( self ) : pass
## the actual processing
# ``item'' is assumed to be a tuple/list:
# - the file name
# - the tree name in the file
# - the variable/expression/expression list of quantities to project
# - the selection/weighting criteria
# - the first entry in tree to process
# - number of entries to process
def process ( self , jobid , item ) :
"""The actual processing
``item'' is assumed to be a tuple-like entity:
- the file name
- the tree name in the file
- the variable/expression/expression list of quantities to project
- the selection/weighting criteria
- the first entry in tree to process
- number of entries to process
"""
import ROOT
from ostap.logger.utils import logWarning
with logWarning() :
import ostap.core.pyrouts
import ostap.trees.trees
import ostap.histos.histos
import ostap.frames.frames
from ostap.trees.trees import Chain, Tree
input = Chain ( name = item.name ,
files = item.files ,
first = item.first ,
nevents = item.nevents )
chain = input.chain
first = input.first
nevents = input.nevents
## use the regular projection
from ostap.trees.trees import _tt_project_
## Create the output histogram NB! (why here???)
from ostap.core.core import ROOTCWD
with ROOTCWD() :
ROOT.gROOT.cd()
histo = self.histo.Clone ()
self.__output = 0 , histo
from ostap.trees.trees import _tt_project_
self.__output = _tt_project_ ( tree = chain , histo = histo ,
what = self.what , cuts = self.cuts ,
options = '' ,
nentries = nevents , firstentry = first )
return self.__output
## merge results
def merge_results ( self , result , jobid ) :
import ostap.histos.histos
if not self.__output : self.__output = result
else :
filtered = self.__output[0] + result[0]
self.__output[1].Add ( result[1] )
self.__output = filtered, self.__output[1]
## result[1].Delete ()
## get the results
def results ( self ) :
return self.__output
# =============================================================================
## make a projection of the loooooooong chain into histogram using
# multiprocessing functionality for per-file parallelisation
# @code
# >>> chain = ... ## large chain
# >>> histo = ... ## histogram template
# >>> project ( chain , histo , 'mass' , 'pt>10' )
# >>> chain.pproject ( histo , 'mass' , 'pt>0' ) ## ditto
# >>> chain.cproject ( histo , 'mass' , 'pt>0' ) ## ditto
# @endcode
# For a 12-core machine, a clear speedup factor of about 8 is achieved
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2014-09-23
def cproject ( chain ,
histo ,
what ,
cuts ,
nentries = -1 ,
first = 0 ,
chunk_size = -1 ,
max_files = 5 ,
silent = False , **kwargs ) :
"""Make a projection of the loooong chain into histogram
>>> chain = ... ## large chain
>>> histo = ... ## histogram template
>>> cproject ( chain , histo , 'mass' , 'pt>10' )
>>> chain.pproject ( histo , 'mass' , 'pt>0' ) ## ditto
>>> chain.cproject ( histo , 'mass' , 'pt>0' ) ## ditto
For a 12-core machine, a clear speedup factor of about 8 is achieved
"""
#
from ostap.trees.trees import Chain
ch = Chain ( chain , first = first , nevents = nentries )
task = ProjectTask ( histo , what , cuts )
wmgr = WorkManager ( silent = silent , **kwargs )
wmgr.process ( task , ch.split ( chunk_size = chunk_size , max_files = max_files ) )
## unpack results
_f , _h = task.results ()
filtered = _f
histo += _h
del _h
return filtered , histo
ROOT.TChain.cproject = cproject
ROOT.TChain.pproject = cproject
# =============================================================================
## make a projection of the loooooooong tree into histogram using
# multiprocessing functionality for per-file parallelisation
# @code
#
# >>> tree = ... ## large tree
# >>> histo = ... ## histogram template
# >>> tproject ( tree , histo , 'mass' , 'pt>10' , maxentries = 1000000 )
# >>> tree.pproject ( histo , 'mass' , 'pt>10' ) ## ditto
# @endcode
# - significant gain can be achieved for very large TTrees with complicated expressions and cuts
# - the <code>chunk_size</code> parameter should be rather large
# @param tree the tree
# @param histo the histogram
# @param what variable/expression/varlist to be projected
# @param cuts selection/weighting criteria
# @param nentries number of entries to process (<=0: all entries in the tree)
# @param first the first entry to process
# @param chunk_size chunk size for parallel processing
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2014-09-23
def tproject ( tree , ## the tree
histo , ## histogram
what , ## variable/expression/list to be projected
cuts = '' , ## selection/weighting criteria
nentries = -1 , ## number of entries
first = 0 , ## the first entry
chunk_size = 1000000 , ## chunk size
max_files = 50 , ## not-used ....
silent = False , **kwargs ) : ## silent processing
"""Make a projection of the loooong tree into histogram
>>> tree = ... ## large chain
>>> histo = ... ## histogram template
>>> tproject ( tree , histo , 'mass' , 'pt>10' )
>>> tree.pproject ( histo , 'mass' , 'pt>10' ) ## ditto
- significant gain can be achieved for very large TTrees with complicated expressions and cuts
- the chunk_size parameter should be rather large
Arguments:
- tree the tree
- histo the histogram
- what variable/expression/varlist to be projected
- cuts selection/weighting criteria
- nentries number of entries to process (<=0: all entries in the tree)
- first the first entry to process
- chunk_size chunk size for parallel processing
"""
from ostap.trees.trees import Tree
ch = Tree ( tree , first = first , nevents = nentries )
task = ProjectTask ( histo , what , cuts )
wmgr = WorkManager ( silent = silent , **kwargs )
wmgr.process ( task, ch.split ( chunk_size = chunk_size ) )
## unpack results
_f , _h = task.results ()
filtered = _f
histo += _h
del _h
return filtered , histo
ROOT.TTree.tproject = tproject
ROOT.TTree.pproject = tproject
# =============================================================================
_decorated_classes_ = (
ROOT.TTree ,
ROOT.TChain ,
)
_new_methods_ = (
ROOT.TTree .tproject ,
ROOT.TTree .pproject ,
ROOT.TChain.cproject ,
ROOT.TChain.pproject ,
)
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
# The END
# =============================================================================
|
rwl/muntjac
|
refs/heads/master
|
muntjac/test/server/data/util/filter/__init__.py
|
12133432
| |
renyi533/tensorflow
|
refs/heads/master
|
tensorflow/compiler/xla/python/__init__.py
|
12133432
| |
redhat-openstack/horizon
|
refs/heads/mitaka-patches
|
openstack_dashboard/contrib/developer/theme_preview/tests.py
|
13
|
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core import urlresolvers
from django.core.urlresolvers import reverse
from importlib import import_module
from six import moves
from horizon import base
from openstack_dashboard.contrib.developer.dashboard import Developer
from openstack_dashboard.test import helpers as test
class ThemePreviewTests(test.TestCase):
# Manually register Developer Dashboard, as DEBUG=False in tests
def setUp(self):
super(ThemePreviewTests, self).setUp()
urlresolvers.clear_url_caches()
moves.reload_module(import_module(settings.ROOT_URLCONF))
base.Horizon.register(Developer)
base.Horizon._urls()
def tearDown(self):
super(ThemePreviewTests, self).tearDown()
base.Horizon.unregister(Developer)
base.Horizon._urls()
def test_index(self):
index = reverse('horizon:developer:theme_preview:index')
res = self.client.get(index)
self.assertTemplateUsed(res, 'developer/theme_preview/index.html')
|
foxbenjaminfox/deckofcards
|
refs/heads/master
|
deck/urls.py
|
2
|
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^new/$', 'deck.views.new_deck', name='new_deck_d'), #a week in and I am already deprecating things...
url(r'^shuffle/$', 'deck.views.shuffle', name='shuffle_d'), #deprecated - May 18, 2015
url(r'^shuffle/(?P<key>\w+)/$', 'deck.views.shuffle', name='shuffle_key_d'),#deprecated - May 18, 2015
url(r'^draw/(?P<key>\w+)/$', 'deck.views.draw', name='draw_d'),#deprecated - May 18, 2015
url(r'^deck/new/$', 'deck.views.new_deck', name='new_deck'),
url(r'^deck/new/shuffle/$', 'deck.views.shuffle', name='shuffle'),
url(r'^deck/(?P<key>\w+)/shuffle/$', 'deck.views.shuffle', name='shuffle_key'),
url(r'^deck/new/draw/$', 'deck.views.draw', name='new_draw'),
url(r'^deck/(?P<key>\w+)/draw/$', 'deck.views.draw', name='draw'),
url(r'^deck/(?P<key>\w+)/pile/(?P<pile>\w+)/add/$', 'deck.views.add_to_pile', name='add'),
url(r'^deck/(?P<key>\w+)/pile/(?P<pile>\w+)/draw/$', 'deck.views.draw_from_pile', name='draw_pile'),
)
|
merlink01/toxxmlrpc
|
refs/heads/master
|
toxxmlrpc/tox_xmlrpc_client.py
|
1
|
import toxclient
import threading
import xmlrpclib
import time
import sys
import logging
logger = logging.getLogger('Toxxmlrpc_Client')
class _Method:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
class Toxxmlrpc_Client():
def __init__(self,path,password=None,server_id=None, disable_auto_login=True,timeout=10):
self.timeout = timeout
self.server_id = server_id
self.password = password
self.disable_auto_login = disable_auto_login
if disable_auto_login:
self.client = toxclient.Toxclient(path)
else:
self.client = toxclient.Toxclient(path,password)
self.exec_lock = threading.Lock()
def start(self):
self.client.start()
#~ if not self.disable_auto_login:
#~ while self.client.status == 'offline':
#~ time.sleep(1)
#~ logger.info('Client: %s'%self.client.status)
if self.server_id:
already_added = False
for f in self.client.get_friend_list():
if self.client.friend_get_public_key(f) in self.server_id:
already_added = True
logger.info('Server already added')
break
if not already_added:
self.client.friend_add_with_request(self.server_id,self.password)
logger.info('Started Friend request to Server')
else:
logger.info('No Server ID given')
def stop(self):
self.client.stop()
def __request(self,methodname,args):
logger.info('Execute: %s%s'%(methodname,repr(args)))
data = xmlrpclib.dumps(args,methodname,allow_none=True)
self.exec_lock.acquire()
if not self.client.data_send(0,data,self.timeout):
logger.warning('Raising Error, Timeout reached')
self.exec_lock.release()
raise IOError, 'Timeout'
recdata = None
time_to_wait = int(time.time()) + self.timeout
while not recdata:
timenow = int(time.time())
if timenow > time_to_wait:
logger.warning('Raising Error, Timeout reached')
self.exec_lock.release()
raise IOError, 'Timeout'
recdata = self.client.data_recv()
time.sleep(0.1)
self.exec_lock.release()
returndata = xmlrpclib.loads(recdata['data'],use_datetime=True)
logger.info('got %s'%str(returndata))
return returndata[0][0]
def __getattr__(self, name):
# magic method dispatcher
return _Method(self.__request, name)
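# A minimal usage sketch (profile path, password and server id are
# hypothetical; attribute access is dispatched to the server through
# _Method and __request):
#
#     client = Toxxmlrpc_Client('profile.tox', password='secret',
#                               server_id='<server tox id>')
#     client.start()
#     print client.add(1, 2) # calls the remote 'add' method over Tox
#     client.stop()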
|
rishirajsinghjhelumi/Entity-Mining
|
refs/heads/master
|
imdb/parser/sql/alchemyadapter.py
|
57
|
"""
parser.sql.alchemyadapter module (imdb.parser.sql package).
This module adapts the SQLAlchemy ORM to the internal mechanism.
Copyright 2008-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import sys
import logging
from sqlalchemy import *
from sqlalchemy import schema
try: from sqlalchemy import exc # 0.5
except ImportError: from sqlalchemy import exceptions as exc # 0.4
_alchemy_logger = logging.getLogger('imdbpy.parser.sql.alchemy')
try:
import migrate.changeset
HAS_MC = True
except ImportError:
HAS_MC = False
_alchemy_logger.warn('Unable to import migrate.changeset: Foreign ' \
'Keys will not be created.')
from imdb._exceptions import IMDbDataAccessError
from dbschema import *
# Used to convert table and column names.
re_upper = re.compile(r'([A-Z])')
# XXX: I'm not sure at all that this is the best method to connect
# to the database and bind that connection to every table.
metadata = MetaData()
# Maps our placeholders to SQLAlchemy's column types.
MAP_COLS = {
INTCOL: Integer,
UNICODECOL: UnicodeText,
STRINGCOL: String
}
class NotFoundError(IMDbDataAccessError):
"""Exception raised when Table.get(id) returns no value."""
pass
def _renameTable(tname):
"""Build the name of a table, as done by SQLObject."""
tname = re_upper.sub(r'_\1', tname)
if tname.startswith('_'):
tname = tname[1:]
return tname.lower()
def _renameColumn(cname):
"""Build the name of a column, as done by SQLObject."""
cname = cname.replace('ID', 'Id')
return _renameTable(cname)
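# e.g. _renameTable('PersonInfo') -> 'person_info', and
# _renameColumn('personID') -> 'person_id', matching SQLObject's naming.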
class DNNameObj(object):
"""Used to access table.sqlmeta.columns[column].dbName (a string)."""
def __init__(self, dbName):
self.dbName = dbName
def __repr__(self):
return '<DNNameObj(dbName=%s) [id=%s]>' % (self.dbName, id(self))
class DNNameDict(object):
"""Used to access table.sqlmeta.columns (a dictionary)."""
def __init__(self, colMap):
self.colMap = colMap
def __getitem__(self, key):
return DNNameObj(self.colMap[key])
def __repr__(self):
return '<DNNameDict(colMap=%s) [id=%s]>' % (self.colMap, id(self))
class SQLMetaAdapter(object):
"""Used to access table.sqlmeta (an object with .table, .columns and
.idName attributes)."""
def __init__(self, table, colMap=None):
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
def __getattr__(self, name):
if name == 'table':
return getattr(self.table, name)
if name == 'columns':
return DNNameDict(self.colMap)
if name == 'idName':
return self.colMap.get('id', 'id')
return None
def __repr__(self):
return '<SQLMetaAdapter(table=%s, colMap=%s) [id=%s]>' % \
(repr(self.table), repr(self.colMap), id(self))
class QAdapter(object):
"""Used to access table.q attribute (remapped to SQLAlchemy table.c)."""
def __init__(self, table, colMap=None):
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
def __getattr__(self, name):
try: return getattr(self.table.c, self.colMap[name])
except KeyError, e: raise AttributeError("unable to get '%s'" % name)
def __repr__(self):
return '<QAdapter(table=%s, colMap=%s) [id=%s]>' % \
(repr(self.table), repr(self.colMap), id(self))
class RowAdapter(object):
"""Adapter for a SQLAlchemy RowProxy object."""
def __init__(self, row, table, colMap=None):
self.row = row
# FIXME: it's OBSCENE that 'table' should be passed from
# TableAdapter through ResultAdapter only to land here,
# where it's used to directly update a row item.
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
self.colMapKeys = colMap.keys()
def __getattr__(self, name):
try: return getattr(self.row, self.colMap[name])
except KeyError, e: raise AttributeError("unable to get '%s'" % name)
def __setattr__(self, name, value):
# FIXME: I can't even think about how much performance suffers
# because of this horrible hack (and it's used so rarely...)
# For sure something like a "property" to map column names
# to getter/setter functions would be much better, but it's
# not possible (or at least not easy) to build them for a
# single instance.
if name in self.__dict__.get('colMapKeys', ()):
# Trying to update a value in the database.
row = self.__dict__['row']
table = self.__dict__['table']
colMap = self.__dict__['colMap']
params = {colMap[name]: value}
table.update(table.c.id==row.id).execute(**params)
# XXX: minor bug: after a value is assigned with the
# 'rowAdapterInstance.colName = value' syntax, for some
# reason rowAdapterInstance.colName still returns the
# previous value (even if the database is updated).
# Fix it? I'm not even sure it's ever used.
return
# For every other attribute.
object.__setattr__(self, name, value)
def __repr__(self):
return '<RowAdapter(row=%s, table=%s, colMap=%s) [id=%s]>' % \
(repr(self.row), repr(self.table), repr(self.colMap), id(self))
class ResultAdapter(object):
"""Adapter for a SQLAlchemy ResultProxy object."""
def __init__(self, result, table, colMap=None):
self.result = result
self.table = table
if colMap is None:
colMap = {}
self.colMap = colMap
def count(self):
return len(self)
def __len__(self):
# FIXME: why sqlite returns -1? (that's wrooong!)
if self.result.rowcount == -1:
return 0
return self.result.rowcount
def __getitem__(self, key):
res = list(self.result)[key]
if not isinstance(key, slice):
# A single item.
return RowAdapter(res, self.table, colMap=self.colMap)
else:
# A (possibly empty) list of items.
return [RowAdapter(x, self.table, colMap=self.colMap)
for x in res]
def __iter__(self):
for item in self.result:
yield RowAdapter(item, self.table, colMap=self.colMap)
def __repr__(self):
return '<ResultAdapter(result=%s, table=%s, colMap=%s) [id=%s]>' % \
(repr(self.result), repr(self.table),
repr(self.colMap), id(self))
class TableAdapter(object):
"""Adapter for a SQLAlchemy Table object, to mimic a SQLObject class."""
def __init__(self, table, uri=None):
"""Initialize a TableAdapter object."""
self._imdbpySchema = table
self._imdbpyName = table.name
self.connectionURI = uri
self.colMap = {}
columns = []
for col in table.cols:
# Column's parameters.
params = {'nullable': True}
params.update(col.params)
if col.name == 'id':
params['primary_key'] = True
if 'notNone' in params:
params['nullable'] = not params['notNone']
del params['notNone']
cname = _renameColumn(col.name)
self.colMap[col.name] = cname
colClass = MAP_COLS[col.kind]
colKindParams = {}
if 'length' in params:
colKindParams['length'] = params['length']
del params['length']
elif colClass is UnicodeText and col.index:
# XXX: limit length for UNICODECOLs that will have an index.
# this can result in name.name and title.title truncations!
colClass = Unicode
# Should work for most of the database servers.
length = 511
if self.connectionURI:
if self.connectionURI.startswith('mysql'):
# To stay compatible with MySQL 4.x.
length = 255
colKindParams['length'] = length
elif self._imdbpyName == 'PersonInfo' and col.name == 'info':
if self.connectionURI:
if self.connectionURI.startswith('ibm'):
# There are some entries longer than 32KB.
colClass = CLOB
# I really do hope that this space isn't wasted
# for all the other, shorter entries... <g>
colKindParams['length'] = 68*1024
colKind = colClass(**colKindParams)
if 'alternateID' in params:
# There's no need to handle them here.
del params['alternateID']
# Create a column.
colObj = Column(cname, colKind, **params)
columns.append(colObj)
self.tableName = _renameTable(table.name)
# Create the table.
self.table = Table(self.tableName, metadata, *columns)
self._ta_insert = self.table.insert()
self._ta_select = self.table.select
# Adapters for special attributes.
self.q = QAdapter(self.table, colMap=self.colMap)
self.sqlmeta = SQLMetaAdapter(self.table, colMap=self.colMap)
def select(self, conditions=None):
"""Return a list of results."""
result = self._ta_select(conditions).execute()
return ResultAdapter(result, self.table, colMap=self.colMap)
def get(self, theID):
"""Get an object given its ID."""
result = self.select(self.table.c.id == theID)
#if not result:
# raise NotFoundError, 'no data for ID %s' % theID
# FIXME: isn't this a bit risky? We can't check len(result),
# because sqlite returns -1...
# What about converting it to a list and getting the first item?
try:
return result[0]
except KeyError:
raise NotFoundError('no data for ID %s' % theID)
def dropTable(self, checkfirst=True):
"""Drop the table."""
dropParams = {'checkfirst': checkfirst}
# Guess what? Another work-around for an ibm_db bug.
if self.table.bind.engine.url.drivername.startswith('ibm_db'):
del dropParams['checkfirst']
try:
self.table.drop(**dropParams)
except exc.ProgrammingError:
# As above: re-raise the exception, but only if it's not ibm_db.
if not self.table.bind.engine.url.drivername.startswith('ibm_db'):
raise
def createTable(self, checkfirst=True):
"""Create the table."""
self.table.create(checkfirst=checkfirst)
# Create indexes for alternateID columns (other indexes will be
# created later, at explicit request, for performance reasons).
for col in self._imdbpySchema.cols:
if col.name == 'id':
continue
if col.params.get('alternateID', False):
self._createIndex(col, checkfirst=checkfirst)
def _createIndex(self, col, checkfirst=True):
"""Create an index for a given (schema) column."""
# XXX: indexLen is ignored in SQLAlchemy, and that means that
# indexes will span the whole 255-char strings...
# NOTE: don't use a dot as a separator, or DB2 will do
# nasty things.
idx_name = '%s_%s' % (self.table.name, col.index or col.name)
if checkfirst:
for index in self.table.indexes:
if index.name == idx_name:
return
idx = Index(idx_name, getattr(self.table.c, self.colMap[col.name]))
# XXX: beware that exc.OperationalError can be raised in some
# strange circumstances; that's why the index name doesn't
# follow the SQLObject convention, but includes the table name:
# sqlite, for example, expects index names to be unique at
# db-level.
try:
idx.create()
except exc.OperationalError, e:
_alchemy_logger.warn('Skipping creation of the %s.%s index: %s' %
(self.sqlmeta.table, col.name, e))
def addIndexes(self, ifNotExists=True):
"""Create all required indexes."""
for col in self._imdbpySchema.cols:
if col.index:
self._createIndex(col, checkfirst=ifNotExists)
def addForeignKeys(self, mapTables, ifNotExists=True):
"""Create all required foreign keys."""
if not HAS_MC:
return
# It seems that there's no reason to prevent the creation of
# indexes for columns with FK constraints: if there's already
# an index, the FK index is not created.
countCols = 0
for col in self._imdbpySchema.cols:
countCols += 1
if not col.foreignKey:
continue
fks = col.foreignKey.split('.', 1)
foreignTableName = fks[0]
if len(fks) == 2:
foreignColName = fks[1]
else:
foreignColName = 'id'
foreignColName = mapTables[foreignTableName].colMap.get(
foreignColName, foreignColName)
thisColName = self.colMap.get(col.name, col.name)
thisCol = self.table.columns[thisColName]
foreignTable = mapTables[foreignTableName].table
foreignCol = getattr(foreignTable.c, foreignColName)
# Need to explicitly set a unique name, otherwise it will
# explode if two cols point to the same table.
fkName = 'fk_%s_%s_%d' % (foreignTable.name, foreignColName,
countCols)
constrain = migrate.changeset.ForeignKeyConstraint([thisCol],
[foreignCol],
name=fkName)
try:
constrain.create()
except exc.OperationalError:
continue
def __call__(self, *args, **kwds):
"""To insert a new row with the syntax: TableClass(key=value, ...)"""
taArgs = {}
for key, value in kwds.items():
taArgs[self.colMap.get(key, key)] = value
self._ta_insert.execute(*args, **taArgs)
def __repr__(self):
return '<TableAdapter(table=%s) [id=%s]>' % (repr(self.table), id(self))
# Module-level "cache" for SQLObject classes, to prevent
# "Table 'tableName' is already defined for this MetaData instance" errors,
# when two or more connections to the database are made.
# XXX: is this the best way to act?
TABLES_REPOSITORY = {}
def getDBTables(uri=None):
"""Return a list of TableAdapter objects to be used to access the
database through the SQLAlchemy ORM. The connection uri is optional, and
can be used to tailor the db schema to specific needs."""
DB_TABLES = []
for table in DB_SCHEMA:
if table.name in TABLES_REPOSITORY:
DB_TABLES.append(TABLES_REPOSITORY[table.name])
continue
tableAdapter = TableAdapter(table, uri)
DB_TABLES.append(tableAdapter)
TABLES_REPOSITORY[table.name] = tableAdapter
return DB_TABLES
# Functions used to emulate SQLObject's logical operators.
def AND(*params):
"""Emulate SQLObject's AND."""
return and_(*params)
def OR(*params):
"""Emulate SQLObject's OR."""
return or_(*params)
def IN(item, inList):
"""Emulate SQLObject's IN."""
if not isinstance(item, schema.Column):
return OR(*[x == item for x in inList])
else:
return item.in_(inList)
def ISNULL(x):
"""Emulate SQLObject's ISNULL."""
# XXX: Should we use null()? Can null() be a global instance?
# XXX: Is it safe to test None with the == operator, in this case?
return x == None
def ISNOTNULL(x):
"""Emulate SQLObject's ISNOTNULL."""
return x != None
def CONTAINSSTRING(expr, pattern):
"""Emulate SQLObject's CONTAINSSTRING."""
return expr.like('%%%s%%' % pattern)
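# A minimal usage sketch of the emulated operators (hedged example against a
# hypothetical 'Title' TableAdapter obtained from getDBTables below):
#
#     results = Title.select(AND(Title.q.kindID == 1,
#                                CONTAINSSTRING(Title.q.title, u'matrix')))
#     for row in results:
#         print row.title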
def toUTF8(s):
"""For some strange reason, sometimes SQLObject wants utf8 strings
instead of unicode; with SQLAlchemy we just return the unicode text."""
return s
class _AlchemyConnection(object):
"""A proxy for the connection object, required since _ConnectionFairy
uses __slots__."""
def __init__(self, conn):
self.conn = conn
def __getattr__(self, name):
return getattr(self.conn, name)
def setConnection(uri, tables, encoding='utf8', debug=False):
"""Set connection for every table."""
# FIXME: why on earth MySQL requires an additional parameter
# is well beyond my understanding...
if uri.startswith('mysql'):
if '?' in uri:
uri += '&'
else:
uri += '?'
uri += 'charset=%s' % encoding
params = {'encoding': encoding}
if debug:
params['echo'] = True
if uri.startswith('ibm_db'):
# Try to work-around a possible bug of the ibm_db DB2 driver.
params['convert_unicode'] = True
# XXX: is this the best way to connect?
engine = create_engine(uri, **params)
metadata.bind = engine
eng_conn = engine.connect()
if uri.startswith('sqlite'):
major = sys.version_info[0]
minor = sys.version_info[1]
if major > 2 or (major == 2 and minor > 5):
eng_conn.connection.connection.text_factory = str
# XXX: OH MY, THAT'S A MESS!
# We need to return a "connection" object, with the .dbName
# attribute set to the db engine name (e.g. "mysql"), .paramstyle
# set to the style of the parameters for query() calls, and the
# .module attribute set to a module (?) with .OperationalError and
# .IntegrityError attributes.
# Another attribute of "connection" is the getConnection() function,
# used to return an object with a .cursor() method.
connection = _AlchemyConnection(eng_conn.connection)
paramstyle = eng_conn.dialect.paramstyle
connection.module = eng_conn.dialect.dbapi
connection.paramstyle = paramstyle
connection.getConnection = lambda: connection.connection
connection.dbName = engine.url.drivername
return connection
|
chrisrossx/DotStar_Emulator
|
refs/heads/master
|
DotStar_Emulator/emulator/utils.py
|
1
|
import os
import math
import pygame
from .vector2 import Vector2
MEDIA_PATH = os.path.join(os.path.dirname(__file__), 'media')
def blend_color(color1, color2, blend_ratio):
"""
Blend two colors together given the blend_ratio
:param color1: pygame.Color
:param color2: pygame.Color
:param blend_ratio: float between 0.0 and 1.0
:return: pygame.Color
"""
r = color1.r + (color2.r - color1.r) * blend_ratio
g = color1.g + (color2.g - color1.g) * blend_ratio
b = color1.b + (color2.b - color1.b) * blend_ratio
a = color1.a + (color2.a - color1.a) * blend_ratio
return pygame.Color(int(r), int(g), int(b), int(a))
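# Example: halfway between opaque red and blue,
#     blend_color(pygame.Color(255, 0, 0, 255),
#                 pygame.Color(0, 0, 255, 255), 0.5)
# returns Color(127, 0, 127, 255) after the int() truncation above.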
def vector2_to_floor(v2):
"""
call math.floor on Vector2.x and Vector2.y
:param v2: pygame.math.Vector2
:return: A new Vector2 with math.floor applied to the x, y values
"""
return Vector2(math.floor(v2.x), math.floor(v2.y))
def vector2_to_int(v2):
"""
call int on Vector2.x and Vector2.y
:param v2: pygame.math.Vector2
:return: An (x, y) integer tuple
"""
return int(v2.x), int(v2.y)
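# Note that the two converters differ for negative components: math.floor
# rounds down while int() truncates toward zero, e.g.
#     vector2_to_floor(Vector2(1.7, -2.3)) # -> Vector2(1.0, -3.0)
#     vector2_to_int(Vector2(1.7, -2.3)) # -> (1, -2)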
|
oren88/vasputil
|
refs/heads/master
|
vasputil/tests/test_dos.py
|
1
|
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
# Copyright (c) 2008, 2010 Janne Blomqvist
# This source code file is subject to the terms of the MIT (Expat)
# License. See the file LICENSE for details.
"""This module contains unit tests for the vasputil.dos module."""
import unittest
import vasputil.dos as d
class LdosTestCase(unittest.TestCase):
"""Testcase for vasputil.dos.LDOS class."""
def suite():
ldos_suite = unittest.TestLoader().loadTestsFromTestCase(LdosTestCase)
return unittest.TestSuite([ldos_suite])
if __name__ == "__main__":
unittest.main()
|
KamalAwasthi/FullContact
|
refs/heads/master
|
myvenv/lib/python3.4/site-packages/pip/commands/check.py
|
336
|
import logging
from pip.basecommand import Command
from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions
logger = logging.getLogger(__name__)
class CheckCommand(Command):
"""Verify installed packages have compatible dependencies."""
name = 'check'
usage = """
%prog [options]"""
summary = 'Verify installed packages have compatible dependencies.'
def run(self, options, args):
dists = get_installed_distributions(local_only=False, skip=())
missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists)
for dist in dists:
key = '%s==%s' % (dist.project_name, dist.version)
for requirement in missing_reqs_dict.get(key, []):
logger.info(
"%s %s requires %s, which is not installed.",
dist.project_name, dist.version, requirement.project_name)
for requirement, actual in incompatible_reqs_dict.get(key, []):
logger.info(
"%s %s has requirement %s, but you have %s %s.",
dist.project_name, dist.version, requirement,
actual.project_name, actual.version)
if missing_reqs_dict or incompatible_reqs_dict:
return 1
else:
logger.info("No broken requirements found.")
|
kapari/django-oscar
|
refs/heads/master
|
src/oscar/apps/dashboard/offers/views.py
|
23
|
import datetime
import json
from django.views.generic import ListView, FormView, DeleteView
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.core import serializers
from django.core.serializers.json import DjangoJSONEncoder
from oscar.core.loading import get_classes, get_class, get_model
from oscar.views import sort_queryset
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Condition = get_model('offer', 'Condition')
Range = get_model('offer', 'Range')
Product = get_model('catalogue', 'Product')
OrderDiscount = get_model('order', 'OrderDiscount')
Benefit = get_model('offer', 'Benefit')
MetaDataForm, ConditionForm, BenefitForm, RestrictionsForm, OfferSearchForm \
= get_classes('dashboard.offers.forms',
['MetaDataForm', 'ConditionForm', 'BenefitForm',
'RestrictionsForm', 'OfferSearchForm'])
OrderDiscountCSVFormatter = get_class(
'dashboard.offers.reports', 'OrderDiscountCSVFormatter')
class OfferListView(ListView):
model = ConditionalOffer
context_object_name = 'offers'
template_name = 'dashboard/offers/offer_list.html'
form_class = OfferSearchForm
def get_queryset(self):
qs = self.model._default_manager.exclude(
offer_type=ConditionalOffer.VOUCHER)
qs = sort_queryset(qs, self.request,
['name', 'start_datetime', 'end_datetime',
'num_applications', 'total_discount'])
self.description = _("All offers")
# We track whether the queryset is filtered to determine whether we
# show the search form 'reset' button.
self.is_filtered = False
self.form = self.form_class(self.request.GET)
if not self.form.is_valid():
return qs
data = self.form.cleaned_data
if data['name']:
qs = qs.filter(name__icontains=data['name'])
self.description = _("Offers matching '%s'") % data['name']
self.is_filtered = True
if data['is_active']:
self.is_filtered = True
today = datetime.date.today()
qs = qs.filter(start_date__lte=today, end_date__gte=today)
return qs
def get_context_data(self, **kwargs):
ctx = super(OfferListView, self).get_context_data(**kwargs)
ctx['queryset_description'] = self.description
ctx['form'] = self.form
ctx['is_filtered'] = self.is_filtered
return ctx
class OfferWizardStepView(FormView):
wizard_name = 'offer_wizard'
form_class = None
step_name = None
update = False
url_name = None
# Keep a reference to previous view class to allow checks to be made on
# whether prior steps have been completed
previous_view = None
def dispatch(self, request, *args, **kwargs):
if self.update:
self.offer = get_object_or_404(ConditionalOffer, id=kwargs['pk'])
if not self.is_previous_step_complete(request):
messages.warning(
request, _("%s step not complete") % (
self.previous_view.step_name.title(),))
return HttpResponseRedirect(self.get_back_url())
return super(OfferWizardStepView, self).dispatch(request, *args,
**kwargs)
def is_previous_step_complete(self, request):
if not self.previous_view:
return True
return self.previous_view.is_valid(self, request)
def _key(self, step_name=None, is_object=False):
key = step_name if step_name else self.step_name
if self.update:
key += str(self.offer.id)
if is_object:
key += '_obj'
return key
def _store_form_kwargs(self, form):
session_data = self.request.session.setdefault(self.wizard_name, {})
# Adjust kwargs to avoid trying to save the range instance
form_data = form.cleaned_data.copy()
range = form_data.get('range', None)
if range is not None:
form_data['range_id'] = range.id
del form_data['range']
form_kwargs = {'data': form_data}
json_data = json.dumps(form_kwargs, cls=DjangoJSONEncoder)
session_data[self._key()] = json_data
self.request.session.save()
def _fetch_form_kwargs(self, step_name=None):
if not step_name:
step_name = self.step_name
session_data = self.request.session.setdefault(self.wizard_name, {})
json_data = session_data.get(self._key(step_name), None)
if json_data:
form_kwargs = json.loads(json_data)
if 'range_id' in form_kwargs['data']:
form_kwargs['data']['range'] = Range.objects.get(
id=form_kwargs['data']['range_id'])
del form_kwargs['data']['range_id']
return form_kwargs
return {}
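    # A hypothetical session layout after the metadata step, as produced by
    # _store_form_kwargs above (keys follow _key(); field names are
    # illustrative):
    #   request.session['offer_wizard'] = {
    #       'metadata': '{"data": {"name": "Summer sale", ...}}',
    #   }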
def _store_object(self, form):
session_data = self.request.session.setdefault(self.wizard_name, {})
        # We don't store the object instance as that is not JSON
        # serialisable. Instead, we store its serialised JSON form.
instance = form.save(commit=False)
json_qs = serializers.serialize('json', [instance])
session_data[self._key(is_object=True)] = json_qs
self.request.session.save()
def _fetch_object(self, step_name, request=None):
if request is None:
request = self.request
session_data = request.session.setdefault(self.wizard_name, {})
json_qs = session_data.get(self._key(step_name, is_object=True), None)
if json_qs:
# Recreate model instance from passed data
deserialised_obj = list(serializers.deserialize('json', json_qs))
return deserialised_obj[0].object
def _fetch_session_offer(self):
"""
Return the offer instance loaded with the data stored in the
session. When updating an offer, the updated fields are used with the
existing offer data.
"""
offer = self._fetch_object('metadata')
if offer is None and self.update:
offer = self.offer
return offer
def _flush_session(self):
self.request.session[self.wizard_name] = {}
self.request.session.save()
def get_form_kwargs(self, *args, **kwargs):
form_kwargs = {}
if self.update:
form_kwargs['instance'] = self.get_instance()
session_kwargs = self._fetch_form_kwargs()
form_kwargs.update(session_kwargs)
parent_kwargs = super(OfferWizardStepView, self).get_form_kwargs(
*args, **kwargs)
form_kwargs.update(parent_kwargs)
return form_kwargs
def get_context_data(self, **kwargs):
ctx = super(OfferWizardStepView, self).get_context_data(**kwargs)
if self.update:
ctx['offer'] = self.offer
ctx['session_offer'] = self._fetch_session_offer()
ctx['title'] = self.get_title()
return ctx
def get_back_url(self):
if not self.previous_view:
return None
if self.update:
return reverse(self.previous_view.url_name,
kwargs={'pk': self.kwargs['pk']})
return reverse(self.previous_view.url_name)
def get_title(self):
return self.step_name.title()
def form_valid(self, form):
self._store_form_kwargs(form)
self._store_object(form)
if self.update and 'save' in form.data:
            # Save changes to this offer when updating and the save button
            # was pressed
return self.save_offer(self.offer)
else:
# Proceed to next page
return super(OfferWizardStepView, self).form_valid(form)
def save_offer(self, offer):
# We update the offer with the name/description from step 1
session_offer = self._fetch_session_offer()
offer.name = session_offer.name
offer.description = session_offer.description
# Save the related models, then save the offer.
benefit = self._fetch_object('benefit')
benefit.save()
condition = self._fetch_object('condition')
condition.save()
offer.benefit = benefit
offer.condition = condition
offer.save()
self._flush_session()
if self.update:
msg = _("Offer '%s' updated") % offer.name
else:
msg = _("Offer '%s' created!") % offer.name
messages.success(self.request, msg)
return HttpResponseRedirect(reverse(
'dashboard:offer-detail', kwargs={'pk': offer.pk}))
def get_success_url(self):
if self.update:
return reverse(self.success_url_name,
kwargs={'pk': self.kwargs['pk']})
return reverse(self.success_url_name)
@classmethod
def is_valid(cls, current_view, request):
if current_view.update:
return True
return current_view._fetch_object(cls.step_name, request) is not None
class OfferMetaDataView(OfferWizardStepView):
step_name = 'metadata'
form_class = MetaDataForm
template_name = 'dashboard/offers/metadata_form.html'
url_name = 'dashboard:offer-metadata'
success_url_name = 'dashboard:offer-benefit'
def get_instance(self):
return self.offer
def get_title(self):
return _("Name and description")
class OfferBenefitView(OfferWizardStepView):
step_name = 'benefit'
form_class = BenefitForm
template_name = 'dashboard/offers/benefit_form.html'
url_name = 'dashboard:offer-benefit'
success_url_name = 'dashboard:offer-condition'
previous_view = OfferMetaDataView
def get_instance(self):
return self.offer.benefit
def get_title(self):
# This is referred to as the 'incentive' within the dashboard.
return _("Incentive")
class OfferConditionView(OfferWizardStepView):
step_name = 'condition'
form_class = ConditionForm
template_name = 'dashboard/offers/condition_form.html'
url_name = 'dashboard:offer-condition'
success_url_name = 'dashboard:offer-restrictions'
previous_view = OfferBenefitView
def get_instance(self):
return self.offer.condition
class OfferRestrictionsView(OfferWizardStepView):
step_name = 'restrictions'
form_class = RestrictionsForm
template_name = 'dashboard/offers/restrictions_form.html'
previous_view = OfferConditionView
url_name = 'dashboard:offer-restrictions'
def form_valid(self, form):
offer = form.save(commit=False)
return self.save_offer(offer)
def get_instance(self):
return self.offer
def get_title(self):
return _("Restrictions")
class OfferDeleteView(DeleteView):
model = ConditionalOffer
template_name = 'dashboard/offers/offer_delete.html'
context_object_name = 'offer'
def get_success_url(self):
messages.success(self.request, _("Offer deleted!"))
return reverse('dashboard:offer-list')
class OfferDetailView(ListView):
# Slightly odd, but we treat the offer detail view as a list view so the
# order discounts can be browsed.
model = OrderDiscount
template_name = 'dashboard/offers/offer_detail.html'
context_object_name = 'order_discounts'
def dispatch(self, request, *args, **kwargs):
self.offer = get_object_or_404(ConditionalOffer, pk=kwargs['pk'])
return super(OfferDetailView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if 'suspend' in request.POST:
return self.suspend()
elif 'unsuspend' in request.POST:
return self.unsuspend()
def suspend(self):
if self.offer.is_suspended:
messages.error(self.request, _("Offer is already suspended"))
else:
self.offer.suspend()
messages.success(self.request, _("Offer suspended"))
return HttpResponseRedirect(
reverse('dashboard:offer-detail', kwargs={'pk': self.offer.pk}))
def unsuspend(self):
if not self.offer.is_suspended:
messages.error(
self.request,
_("Offer cannot be reinstated as it is not currently "
"suspended"))
else:
self.offer.unsuspend()
messages.success(self.request, _("Offer reinstated"))
return HttpResponseRedirect(
reverse('dashboard:offer-detail', kwargs={'pk': self.offer.pk}))
def get_queryset(self):
return self.model.objects.filter(offer_id=self.offer.pk)
def get_context_data(self, **kwargs):
ctx = super(OfferDetailView, self).get_context_data(**kwargs)
ctx['offer'] = self.offer
return ctx
def render_to_response(self, context):
if self.request.GET.get('format') == 'csv':
formatter = OrderDiscountCSVFormatter()
return formatter.generate_response(context['order_discounts'],
offer=self.offer)
return super(OfferDetailView, self).render_to_response(context)
|
leilihh/novaha
|
refs/heads/kvm_ha_tmp
|
nova/scheduler/filters/trusted_filter.py
|
13
|
# Copyright (c) 2012 Intel, Inc.
# Copyright (c) 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Filter to add support for Trusted Computing Pools.
Filter that only schedules tasks on a host if the integrity (trust)
of that host matches the trust requested in the `extra_specs' for the
flavor. The `extra_specs' will contain a key/value pair where the
key is `trust'. The value of this pair (`trusted'/`untrusted') must
match the integrity of that host (obtained from the Attestation
service) before the task can be scheduled on that host.
Note that the parameters to control access to the Attestation Service
are in the `nova.conf' file in a separate `trusted_computing' section.
For example, the config file will look something like:
[DEFAULT]
verbose=True
...
[trusted_computing]
attestation_server=attester.mynetwork.com
Details on the specific parameters can be found in the `trusted_opts'
option list defined in this file.
Details on setting up and using an Attestation Service can be found at
the Open Attestation project at:
https://github.com/OpenAttestation/OpenAttestation
"""
import httplib
import socket
import ssl
from oslo.config import cfg
from nova import context
from nova import db
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
trusted_opts = [
cfg.StrOpt('attestation_server',
help='Attestation server HTTP'),
cfg.StrOpt('attestation_server_ca_file',
help='Attestation server Cert file for Identity verification'),
cfg.StrOpt('attestation_port',
default='8443',
help='Attestation server port'),
cfg.StrOpt('attestation_api_url',
default='/OpenAttestationWebServices/V1.0',
help='Attestation web API URL'),
cfg.StrOpt('attestation_auth_blob',
help='Attestation authorization blob - must change'),
cfg.IntOpt('attestation_auth_timeout',
default=60,
help='Attestation status cache valid period length'),
]
CONF = cfg.CONF
trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters')
CONF.register_group(trust_group)
CONF.register_opts(trusted_opts, group=trust_group)
class HTTPSClientAuthConnection(httplib.HTTPSConnection):
"""Class to make a HTTPS connection, with support for full client-based
SSL Authentication
"""
def __init__(self, host, port, key_file, cert_file, ca_file, timeout=None):
httplib.HTTPSConnection.__init__(self, host,
key_file=key_file,
cert_file=cert_file)
self.host = host
self.port = port
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
self.timeout = timeout
def connect(self):
"""Connect to a host on a given (SSL) port.
If ca_file is pointing somewhere, use it to check Server Certificate.
Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to
ssl.wrap_socket(), which forces SSL to check server certificate
against our client certificate.
"""
sock = socket.create_connection((self.host, self.port), self.timeout)
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ca_certs=self.ca_file,
cert_reqs=ssl.CERT_REQUIRED)
class AttestationService(object):
# Provide access wrapper to attestation server to get integrity report.
def __init__(self):
self.api_url = CONF.trusted_computing.attestation_api_url
self.host = CONF.trusted_computing.attestation_server
self.port = CONF.trusted_computing.attestation_port
self.auth_blob = CONF.trusted_computing.attestation_auth_blob
self.key_file = None
self.cert_file = None
self.ca_file = CONF.trusted_computing.attestation_server_ca_file
self.request_count = 100
def _do_request(self, method, action_url, body, headers):
# Connects to the server and issues a request.
# :returns: result data
# :raises: IOError if the request fails
action_url = "%s/%s" % (self.api_url, action_url)
try:
c = HTTPSClientAuthConnection(self.host, self.port,
key_file=self.key_file,
cert_file=self.cert_file,
ca_file=self.ca_file)
c.request(method, action_url, body, headers)
res = c.getresponse()
status_code = res.status
if status_code in (httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.NO_CONTENT):
return httplib.OK, res
return status_code, None
except (socket.error, IOError):
return IOError, None
def _request(self, cmd, subcmd, hosts):
body = {}
body['count'] = len(hosts)
body['hosts'] = hosts
cooked = jsonutils.dumps(body)
headers = {}
headers['content-type'] = 'application/json'
headers['Accept'] = 'application/json'
if self.auth_blob:
headers['x-auth-blob'] = self.auth_blob
status, res = self._do_request(cmd, subcmd, cooked, headers)
if status == httplib.OK:
data = res.read()
return status, jsonutils.loads(data)
else:
return status, None
def do_attestation(self, hosts):
"""Attests compute nodes through OAT service.
:param hosts: hosts list to be attested
:returns: dictionary for trust level and validate time
"""
result = None
status, data = self._request("POST", "PollHosts", hosts)
        if data is not None:
result = data.get('hosts')
return result
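    # A hypothetical response sketch (field names inferred from
    # _update_cache_entry in the cache class below; values illustrative):
    #   [{'host_name': 'node1', 'trust_lvl': 'trusted',
    #     'vtime': '2014-01-01T00:00:00Z'}, ...]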
class ComputeAttestationCache(object):
"""Cache for compute node attestation
    Cache a compute node's trust level for some time; if the cache
    entry is out of date, poll the OAT service to refresh the cache.
    The OAT service may have a cache as well; its cache validity period
    should be set shorter than the trusted filter's.
"""
def __init__(self):
self.attestservice = AttestationService()
self.compute_nodes = {}
admin = context.get_admin_context()
        # Fetch the compute node list to initialize compute_nodes, so
        # that we don't need to poll the OAT service host by host in
        # the first round that the scheduler invokes us.
computes = db.compute_node_get_all(admin)
for compute in computes:
service = compute['service']
if not service:
LOG.warn(_("No service for compute ID %s") % compute['id'])
continue
host = service['host']
self._init_cache_entry(host)
def _cache_valid(self, host):
cachevalid = False
if host in self.compute_nodes:
node_stats = self.compute_nodes.get(host)
if not timeutils.is_older_than(
node_stats['vtime'],
CONF.trusted_computing.attestation_auth_timeout):
cachevalid = True
return cachevalid
def _init_cache_entry(self, host):
self.compute_nodes[host] = {
'trust_lvl': 'unknown',
'vtime': timeutils.normalize_time(
timeutils.parse_isotime("1970-01-01T00:00:00Z"))}
def _invalidate_caches(self):
for host in self.compute_nodes:
self._init_cache_entry(host)
def _update_cache_entry(self, state):
entry = {}
host = state['host_name']
entry['trust_lvl'] = state['trust_lvl']
try:
# Normalize as naive object to interoperate with utcnow().
entry['vtime'] = timeutils.normalize_time(
timeutils.parse_isotime(state['vtime']))
except ValueError:
            # Mark the system as untrusted if we get an invalid vtime.
entry['trust_lvl'] = 'unknown'
entry['vtime'] = timeutils.utcnow()
self.compute_nodes[host] = entry
def _update_cache(self):
self._invalidate_caches()
states = self.attestservice.do_attestation(self.compute_nodes.keys())
if states is None:
return
for state in states:
self._update_cache_entry(state)
def get_host_attestation(self, host):
"""Check host's trust level."""
if host not in self.compute_nodes:
self._init_cache_entry(host)
if not self._cache_valid(host):
self._update_cache()
level = self.compute_nodes.get(host).get('trust_lvl')
return level
class ComputeAttestation(object):
def __init__(self):
self.caches = ComputeAttestationCache()
def is_trusted(self, host, trust):
level = self.caches.get_host_attestation(host)
return trust == level
class TrustedFilter(filters.BaseHostFilter):
"""Trusted filter to support Trusted Compute Pools."""
def __init__(self):
self.compute_attestation = ComputeAttestation()
def host_passes(self, host_state, filter_properties):
instance = filter_properties.get('instance_type', {})
extra = instance.get('extra_specs', {})
trust = extra.get('trust:trusted_host')
host = host_state.host
if trust:
return self.compute_attestation.is_trusted(host, trust)
return True
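# Hedged usage sketch: the filter compares a flavor extra spec against the
# attested trust level, so a flavor would typically be configured like
# (flavor name illustrative):
#   nova flavor-key m1.trusted set trust:trusted_host=trusted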
|
ESS-LLP/erpnext
|
refs/heads/develop
|
erpnext/education/doctype/student_language/__init__.py
|
12133432
| |
synergeticsedx/deployment-wipro
|
refs/heads/oxa/master.fic
|
lms/djangoapps/certificates/management/__init__.py
|
12133432
| |
dogukantufekci/supersalon
|
refs/heads/master
|
supersalon/professionals/migrations/__init__.py
|
12133432
| |
coffenbacher/neo4django
|
refs/heads/master
|
neo4django/admin/templatetags/__init__.py
|
12133432
| |
lizadaly/nanogenmo2015
|
refs/heads/master
|
saga3/saga.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import inspect
import logging
import random
log = logging.getLogger()
DEFAULT_SHERIFF_DELAY = 20
DEFAULT_NUM_BULLETS = 5
DEFAULT_HEALTH = 5
MAX_SCENES = 350 # ~150 words per scene
# Initiatives
HIGH_INITIATIVE = 30
MEDIUM_INITIATIVE = 20
DEFAULT_INITIATIVE = 10
GUN_DAMAGE = {'miss': {'health': 0,
'message': 'MISSED'},
'nick': {'health': -1,
'message': '{} NICKED'},
'hit': {'health': -2,
'message': '{} HIT'}}
class Stage(object):
"""The world model"""
elapsed_time = 0
current_scene = 0
@property
def actors(self):
"""Returns all the objects in the world that are people"""
return [obj for obj in self.objects if hasattr(obj, 'body')]
def find(self, obj_name):
"""Find an object by name in the world and return the object"""
return next(obj for obj in self.objects + self.places if obj.name == obj_name)
def __init__(self):
self.objects = []
self.places = []
stage = Stage()
def check_initiative(actors):
"""For each actor, find out who gets to move next"""
return max(actors, key=lambda x: x.initiative(), default=actors[0])
def action(actor):
"""At each step, evaluate what happens next"""
# By default, let the current actor do his thing
log.debug("Starting action for actor %s", actor)
actor.set_starting_location(actor.default_location)
actor.act()
stage.elapsed_time += 1
# Determine who acts next
next_actor = check_initiative(stage.actors)
if next_actor.escaped:
return next_actor
# If it's the same actor, just call this again
if next_actor == actor:
return action(actor)
return next_actor
class Thing(object):
"""An object with a name"""
location = None
def move_to(self, place):
"""Move an object from a current container (if it has one) to a new one."""
# Drop it from its current location if it has one
if self.location:
self.location = None
self.location = place
def __init__(self, name, preposition='on'):
stage.objects.append(self)
self.name = name
self.preposition = preposition
def __repr__(self):
return self.name
def __str__(self):
return self.name
def status(self):
if self.location and not isinstance(self.location, Person): # Don't print the status of body parts
if isinstance(self.location, Place):
return "the {} is {} the {}".format(self.name, self.location.preposition, self.location.name).capitalize()
if isinstance(self.location, Thing):
return "the {} is {} the {}".format(self.name, self.location.preposition, self.location.name).capitalize()
class Place(Thing):
"""A Place never has a location, and it doesn't print itself out in the world description."""
is_open = True
is_openable = False
def __init__(self, name=None):
super(Place, self).__init__(name)
stage.places.append(self)
class Door(Place):
"""A door is a place that can be open or closed. If it's open, we'll print a different message when the actor
moves through it than an ordinary place"""
is_openable = True
is_open = False
def close(self):
print("close door")
self.is_open = False
def open(self):
print("open door")
self.is_open = True
class Person(Thing):
"""A person who has hands and a location and will exhibit behavior"""
stage = None # Hook for the world model
enemy = None # Kinda cheating but makes things easy
default_location = None
health = 0 # -1 is dead, but we'll revive them on init
is_dead = False
inebriation = 0
def initiative(self):
"""Return a value representative of how much this actor wants to do something based on their state"""
if self.is_dead: # If they're already dead they're pretty lacking in initiative
return -9999
# If they _just_ died, give them a huge initiative bonus so we "cut" to their scene
if self.health <= 0:
return 9999
actor_initiative = random.randrange(0, DEFAULT_INITIATIVE)
if len(self.path) > 0: # Actor really wants to be somewhere
actor_initiative += HIGH_INITIATIVE
#log.debug("+ %s init change for path movement: %s/%s", self.name, HIGH_INITIATIVE, actor_initiative)
# If they're injured they're pretty mad
injury_bonus = DEFAULT_HEALTH - self.health
actor_initiative += injury_bonus
#log.debug("+ %s init change for injury bonus: %s/%s", self.name, injury_bonus, actor_initiative)
# They're also more excited if they're almost out of bullets
if self.get_if_held(Gun):
bullet_bonus = 10 if self.get_if_held(Gun).num_bullets == 1 else 0
actor_initiative += bullet_bonus
#log.debug("- %s init change for bullet bonus: %s/%s", self.name, bullet_bonus, actor_initiative)
return max(1, actor_initiative)
def act(self):
"""Do whatever is the next queued event"""
# If the actor just died, oops
if self.health <= 0:
print("{} dies.".format(self.name))
self.is_dead = True
return
# If there's a queued event, hit that first
if len(self.queue) > 0:
cmd, *args = self.queue[0]
log.debug("Running queued command: %s %s", cmd, args)
if args:
cmd(*args)
else:
cmd()
self.queue = self.queue[1:]
return
# If there's a target location, try to go there
if len(self.path) > 0:
log.debug("Got a path event, walking it")
next_location = self.path[0]
if self.go(next_location):
# If going there was successful, set their new location and drop it from the path
self.path = self.path[1:]
return
# If the enemy is present, try to kill them!
if self.enemy_is_present():
# If we don't have the gun, go find it!
if isinstance(self, Sheriff): # Lame
gun = stage.find("sheriff's gun")
else:
gun = stage.find("gun")
if self.get_if_held(gun):
self.shoot(self.enemy)
else:
# Immediately go to the location where the gun is (unless the location is a supporter)
target_location = gun.location
self.go(target_location)
# ...then queue taking the gun and shooting it!
self.queue.append((self.shoot, self.enemy))
self.queue.append((self.take, gun))
return
# If the enemy is dead, take the money and run
if self.enemy.is_dead:
# Blow out the gun if we still have it
gun = self.get_if_held(Gun)
holster = self.get_if_held(Holster)
if gun and not gun.location == holster:
print("blow out barrel")
self.queue.append((self.drop, gun, holster))
return True
log.debug("*** Trying to get the money")
money = self.stage.find('money')
if self.location == money.location:
return self.take(money)
# End game! Flee with the money!
if self.get_if_held('money'):
self.path = ['door', None]
self.escaped = True
# Random behaviors
weighted_choice = [('drink', 5), ('wander', 3), ('check', 1), ('lean', 1), ('count', 1), ('drop', 1)]
choice = random.choice([val for val, cnt in weighted_choice for i in range(cnt)])
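        # (The expansion above is a simple weighted pick: e.g. [('a', 2),
        # ('b', 1)] expands to ['a', 'a', 'b'] before random.choice runs.)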
log.debug("%s chose to %s", self.name, choice)
if choice == 'drink':
# Try to drink from the glass if we're holding it
glass = stage.find('glass')
if self.get_if_held('glass'):
# ...and it's full, just drink from it
if glass.full:
glass.drink(self)
return True
# If not, try to pour a glass from the bottle
else:
bottle = stage.find('bottle')
if self.get_if_held(bottle):
bottle.pour(glass)
# Be sure to add queued events in reverse order because queues
self.queue.append((glass.drink, self))
self.queue.append((self.take, glass))
return True
# If we don't have the bottle and can reach it, take it and
# then queue pouring it and drinking from it
else:
if self.can_reach_obj(bottle):
self.take(bottle)
self.queue.append((glass.drink, self))
self.queue.append((self.take, glass))
self.queue.append((bottle.pour, glass))
return True
# If we don't have the glass, try to get it
else:
if self.can_reach_obj(glass):
self.take(glass)
return True
elif choice == 'wander':
return self.go_to_random_location()
elif choice == 'check':
if self.get_if_held(Gun):
print("check gun")
return True
elif choice == 'count':
if self.can_reach_obj(stage.find('money')):
print("count money")
return True
elif choice == 'lean':
if self.location == stage.find('window'):
print('lean on window and look')
return True
elif choice == 'drop': # Drop a random object that isn't the gun
obj = self.get_held_obj(self.right_hand)
if obj and not isinstance(obj, Gun):
self.drop(obj, self.location)
return True
else:
obj = self.get_held_obj(self.left_hand)
if obj and not isinstance(obj, Gun):
self.drop(obj, self.location)
return True
        # If we fell through and did nothing, try again
return self.act()
def can_reach_obj(self, obj):
"""True if the Person can reach the object in question. The object must be either directly
in the same location, or on a visible supporter in the location"""
if self.location == obj.location:
return True
if hasattr(obj.location, 'location') and obj.location.location == self.location:
return True
def take(self, obj):
"""Try to take an object. If there's no hand available, drop an object and queue taking
the object. Return True if the object was taken or False if no hands available."""
free_hand = self.free_hand()
if free_hand:
print("pick up the {} with the {}".format(obj, free_hand))
obj.move_to(free_hand)
return True
else:
# Drop the thing in a random hand and queue picking up the thing
self.drop(self.get_held_obj(random.choice((self.right_hand, self.left_hand))), self.location)
self.queue.append((self.take, obj))
def go_to_random_location(self):
"""Randomly go to a location that isn't the current one"""
location = random.choice([place for place in stage.places if place != self.location and not isinstance(place, Door)])
self.go(location)
def enemy_is_present(self):
"""Is the enemy visible and suitably shootable?"""
        return self.enemy.location is not None and self.enemy.is_alive
def shoot(self, target, aimed=False):
"""Shoot first, ask questions never"""
gun = self.get_if_held(Gun)
if gun:
# Usually we'll aim and then fire, sometimes we'll just fire
if not aimed:
if random.randint(0, 5) > 1:
print("aim")
self.queue.append((self.shoot, target, True))
return False
print("fire")
log.debug("%s is trying to shoot %s", self.name, target.name)
hit_weight = self.starting_hit_weight()
if gun.num_bullets == 1:
hit_weight += 1
if self.health < DEFAULT_HEALTH:
hit_weight += 1
weighted_hit_or_miss = [('miss', 3), ('nick', 3 * hit_weight), ('hit', 1 * hit_weight)]
hit_or_nick = random.choice([val for val, cnt in weighted_hit_or_miss for i in range(cnt)])
print(GUN_DAMAGE[hit_or_nick]['message'].format(target.name))
target.health += GUN_DAMAGE[hit_or_nick]['health']
gun.num_bullets -= 1
return True
def starting_hit_weight(self):
"""Return a state-dependent starting weight that can increase or decrease the likelihood of
the actor making a successful shot."""
return 1
def go(self, location):
"""Try to move to the next location. If that location can be opened, like a door, open it first.
Otherwise, set the new location. If `location` is a string, find the
name of that location in the world."""
if isinstance(location, str):
location = self.stage.find(location)
log.debug("Trying to go to next location %s", location)
if location.is_openable and not location.is_open:
location.open()
return False
if location.is_openable and location.is_open:
print("go through {}".format(location))
self.queue.append((location.close,))
else:
print("go to {}".format(location))
self.location = location
return True
def get_if_held(self, obj_name):
"""Does the actor have the object name, object, or classname in any of its body parts? If so, return the container where it is"""
# First check if it's a classname (like Gun)
if inspect.isclass(obj_name):
# Check all the world models for objects of this type and try to find a match
for obj in stage.objects:
if isinstance(obj, obj_name) and obj.location in self.parts:
return obj
if isinstance(obj_name, str):
# If not, try to find the named object
obj = self.stage.find(obj_name)
else:
obj = obj_name
if obj.location in self.parts:
return obj
def get_held_obj(self, part):
"""Get the object held by a given body part. Returns None if the body part isn't holding anything"""
for obj in stage.objects:
if obj.location == part:
return obj
def free_hand(self):
"""Return the hand that isn't holding anything"""
right_free = True
left_free = True
for obj in stage.objects:
if obj.location == self.right_hand:
right_free = False
elif obj.location == self.left_hand:
left_free = False
if right_free:
return self.right_hand
if left_free:
return self.left_hand
@property
def is_alive(self):
return self.health > 0
def set_starting_location(self, location):
"""Setting the starting location changes the world model and also prints an explicit
message. It's idempotent and so safe to call in a loop because I'm lazy"""
if location and not self.location:
self.location = location
print("(The {} is at the {}.)".format(self.name, self.location.name))
def drop(self, obj, target):
"""Drop an object in a place or on a supporting object. Is a no-op if the actor doesn't have the object."""
if self.get_if_held(obj.name):
print("put {} {} {}".format(obj.name, target.preposition, target.name))
obj.move_to(target)
def __init__(self, name):
super(Person, self).__init__(name)
self.health = DEFAULT_HEALTH
self.path = [] # A path of Places the person is currently walking
self.queue = [] # A queue of functions to call next
self.right_hand = Thing("{}'s right hand".format(self.name), preposition='in')
self.left_hand = Thing("{}'s left hand".format(self.name), preposition='in')
self.body = Thing("{}".format(self.name))
self.parts = [self.left_hand, self.right_hand, self.body]
self.escaped = False # The final endgame state
class Robber(Person):
"""The Robber wants to deposit the money, drink, kill the sheriff, and escape with the money"""
def initiative(self):
actor_initiative = super(Robber, self).initiative()
# If the Robber has the money and the Sheriff is alive,
# the Robber wants to drop the money in the Corner
if self.get_if_held('money') and self.enemy.is_alive:
actor_initiative += HIGH_INITIATIVE
log.debug("%s is returning initiative %s", self.name, actor_initiative)
return actor_initiative
def act(self):
"""A set of conditions of high priority; these actions will be executed first"""
if self.location.name == 'corner' and self.get_if_held('money') and self.enemy.is_alive:
money = self.get_if_held('money')
self.drop(money, self.location)
return True
return super(Robber, self).act()
def starting_hit_weight(self):
"""The Robber (but _not_ the Sheriff) is a better shot if he's drunk"""
return self.inebriation + 2
class Sheriff(Person):
"""The Sheriff wants to kill the Robber and leave with the money. He does not get a drink bonus and arrives
on a delay."""
def __init__(self, name, delay):
super(Sheriff, self).__init__(name)
self.delay = delay
def initiative(self):
actor_initiative = super(Sheriff, self).initiative()
# The Sheriff is subject to the global timer and will do nothing until it expires
if self.stage.elapsed_time < self.delay:
actor_initiative = 0
elif self.location == None:
# If they haven't moved, tell them they want to move to the table
actor_initiative += HIGH_INITIATIVE
log.debug("%s is returning initiative %s", self.name, actor_initiative)
return actor_initiative
def act(self):
"""The Sheriff wants to get in the house right away"""
if self.location == None:
self.path = ['window', 'door']
return super(Sheriff, self).act()
def starting_hit_weight(self):
"""The Sheriff (but _not_ the Robber) is a better shot if he's injured"""
weight = 1
if self.health < DEFAULT_HEALTH:
weight += 3
return weight
class Gun(Thing):
"""A Gun is an object with a distinct property of being shootable and having a number of bullets"""
num_bullets = 0
def __init__(self, name):
super(Gun, self).__init__(name)
self.num_bullets = DEFAULT_NUM_BULLETS
class Holster(Thing):
def __init__(self, name, preposition='in'):
super(Holster, self).__init__(name, preposition=preposition)
class Container(Thing):
"""A Container is a vessel that can contain a thing (whisky)"""
volume = 0
def __init__(self, name):
super(Container, self).__init__(name)
@property
def full(self):
"""A container is 'full' if it contains any volume"""
return self.volume > 0
def pour(self, new_container):
"""Pouring from a full container into an empty container makes
the other container full. It doesn't make the source container
any less full because magic. If the source container is empty,
this is a no-op. Returns True if the pour succeeded."""
if self.full:
print("pour")
new_container.volume = 3
return True
def drink(self, actor):
"""Drinking from a full container changes the inebriation status
of the actor. Drinking from an empty glass has no effect.
Returns True if the drink succeeded."""
if self.full:
print("take a drink from {}".format(self))
actor.inebriation += 1
self.volume -= 1
return True
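# Illustrative sketch of the constants above: pour() always sets the target
# container's volume to 3, and each drink() lowers that volume by 1 while
# raising the drinker's inebriation by 1.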
def init(delay):
"""Initialize the starting conditions"""
# Humans
robber = Robber('robber')
robber_gun = Gun('gun')
robber_gun.move_to(robber.right_hand)
money = Thing('money')
money.move_to(robber.left_hand)
robber_holster = Holster('holster')
robber_holster.move_to(robber.body)
robber.stage = stage # A mechanism to get ahold of the world state
sheriff = Sheriff('sheriff', delay=delay)
sheriff_gun = Gun("sheriff's gun")
sheriff_gun.move_to(sheriff.right_hand)
holster = Holster("sheriff's holster")
holster.move_to(sheriff.body)
sheriff.stage = stage
robber.enemy = sheriff
sheriff.enemy = robber
# Places
window = Place('window')
table = Place('table')
door = Door('door')
corner = Place('corner')
sheriff.default_location = None # nowhere
robber.default_location = window
robber.path = [door, corner]
# Objects
glass = Container('glass')
bottle = Container('bottle')
bottle.volume = 10
glass.move_to(table)
bottle.move_to(table)
stage.current_scene += 1
loop()
def loop():
"""Main story loop, initialized by the delay before the sheriff arrives"""
# Start with the world status
print ("\nAct 1 Scene {}\n\n".format(stage.current_scene))
for obj in stage.objects:
if not isinstance(obj, Person) and obj.status():
print(obj.status() + '.', end=" ")
print()
next_actor = stage.actors[0]
while True:
print()
print(next_actor.name.upper())
next_actor = action(next_actor)
if next_actor.escaped:
print("CURTAIN")
stage.objects = []
stage.places = []
break
if __name__ == '__main__':
delay = input('Select arrival time for SHERIFF or ENTER for default: ') or DEFAULT_SHERIFF_DELAY
print("""
SAGA III
An Original Play
by
A Computer """)
for i in range(0, MAX_SCENES):
init(delay=int(delay))
|
jdahlin/pygobject
|
refs/heads/master
|
tests/runtests-windows.py
|
6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import glob
import unittest
os.environ['PYGTK_USE_GIL_STATE_API'] = ''
sys.path.insert(0, os.path.dirname(__file__))
sys.argv.append('--g-fatal-warnings')
from gi.repository import GObject
GObject.threads_init()
SKIP_FILES = ['runtests',
'test_mainloop', # no os.fork on windows
'test_subprocess'] # blocks on testChildWatch
if __name__ == '__main__':
testdir = os.path.split(os.path.abspath(__file__))[0]
os.chdir(testdir)
def gettestnames():
files = glob.glob('*.py')
names = map(lambda x: x[:-3], files)
map(names.remove, SKIP_FILES)
return names
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for name in gettestnames():
try:
suite.addTest(loader.loadTestsFromName(name))
except Exception, e:
print 'Could not load %s: %s' % (name, e)
testRunner = unittest.TextTestRunner()
testRunner.verbosity = 2
testRunner.run(suite)
|